Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/pci.c3
-rw-r--r--arch/alpha/kernel/sys_miata.c10
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/lib/checksum.c37
-rw-r--r--arch/alpha/lib/csum_partial_copy.c31
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arm/common/sharpsl_pm.c22
-rw-r--r--arch/arm/kernel/signal.c1
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig2
-rw-r--r--arch/arm/mach-lh7a40x/Kconfig2
-rw-r--r--arch/arm/mach-omap1/board-h3.c3
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c6
-rw-r--r--arch/arm/mach-omap1/devices.c2
-rw-r--r--arch/arm/mach-omap1/leds-osk.c4
-rw-r--r--arch/arm/mach-omap2/board-h4.c3
-rw-r--r--arch/arm/mach-pxa/akita-ioexp.c6
-rw-r--r--arch/arm/mach-s3c2410/Kconfig2
-rw-r--r--arch/arm/mach-s3c2410/dma.c4
-rw-r--r--arch/arm/mm/Kconfig2
-rw-r--r--arch/arm/mm/fault.c2
-rw-r--r--arch/arm26/kernel/ecard.c4
-rw-r--r--arch/arm26/mm/fault.c2
-rw-r--r--arch/arm26/mm/memc.c6
-rw-r--r--arch/avr32/kernel/kprobes.c1
-rw-r--r--arch/avr32/kernel/signal.c2
-rw-r--r--arch/avr32/mm/dma-coherent.c2
-rw-r--r--arch/cris/arch-v10/Kconfig2
-rw-r--r--arch/cris/arch-v10/drivers/Kconfig2
-rw-r--r--arch/cris/arch-v10/drivers/eeprom.c6
-rw-r--r--arch/cris/arch-v10/drivers/i2c.c2
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c2
-rw-r--r--arch/cris/arch-v10/lib/old_checksum.c62
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig8
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/kernel/futex.c6
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/frv/kernel/signal.c2
-rw-r--r--arch/frv/lib/checksum.c25
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/frv/mm/pgalloc.c6
-rw-r--r--arch/h8300/kernel/h8300_ksyms.c2
-rw-r--r--arch/h8300/kernel/setup.c2
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S1
-rw-r--r--arch/h8300/lib/checksum.c29
-rw-r--r--arch/i386/Kconfig72
-rw-r--r--arch/i386/Kconfig.cpu19
-rw-r--r--arch/i386/Kconfig.debug10
-rw-r--r--arch/i386/Makefile8
-rw-r--r--arch/i386/Makefile.cpu1
-rw-r--r--arch/i386/boot/compressed/Makefile28
-rw-r--r--arch/i386/boot/compressed/head.S185
-rw-r--r--arch/i386/boot/compressed/misc.c264
-rw-r--r--arch/i386/boot/compressed/relocs.c625
-rw-r--r--arch/i386/boot/compressed/vmlinux.lds43
-rw-r--r--arch/i386/boot/compressed/vmlinux.scr3
-rw-r--r--arch/i386/boot/setup.S42
-rw-r--r--arch/i386/defconfig22
-rw-r--r--arch/i386/kernel/Makefile5
-rw-r--r--arch/i386/kernel/acpi/boot.c10
-rw-r--r--arch/i386/kernel/acpi/cstate.c7
-rw-r--r--arch/i386/kernel/acpi/earlyquirk.c21
-rw-r--r--arch/i386/kernel/alternative.c64
-rw-r--r--arch/i386/kernel/apic.c22
-rw-r--r--arch/i386/kernel/apm.c3
-rw-r--r--arch/i386/kernel/asm-offsets.c39
-rw-r--r--arch/i386/kernel/cpu/amd.c5
-rw-r--r--arch/i386/kernel/cpu/common.c249
-rw-r--r--arch/i386/kernel/cpu/intel.c12
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c11
-rw-r--r--arch/i386/kernel/cpu/mcheck/non-fatal.c6
-rw-r--r--arch/i386/kernel/cpu/mcheck/therm_throt.c3
-rw-r--r--arch/i386/kernel/cpu/mtrr/Makefile4
-rw-r--r--arch/i386/kernel/cpu/mtrr/amd.c2
-rw-r--r--arch/i386/kernel/cpu/mtrr/centaur.c9
-rw-r--r--arch/i386/kernel/cpu/mtrr/cyrix.c25
-rw-r--r--arch/i386/kernel/cpu/mtrr/generic.c78
-rw-r--r--arch/i386/kernel/cpu/mtrr/if.c31
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c71
-rw-r--r--arch/i386/kernel/cpu/mtrr/mtrr.h25
-rw-r--r--arch/i386/kernel/cpu/proc.c3
-rw-r--r--arch/i386/kernel/cpuid.c23
-rw-r--r--arch/i386/kernel/crash.c66
-rw-r--r--arch/i386/kernel/e820.c894
-rw-r--r--arch/i386/kernel/efi.c17
-rw-r--r--arch/i386/kernel/entry.S331
-rw-r--r--arch/i386/kernel/head.S66
-rw-r--r--arch/i386/kernel/hpet.c7
-rw-r--r--arch/i386/kernel/i8259.c5
-rw-r--r--arch/i386/kernel/io_apic.c66
-rw-r--r--arch/i386/kernel/kprobes.c4
-rw-r--r--arch/i386/kernel/ldt.c4
-rw-r--r--arch/i386/kernel/mca.c13
-rw-r--r--arch/i386/kernel/microcode.c2
-rw-r--r--arch/i386/kernel/module.c11
-rw-r--r--arch/i386/kernel/mpparse.c2
-rw-r--r--arch/i386/kernel/msr.c25
-rw-r--r--arch/i386/kernel/nmi.c42
-rw-r--r--arch/i386/kernel/paravirt.c569
-rw-r--r--arch/i386/kernel/pci-dma.c10
-rw-r--r--arch/i386/kernel/process.c81
-rw-r--r--arch/i386/kernel/ptrace.c18
-rw-r--r--arch/i386/kernel/quirks.c46
-rw-r--r--arch/i386/kernel/reboot.c1
-rw-r--r--arch/i386/kernel/setup.c855
-rw-r--r--arch/i386/kernel/signal.c6
-rw-r--r--arch/i386/kernel/smp.c10
-rw-r--r--arch/i386/kernel/smpboot.c80
-rw-r--r--arch/i386/kernel/sysenter.c6
-rw-r--r--arch/i386/kernel/time.c15
-rw-r--r--arch/i386/kernel/time_hpet.c15
-rw-r--r--arch/i386/kernel/topology.c8
-rw-r--r--arch/i386/kernel/traps.c120
-rw-r--r--arch/i386/kernel/tsc.c5
-rw-r--r--arch/i386/kernel/vm86.c121
-rw-r--r--arch/i386/kernel/vmlinux.lds.S147
-rw-r--r--arch/i386/mach-generic/probe.c4
-rw-r--r--arch/i386/mach-voyager/voyager_cat.c6
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c14
-rw-r--r--arch/i386/math-emu/fpu_emu.h1
-rw-r--r--arch/i386/math-emu/fpu_entry.c3
-rw-r--r--arch/i386/math-emu/fpu_system.h1
-rw-r--r--arch/i386/math-emu/load_store.c2
-rw-r--r--arch/i386/math-emu/reg_ld_str.c15
-rw-r--r--arch/i386/mm/boot_ioremap.c1
-rw-r--r--arch/i386/mm/discontig.c2
-rw-r--r--arch/i386/mm/fault.c12
-rw-r--r--arch/i386/mm/highmem.c26
-rw-r--r--arch/i386/mm/hugetlbpage.c112
-rw-r--r--arch/i386/mm/init.c6
-rw-r--r--arch/i386/mm/pageattr.c24
-rw-r--r--arch/i386/mm/pgtable.c13
-rw-r--r--arch/i386/pci/common.c2
-rw-r--r--arch/i386/pci/early.c7
-rw-r--r--arch/i386/pci/fixup.c46
-rw-r--r--arch/i386/pci/i386.c64
-rw-r--r--arch/i386/pci/irq.c10
-rw-r--r--arch/i386/pci/pcbios.c11
-rw-r--r--arch/i386/power/Makefile2
-rw-r--r--arch/i386/power/cpu.c8
-rw-r--r--arch/i386/power/suspend.c158
-rw-r--r--arch/i386/power/swsusp.S9
-rw-r--r--arch/ia64/Kconfig23
-rw-r--r--arch/ia64/hp/common/sba_iommu.c18
-rw-r--r--arch/ia64/hp/sim/simserial.c7
-rw-r--r--arch/ia64/ia32/binfmt_elf32.c8
-rw-r--r--arch/ia64/ia32/ia32_support.c2
-rw-r--r--arch/ia64/ia32/ia32priv.h2
-rw-r--r--arch/ia64/ia32/sys_ia32.c2
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/cpufreq/acpi-cpufreq.c11
-rw-r--r--arch/ia64/kernel/crash.c245
-rw-r--r--arch/ia64/kernel/efi.c71
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c1
-rw-r--r--arch/ia64/kernel/iosapic.c21
-rw-r--r--arch/ia64/kernel/kprobes.c4
-rw-r--r--arch/ia64/kernel/machine_kexec.c133
-rw-r--r--arch/ia64/kernel/mca.c13
-rw-r--r--arch/ia64/kernel/palinfo.c24
-rw-r--r--arch/ia64/kernel/perfmon.c5
-rw-r--r--arch/ia64/kernel/perfmon_montecito.h12
-rw-r--r--arch/ia64/kernel/relocate_kernel.S334
-rw-r--r--arch/ia64/kernel/salinfo.c2
-rw-r--r--arch/ia64/kernel/setup.c38
-rw-r--r--arch/ia64/kernel/smp.c28
-rw-r--r--arch/ia64/kernel/smpboot.c12
-rw-r--r--arch/ia64/kernel/topology.c8
-rw-r--r--arch/ia64/lib/checksum.c38
-rw-r--r--arch/ia64/lib/csum_partial_copy.c31
-rw-r--r--arch/ia64/lib/ip_fast_csum.S58
-rw-r--r--arch/ia64/mm/hugetlbpage.c5
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/ia64/pci/pci.c82
-rw-r--r--arch/ia64/sn/kernel/Makefile5
-rw-r--r--arch/ia64/sn/kernel/io_acpi_init.c231
-rw-r--r--arch/ia64/sn/kernel/io_common.c613
-rw-r--r--arch/ia64/sn/kernel/io_init.c633
-rw-r--r--arch/ia64/sn/kernel/iomv.c11
-rw-r--r--arch/ia64/sn/kernel/irq.c18
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c4
-rw-r--r--arch/ia64/sn/kernel/setup.c26
-rw-r--r--arch/ia64/sn/kernel/tiocx.c2
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c17
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c18
-rw-r--r--arch/m32r/kernel/setup.c4
-rw-r--r--arch/m32r/kernel/signal.c2
-rw-r--r--arch/m32r/lib/csum_partial_copy.c12
-rw-r--r--arch/m32r/mm/discontig.c4
-rw-r--r--arch/m68k/amiga/chipram.c3
-rw-r--r--arch/m68k/atari/hades-pci.c3
-rw-r--r--arch/m68k/lib/checksum.c13
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/m68knommu/Kconfig16
-rw-r--r--arch/m68knommu/kernel/m68k_ksyms.c2
-rw-r--r--arch/m68knommu/kernel/process.c34
-rw-r--r--arch/m68knommu/kernel/setup.c3
-rw-r--r--arch/m68knommu/kernel/sys_m68k.c23
-rw-r--r--arch/m68knommu/kernel/traps.c13
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68knommu/lib/checksum.c28
-rw-r--r--arch/m68knommu/platform/5307/head.S20
-rw-r--r--arch/m68knommu/platform/5307/timers.c16
-rw-r--r--arch/m68knommu/platform/68360/config.c2
-rw-r--r--arch/m68knommu/platform/68360/head-ram.S3
-rw-r--r--arch/mips/Kconfig59
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/au1000/common/irq.c63
-rw-r--r--arch/mips/au1000/pb1200/board_setup.c8
-rw-r--r--arch/mips/cobalt/irq.c31
-rw-r--r--arch/mips/cobalt/setup.c16
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq_5477.c23
-rw-r--r--arch/mips/dec/ecc-berr.c7
-rw-r--r--arch/mips/dec/int-handler.S2
-rw-r--r--arch/mips/dec/ioasic-irq.c74
-rw-r--r--arch/mips/dec/kn01-berr.c2
-rw-r--r--arch/mips/dec/kn02-irq.c58
-rw-r--r--arch/mips/dec/setup.c6
-rw-r--r--arch/mips/dec/time.c4
-rw-r--r--arch/mips/emma2rh/common/irq_emma2rh.c42
-rw-r--r--arch/mips/emma2rh/markeins/irq_markeins.c63
-rw-r--r--arch/mips/gt64120/ev64120/irq.c40
-rw-r--r--arch/mips/jazz/irq.c34
-rw-r--r--arch/mips/jmr3927/rbhma3100/irq.c32
-rw-r--r--arch/mips/jmr3927/rbhma3100/setup.c4
-rw-r--r--arch/mips/kernel/Makefile3
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c1
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c1
-rw-r--r--arch/mips/kernel/cpu-probe.c19
-rw-r--r--arch/mips/kernel/dma-no-isa.c28
-rw-r--r--arch/mips/kernel/genex.S63
-rw-r--r--arch/mips/kernel/head.S3
-rw-r--r--arch/mips/kernel/i8259.c179
-rw-r--r--arch/mips/kernel/irixelf.c8
-rw-r--r--arch/mips/kernel/irq-msc01.c47
-rw-r--r--arch/mips/kernel/irq-mv6434x.c64
-rw-r--r--arch/mips/kernel/irq-rm7000.c61
-rw-r--r--arch/mips/kernel/irq-rm9000.c55
-rw-r--r--arch/mips/kernel/irq.c34
-rw-r--r--arch/mips/kernel/irq_cpu.c90
-rw-r--r--arch/mips/kernel/kspd.c4
-rw-r--r--arch/mips/kernel/linux32.c578
-rw-r--r--arch/mips/kernel/machine_kexec.c85
-rw-r--r--arch/mips/kernel/module.c15
-rw-r--r--arch/mips/kernel/relocate_kernel.S80
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S16
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c87
-rw-r--r--arch/mips/kernel/signal_n32.c1
-rw-r--r--arch/mips/kernel/smp-mt.c2
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kernel/smtc.c1
-rw-r--r--arch/mips/kernel/time.c64
-rw-r--r--arch/mips/kernel/traps.c72
-rw-r--r--arch/mips/lasat/interrupt.c43
-rw-r--r--arch/mips/lib-32/Makefile2
-rw-r--r--arch/mips/lib-32/csum_partial.S240
-rw-r--r--arch/mips/lib-64/Makefile2
-rw-r--r--arch/mips/lib-64/csum_partial.S242
-rw-r--r--arch/mips/lib/Makefile4
-rw-r--r--arch/mips/lib/csum_partial.S258
-rw-r--r--arch/mips/lib/csum_partial_copy.c8
-rw-r--r--arch/mips/mips-boards/atlas/atlas_int.c29
-rw-r--r--arch/mips/mips-boards/generic/time.c1
-rw-r--r--arch/mips/mips-boards/sim/sim_time.c18
-rw-r--r--arch/mips/mm/c-r4k.c22
-rw-r--r--arch/mips/mm/dma-coherent.c4
-rw-r--r--arch/mips/mm/dma-ip27.c4
-rw-r--r--arch/mips/mm/dma-ip32.c5
-rw-r--r--arch/mips/mm/dma-noncoherent.c5
-rw-r--r--arch/mips/mm/fault.c4
-rw-r--r--arch/mips/mm/highmem.c10
-rw-r--r--arch/mips/mm/init.c42
-rw-r--r--arch/mips/mm/pgtable-64.c3
-rw-r--r--arch/mips/mm/tlbex.c55
-rw-r--r--arch/mips/momentum/ocelot_c/cpci-irq.c63
-rw-r--r--arch/mips/momentum/ocelot_c/uart-irq.c66
-rw-r--r--arch/mips/oprofile/Makefile1
-rw-r--r--arch/mips/oprofile/common.c3
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c32
-rw-r--r--arch/mips/pci/fixup-cobalt.c11
-rw-r--r--arch/mips/pci/ops-gt64111.c16
-rw-r--r--arch/mips/philips/pnx8550/common/int.c74
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c33
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c137
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c25
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c37
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c133
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c30
-rw-r--r--arch/mips/sibyte/bcm1480/time.c6
-rw-r--r--arch/mips/sibyte/sb1250/irq.c30
-rw-r--r--arch/mips/sibyte/sb1250/time.c8
-rw-r--r--arch/mips/sibyte/swarm/setup.c8
-rw-r--r--arch/mips/sni/irq.c40
-rw-r--r--arch/mips/tx4927/common/tx4927_irq.c190
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c205
-rw-r--r--arch/mips/tx4938/common/irq.c135
-rw-r--r--arch/mips/tx4938/common/setup.c1
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/irq.c64
-rw-r--r--arch/mips/vr41xx/Kconfig5
-rw-r--r--arch/mips/vr41xx/common/icu.c62
-rw-r--r--arch/mips/vr41xx/nec-cmbvr4133/irq.c20
-rw-r--r--arch/parisc/kernel/binfmt_elf32.c1
-rw-r--r--arch/parisc/lib/checksum.c17
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/.gitignore1
-rw-r--r--arch/powerpc/Kconfig107
-rw-r--r--arch/powerpc/Kconfig.debug13
-rw-r--r--arch/powerpc/boot/.gitignore13
-rw-r--r--arch/powerpc/boot/Makefile15
-rw-r--r--arch/powerpc/boot/dts/kuroboxHG.dts148
-rw-r--r--arch/powerpc/boot/dts/lite5200.dts313
-rw-r--r--arch/powerpc/boot/dts/lite5200b.dts318
-rw-r--r--arch/powerpc/boot/dts/mpc7448hpc2.dts44
-rw-r--r--arch/powerpc/boot/flatdevtree.c880
-rw-r--r--arch/powerpc/boot/flatdevtree.h62
-rw-r--r--arch/powerpc/boot/flatdevtree_env.h47
-rw-r--r--arch/powerpc/boot/flatdevtree_misc.c51
-rw-r--r--arch/powerpc/boot/io.h53
-rw-r--r--arch/powerpc/boot/main.c57
-rw-r--r--arch/powerpc/boot/mktree.c152
-rw-r--r--arch/powerpc/boot/ns16550.c74
-rw-r--r--arch/powerpc/boot/of.c8
-rw-r--r--arch/powerpc/boot/ops.h26
-rw-r--r--arch/powerpc/boot/serial.c142
-rw-r--r--arch/powerpc/boot/simple_alloc.c149
-rw-r--r--arch/powerpc/boot/stdio.c3
-rw-r--r--arch/powerpc/boot/util.S88
-rwxr-xr-xarch/powerpc/boot/wrapper3
-rw-r--r--arch/powerpc/boot/zImage.coff.lds.S4
-rw-r--r--arch/powerpc/configs/cell_defconfig50
-rw-r--r--arch/powerpc/configs/linkstation_defconfig1583
-rw-r--r--arch/powerpc/configs/lite5200_defconfig931
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/ps3_defconfig837
-rw-r--r--arch/powerpc/kernel/Makefile15
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/cpu_setup_ppc970.S16
-rw-r--r--arch/powerpc/kernel/cputable.c62
-rw-r--r--arch/powerpc/kernel/crash.c63
-rw-r--r--arch/powerpc/kernel/dma_64.c249
-rw-r--r--arch/powerpc/kernel/entry_64.S51
-rw-r--r--arch/powerpc/kernel/head_64.S163
-rw-r--r--arch/powerpc/kernel/ibmebus.c9
-rw-r--r--arch/powerpc/kernel/idle.c7
-rw-r--r--arch/powerpc/kernel/idle_power4.S8
-rw-r--r--arch/powerpc/kernel/io.c105
-rw-r--r--arch/powerpc/kernel/iomap.c2
-rw-r--r--arch/powerpc/kernel/iommu.c6
-rw-r--r--arch/powerpc/kernel/irq.c80
-rw-r--r--arch/powerpc/kernel/kprobes.c2
-rw-r--r--arch/powerpc/kernel/of_device.c173
-rw-r--r--arch/powerpc/kernel/of_platform.c489
-rw-r--r--arch/powerpc/kernel/pci_32.c96
-rw-r--r--arch/powerpc/kernel/pci_64.c70
-rw-r--r--arch/powerpc/kernel/pci_direct_iommu.c98
-rw-r--r--arch/powerpc/kernel/pci_iommu.c164
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c4
-rw-r--r--arch/powerpc/kernel/prom.c111
-rw-r--r--arch/powerpc/kernel/prom_init.c18
-rw-r--r--arch/powerpc/kernel/prom_parse.c290
-rw-r--r--arch/powerpc/kernel/rtas.c5
-rw-r--r--arch/powerpc/kernel/rtas_flash.c8
-rw-r--r--arch/powerpc/kernel/rtas_pci.c35
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/setup_64.c18
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/smp-tbsync.c5
-rw-r--r--arch/powerpc/kernel/smp.c1
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kernel/sysfs.c84
-rw-r--r--arch/powerpc/kernel/time.c7
-rw-r--r--arch/powerpc/kernel/traps.c8
-rw-r--r--arch/powerpc/kernel/vdso.c2
-rw-r--r--arch/powerpc/kernel/vio.c94
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/mm/Makefile2
-rw-r--r--arch/powerpc/mm/fault.c25
-rw-r--r--arch/powerpc/mm/hash_native_64.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c7
-rw-r--r--arch/powerpc/mm/init_64.c8
-rw-r--r--arch/powerpc/mm/pgtable_32.c33
-rw-r--r--arch/powerpc/mm/pgtable_64.c52
-rw-r--r--arch/powerpc/mm/slb.c13
-rw-r--r--arch/powerpc/oprofile/Makefile1
-rw-r--r--arch/powerpc/oprofile/common.c15
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c724
-rw-r--r--arch/powerpc/platforms/52xx/Makefile9
-rw-r--r--arch/powerpc/platforms/52xx/efika-pci.c119
-rw-r--r--arch/powerpc/platforms/52xx/efika-setup.c150
-rw-r--r--arch/powerpc/platforms/52xx/efika.h19
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c162
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c126
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c473
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.h53
-rw-r--r--arch/powerpc/platforms/82xx/mpc82xx_ads.c13
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc8360e_pb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h1
-rw-r--r--arch/powerpc/platforms/83xx/pci.c9
-rw-r--r--arch/powerpc/platforms/85xx/misc.c8
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c11
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c10
-rw-r--r--arch/powerpc/platforms/Makefile4
-rw-r--r--arch/powerpc/platforms/cell/Kconfig14
-rw-r--r--arch/powerpc/platforms/cell/Makefile7
-rw-r--r--arch/powerpc/platforms/cell/cbe_cpufreq.c248
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c71
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.h203
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c226
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c16
-rw-r--r--arch/powerpc/platforms/cell/interrupt.h2
-rw-r--r--arch/powerpc/platforms/cell/io-workarounds.c346
-rw-r--r--arch/powerpc/platforms/cell/iommu.c1049
-rw-r--r--arch/powerpc/platforms/cell/iommu.h65
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c101
-rw-r--r--arch/powerpc/platforms/cell/pmu.c429
-rw-r--r--arch/powerpc/platforms/cell/setup.c78
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c415
-rw-r--r--arch/powerpc/platforms/cell/spu_coredump.c81
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c428
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.h26
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/spufs/backing_ops.c31
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c27
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c238
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c536
-rw-r--r--arch/powerpc/platforms/cell/spufs/hw_ops.c51
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c64
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c149
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h33
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c63
-rw-r--r--arch/powerpc/platforms/chrp/chrp.h1
-rw-r--r--arch/powerpc/platforms/chrp/pci.c9
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig30
-rw-r--r--arch/powerpc/platforms/embedded6xx/Makefile1
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c211
-rw-r--r--arch/powerpc/platforms/embedded6xx/ls_uart.c131
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c94
-rw-r--r--arch/powerpc/platforms/iseries/Makefile8
-rw-r--r--arch/powerpc/platforms/iseries/dt.c15
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c16
-rw-r--r--arch/powerpc/platforms/iseries/ksyms.c6
-rw-r--r--arch/powerpc/platforms/iseries/misc.S35
-rw-r--r--arch/powerpc/platforms/iseries/pci.c372
-rw-r--r--arch/powerpc/platforms/iseries/setup.c48
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c3
-rw-r--r--arch/powerpc/platforms/maple/maple.h2
-rw-r--r--arch/powerpc/platforms/maple/pci.c47
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c8
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c17
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c12
-rw-r--r--arch/powerpc/platforms/powermac/feature.c8
-rw-r--r--arch/powerpc/platforms/powermac/pci.c36
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c7
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig43
-rw-r--r--arch/powerpc/platforms/ps3/Makefile4
-rw-r--r--arch/powerpc/platforms/ps3/exports.c27
-rw-r--r--arch/powerpc/platforms/ps3/htab.c277
-rw-r--r--arch/powerpc/platforms/ps3/hvcall.S804
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c575
-rw-r--r--arch/powerpc/platforms/ps3/mm.c831
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c259
-rw-r--r--arch/powerpc/platforms/ps3/platform.h68
-rw-r--r--arch/powerpc/platforms/ps3/repository.c840
-rw-r--r--arch/powerpc/platforms/ps3/setup.c173
-rw-r--r--arch/powerpc/platforms/ps3/smp.c158
-rw-r--r--arch/powerpc/platforms/ps3/spu.c613
-rw-r--r--arch/powerpc/platforms/ps3/time.c104
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c6
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c90
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/pci.c35
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c6
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c5
-rw-r--r--arch/powerpc/platforms/pseries/xics.c68
-rw-r--r--arch/powerpc/sysdev/Makefile5
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c34
-rw-r--r--arch/powerpc/sysdev/dcr-low.S39
-rw-r--r--arch/powerpc/sysdev/dcr.c137
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c28
-rw-r--r--arch/powerpc/sysdev/mpic.c172
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c3
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_fast.c4
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c4
-rw-r--r--arch/powerpc/sysdev/rom.c31
-rw-r--r--arch/powerpc/sysdev/todc.c392
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c48
-rw-r--r--arch/powerpc/xmon/Makefile9
-rw-r--r--arch/powerpc/xmon/dis-asm.h31
-rw-r--r--arch/powerpc/xmon/ppc-dis.c29
-rw-r--r--arch/powerpc/xmon/ppc-opc.c778
-rw-r--r--arch/powerpc/xmon/ppc.h39
-rw-r--r--arch/powerpc/xmon/spu-dis.c248
-rw-r--r--arch/powerpc/xmon/spu-insns.h410
-rw-r--r--arch/powerpc/xmon/spu-opc.c44
-rw-r--r--arch/powerpc/xmon/spu.h126
-rw-r--r--arch/powerpc/xmon/xmon.c356
-rw-r--r--arch/ppc/.gitignore1
-rw-r--r--arch/ppc/8260_io/fcc_enet.c21
-rw-r--r--arch/ppc/8xx_io/fec.c21
-rw-r--r--arch/ppc/Kconfig13
-rw-r--r--arch/ppc/boot/images/.gitignore6
-rw-r--r--arch/ppc/boot/lib/.gitignore3
-rw-r--r--arch/ppc/boot/utils/.gitignore3
-rw-r--r--arch/ppc/kernel/setup.c2
-rw-r--r--arch/ppc/kernel/traps.c2
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S1
-rw-r--r--arch/ppc/platforms/4xx/bubinga.c2
-rw-r--r--arch/ppc/platforms/4xx/cpci405.c2
-rw-r--r--arch/ppc/platforms/4xx/ep405.c2
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.c4
-rw-r--r--arch/ppc/platforms/85xx/mpc8540_ads.c4
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c4
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_cds_common.c6
-rw-r--r--arch/ppc/platforms/85xx/sbc8560.c2
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.c2
-rw-r--r--arch/ppc/platforms/85xx/tqm85xx.c4
-rw-r--r--arch/ppc/platforms/mpc8272ads_setup.c6
-rw-r--r--arch/ppc/platforms/mpc866ads_setup.c4
-rw-r--r--arch/ppc/syslib/mpc8xx_devices.c8
-rw-r--r--arch/s390/Kconfig5
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/appldata/appldata_base.c8
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/binfmt_elf32.c1
-rw-r--r--arch/s390/kernel/cpcmd.c18
-rw-r--r--arch/s390/kernel/head.S21
-rw-r--r--arch/s390/kernel/head31.S8
-rw-r--r--arch/s390/kernel/head64.S17
-rw-r--r--arch/s390/kernel/ipl.c185
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c78
-rw-r--r--arch/s390/kernel/reipl.S17
-rw-r--r--arch/s390/kernel/reipl64.S16
-rw-r--r--arch/s390/kernel/relocate_kernel.S5
-rw-r--r--arch/s390/kernel/relocate_kernel64.S5
-rw-r--r--arch/s390/kernel/reset.S48
-rw-r--r--arch/s390/kernel/setup.c66
-rw-r--r--arch/s390/kernel/smp.c117
-rw-r--r--arch/s390/kernel/traps.c30
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/uaccess_mvcos.c27
-rw-r--r--arch/s390/lib/uaccess_pt.c153
-rw-r--r--arch/s390/lib/uaccess_std.c73
-rw-r--r--arch/s390/mm/extmem.c38
-rw-r--r--arch/s390/mm/fault.c28
-rw-r--r--arch/sh/Kconfig102
-rw-r--r--arch/sh/Kconfig.debug22
-rw-r--r--arch/sh/Makefile26
-rw-r--r--arch/sh/boards/renesas/r7780rp/Makefile4
-rw-r--r--arch/sh/boards/renesas/r7780rp/irq.c1
-rw-r--r--arch/sh/boards/renesas/r7780rp/psw.c122
-rw-r--r--arch/sh/boards/renesas/r7780rp/setup.c29
-rw-r--r--arch/sh/boards/se/7206/Makefile7
-rw-r--r--arch/sh/boards/se/7206/io.c123
-rw-r--r--arch/sh/boards/se/7206/irq.c139
-rw-r--r--arch/sh/boards/se/7206/led.c57
-rw-r--r--arch/sh/boards/se/7206/setup.c79
-rw-r--r--arch/sh/boards/se/7619/Makefile5
-rw-r--r--arch/sh/boards/se/7619/io.c102
-rw-r--r--arch/sh/boards/se/7619/setup.c43
-rw-r--r--arch/sh/boards/titan/setup.c27
-rw-r--r--arch/sh/boot/compressed/misc.c3
-rw-r--r--arch/sh/configs/r7780rp_defconfig69
-rw-r--r--arch/sh/configs/se7206_defconfig826
-rw-r--r--arch/sh/drivers/Kconfig9
-rw-r--r--arch/sh/drivers/Makefile2
-rw-r--r--arch/sh/drivers/dma/Makefile4
-rw-r--r--arch/sh/drivers/dma/dma-api.c274
-rw-r--r--arch/sh/drivers/dma/dma-sh.c9
-rw-r--r--arch/sh/drivers/dma/dma-sysfs.c23
-rw-r--r--arch/sh/drivers/pci/ops-titan.c24
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.c14
-rw-r--r--arch/sh/drivers/push-switch.c138
-rw-r--r--arch/sh/kernel/Makefile3
-rw-r--r--arch/sh/kernel/cpu/Makefile11
-rw-r--r--arch/sh/kernel/cpu/clock.c27
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile3
-rw-r--r--arch/sh/kernel/cpu/irq/imask.c5
-rw-r--r--arch/sh/kernel/cpu/irq/intc2.c25
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c93
-rw-r--r--arch/sh/kernel/cpu/sh2/Makefile3
-rw-r--r--arch/sh/kernel/cpu/sh2/clock-sh7619.c81
-rw-r--r--arch/sh/kernel/cpu/sh2/entry.S341
-rw-r--r--arch/sh/kernel/cpu/sh2/ex.S46
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c16
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c53
-rw-r--r--arch/sh/kernel/cpu/sh2a/Makefile10
-rw-r--r--arch/sh/kernel/cpu/sh2a/clock-sh7206.c85
-rw-r--r--arch/sh/kernel/cpu/sh2a/probe.c39
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c58
-rw-r--r--arch/sh/kernel/cpu/sh3/Makefile2
-rw-r--r--arch/sh/kernel/cpu/sh3/clock-sh7709.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S (renamed from arch/sh/kernel/entry.S)546
-rw-r--r--arch/sh/kernel/cpu/sh4/Makefile3
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh4-202.c4
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh7780.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/fpu.c25
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c19
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c70
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7780.c36
-rw-r--r--arch/sh/kernel/cpu/sh4/sq.c13
-rw-r--r--arch/sh/kernel/early_printk.c44
-rw-r--r--arch/sh/kernel/entry-common.S433
-rw-r--r--arch/sh/kernel/head.S17
-rw-r--r--arch/sh/kernel/irq.c55
-rw-r--r--arch/sh/kernel/process.c32
-rw-r--r--arch/sh/kernel/relocate_kernel.S14
-rw-r--r--arch/sh/kernel/setup.c5
-rw-r--r--arch/sh/kernel/sh_ksyms.c7
-rw-r--r--arch/sh/kernel/signal.c37
-rw-r--r--arch/sh/kernel/stacktrace.c43
-rw-r--r--arch/sh/kernel/sys_sh.c7
-rw-r--r--arch/sh/kernel/time.c139
-rw-r--r--arch/sh/kernel/timers/Makefile2
-rw-r--r--arch/sh/kernel/timers/timer-cmt.c196
-rw-r--r--arch/sh/kernel/timers/timer-mtu2.c200
-rw-r--r--arch/sh/kernel/timers/timer-tmu.c13
-rw-r--r--arch/sh/kernel/timers/timer.c6
-rw-r--r--arch/sh/kernel/traps.c200
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall.c2
-rw-r--r--arch/sh/mm/Kconfig77
-rw-r--r--arch/sh/mm/cache-sh2.c69
-rw-r--r--arch/sh/mm/cache-sh4.c18
-rw-r--r--arch/sh/mm/clear_page.S18
-rw-r--r--arch/sh/mm/copy_page.S16
-rw-r--r--arch/sh/mm/fault.c161
-rw-r--r--arch/sh/mm/hugetlbpage.c5
-rw-r--r--arch/sh/mm/init.c45
-rw-r--r--arch/sh/mm/ioremap.c4
-rw-r--r--arch/sh/mm/pg-dma.c2
-rw-r--r--arch/sh/mm/pg-sh4.c35
-rw-r--r--arch/sh/mm/pmb.c6
-rw-r--r--arch/sh/tools/mach-types2
-rw-r--r--arch/sh64/kernel/setup.c4
-rw-r--r--arch/sh64/kernel/sh_ksyms.c2
-rw-r--r--arch/sh64/kernel/signal.c2
-rw-r--r--arch/sh64/lib/c-checksum.c49
-rw-r--r--arch/sh64/lib/dbg.c2
-rw-r--r--arch/sh64/mm/fault.c2
-rw-r--r--arch/sh64/mm/hugetlbpage.c5
-rw-r--r--arch/sparc/Kconfig4
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc/mm/highmem.c8
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c1
-rw-r--r--arch/sparc64/kernel/pci.c9
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc64/mm/hugetlbpage.c5
-rw-r--r--arch/sparc64/mm/init.c4
-rw-r--r--arch/sparc64/mm/tsb.c2
-rw-r--r--arch/um/drivers/chan_kern.c2
-rw-r--r--arch/um/drivers/chan_user.c2
-rw-r--r--arch/um/drivers/daemon_kern.c2
-rw-r--r--arch/um/drivers/line.c6
-rw-r--r--arch/um/drivers/mcast_kern.c2
-rw-r--r--arch/um/drivers/mconsole_kern.c4
-rw-r--r--arch/um/drivers/net_kern.c1
-rw-r--r--arch/um/drivers/pcap_kern.c2
-rw-r--r--arch/um/drivers/port_kern.c4
-rw-r--r--arch/um/drivers/slip_kern.c2
-rw-r--r--arch/um/drivers/slirp_kern.c2
-rw-r--r--arch/um/include/chan_kern.h2
-rw-r--r--arch/um/include/line.h2
-rw-r--r--arch/um/include/sysdep-i386/checksum.h74
-rw-r--r--arch/um/include/sysdep-i386/ptrace.h2
-rw-r--r--arch/um/include/sysdep-i386/stub.h1
-rw-r--r--arch/um/include/sysdep-x86_64/checksum.h47
-rw-r--r--arch/um/include/sysdep-x86_64/ptrace.h2
-rw-r--r--arch/um/os-Linux/drivers/ethertap_kern.c2
-rw-r--r--arch/um/os-Linux/drivers/tuntap_kern.c2
-rw-r--r--arch/um/sys-i386/ldt.c1
-rw-r--r--arch/um/sys-i386/ptrace_user.c2
-rw-r--r--arch/um/sys-i386/user-offsets.c2
-rw-r--r--arch/v850/kernel/v850_ksyms.c2
-rw-r--r--arch/v850/kernel/vmlinux.lds.S1
-rw-r--r--arch/v850/lib/checksum.c26
-rw-r--r--arch/x86_64/Kconfig43
-rw-r--r--arch/x86_64/Makefile4
-rw-r--r--arch/x86_64/defconfig27
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c4
-rw-r--r--arch/x86_64/ia32/ia32_signal.c5
-rw-r--r--arch/x86_64/ia32/syscall32.c2
-rw-r--r--arch/x86_64/kernel/apic.c104
-rw-r--r--arch/x86_64/kernel/crash.c69
-rw-r--r--arch/x86_64/kernel/early-quirks.c18
-rw-r--r--arch/x86_64/kernel/entry.S36
-rw-r--r--arch/x86_64/kernel/genapic.c9
-rw-r--r--arch/x86_64/kernel/head64.c6
-rw-r--r--arch/x86_64/kernel/i387.c7
-rw-r--r--arch/x86_64/kernel/i8259.c3
-rw-r--r--arch/x86_64/kernel/io_apic.c258
-rw-r--r--arch/x86_64/kernel/irq.c2
-rw-r--r--arch/x86_64/kernel/kprobes.c2
-rw-r--r--arch/x86_64/kernel/mce.c9
-rw-r--r--arch/x86_64/kernel/mce_amd.c4
-rw-r--r--arch/x86_64/kernel/mpparse.c2
-rw-r--r--arch/x86_64/kernel/nmi.c29
-rw-r--r--arch/x86_64/kernel/pci-calgary.c218
-rw-r--r--arch/x86_64/kernel/pci-dma.c5
-rw-r--r--arch/x86_64/kernel/pci-gart.c3
-rw-r--r--arch/x86_64/kernel/process.c45
-rw-r--r--arch/x86_64/kernel/setup.c24
-rw-r--r--arch/x86_64/kernel/smp.c5
-rw-r--r--arch/x86_64/kernel/smpboot.c20
-rw-r--r--arch/x86_64/kernel/time.c4
-rw-r--r--arch/x86_64/kernel/traps.c55
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S9
-rw-r--r--arch/x86_64/kernel/vsyscall.c4
-rw-r--r--arch/x86_64/lib/csum-partial.c11
-rw-r--r--arch/x86_64/lib/csum-wrappers.c37
-rw-r--r--arch/x86_64/lib/delay.c4
-rw-r--r--arch/x86_64/mm/fault.c10
-rw-r--r--arch/x86_64/mm/init.c7
-rw-r--r--arch/x86_64/mm/pageattr.c58
728 files changed, 35383 insertions(+), 12279 deletions(-)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index ffb7d5423cc..3c10b9a1ddf 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -516,10 +516,11 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
if (bus == 0 && dfn == 0) {
hose = pci_isa_hose;
} else {
- dev = pci_find_slot(bus, dfn);
+ dev = pci_get_bus_and_slot(bus, dfn);
if (!dev)
return -ENODEV;
hose = dev->sysdata;
+ pci_dev_put(dev);
}
}
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index b8b817feb1e..910b43cd63e 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -183,11 +183,15 @@ miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) {
u8 irq=0;
-
- if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL)
+ struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7);
+ if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) {
+ pci_dev_put(pdev);
return -1;
- else
+ }
+ else {
+ pci_dev_put(pdev);
return irq;
+ }
}
return COMMON_TABLE_LOOKUP;
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 93744bab73f..e7594a7cf58 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -200,7 +200,7 @@ nautilus_init_pci(void)
bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
hose->bus = bus;
- irongate = pci_find_slot(0, 0);
+ irongate = pci_get_bus_and_slot(0, 0);
bus->self = irongate;
bus->resource[1] = &irongate_mem;
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 89044e6385f..ab3761c437a 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -41,28 +41,25 @@ static inline unsigned short from64to16(unsigned long x)
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented.
*/
-unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
+__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
- return ~from64to16(saddr + daddr + sum +
- ((unsigned long) ntohs(len) << 16) +
- ((unsigned long) proto << 8));
+ return (__force __sum16)~from64to16(
+ (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8));
}
-unsigned int csum_tcpudp_nofold(unsigned long saddr,
- unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
unsigned long result;
- result = (saddr + daddr + sum +
- ((unsigned long) ntohs(len) << 16) +
- ((unsigned long) proto << 8));
+ result = (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8);
/* Fold down to 32-bits so we don't lose in the typedef-less
network stack. */
@@ -70,7 +67,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
result = (result & 0xffffffff) + (result >> 32);
/* 33 to 32 */
result = (result & 0xffffffff) + (result >> 32);
- return result;
+ return (__force __wsum)result;
}
/*
@@ -146,9 +143,9 @@ out:
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
- return ~do_csum(iph,ihl*4);
+ return (__force __sum16)~do_csum(iph,ihl*4);
}
/*
@@ -163,15 +160,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned long result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
+ result += (__force u32)sum;
/* 32+c bits -> 32 bits */
result = (result & 0xffffffff) + (result >> 32);
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -180,7 +177,7 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
- return ~from64to16(do_csum(buff,len));
+ return (__force __sum16)~from64to16(do_csum(buff,len));
}
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index a37948f3037..4ca75c74ce9 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -329,11 +329,11 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
return checksum;
}
-static unsigned int
-do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
- unsigned int sum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *errp)
{
- unsigned long checksum = (unsigned) sum;
+ unsigned long checksum = (__force u32) sum;
unsigned long soff = 7 & (unsigned long) src;
unsigned long doff = 7 & (unsigned long) dst;
@@ -367,25 +367,12 @@ do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
}
checksum = from64to16 (checksum);
}
- return checksum;
-}
-
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len,
- unsigned int sum, int *errp)
-{
- if (!access_ok(VERIFY_READ, src, len)) {
- *errp = -EFAULT;
- memset(dst, 0, len);
- return sum;
- }
-
- return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
+ return (__force __wsum)checksum;
}
-unsigned int
-csum_partial_copy_nocheck(const char __user *src, char *dst, int len,
- unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
- return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+ return csum_partial_copy_from_user((__force const void __user *)src,
+ dst, len, sum, NULL);
}
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 8871529a34e..8aa9db834c1 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
- if (!mm || in_interrupt())
+ if (!mm || in_atomic())
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf9679..b3599743093 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
static int sharpsl_fatal_check(void);
static int sharpsl_average_value(int ad);
static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
/*
* Variables
*/
struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
EXPORT_SYMBOL(sharpsl_battery_kick);
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
{
int voltage, percent, apm_status, i = 0;
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
/* Corgi cannot confirm when battery fully charged so periodically kick! */
if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
while(1) {
voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
sharpsl_pm_led(SHARPSL_LED_OFF);
sharpsl_pm.charge_mode = CHRG_OFF;
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
sharpsl_pm.charge_mode = CHRG_ERROR;
}
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
{
dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
else if (sharpsl_pm.charge_mode == CHRG_ON)
sharpsl_charge_off();
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
sharpsl_charge_off();
} else if (sharpsl_pm.full_count < 2) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else {
sharpsl_charge_off();
sharpsl_pm.charge_mode = CHRG_DONE;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f2b1d61fbc0..3843d3bab2d 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -11,6 +11,7 @@
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
+#include <linux/freezer.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 57f23b46539..e316bd93313 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -133,7 +133,7 @@ config IXP4XX_INDIRECT_PCI
into the kernel and we can use the standard read[bwl]/write[bwl]
macros. This is the preferred method due to speed but it
limits the system to just 64MB of PCI memory. This can be
- problamatic if using video cards and other memory-heavy devices.
+ problematic if using video cards and other memory-heavy devices.
2) If > 64MB of memory space is required, the IXP4xx can be
configured to use indirect registers to access PCI This allows
diff --git a/arch/arm/mach-lh7a40x/Kconfig b/arch/arm/mach-lh7a40x/Kconfig
index 147b01928a9..6f4c6a1798c 100644
--- a/arch/arm/mach-lh7a40x/Kconfig
+++ b/arch/arm/mach-lh7a40x/Kconfig
@@ -8,7 +8,7 @@ config MACH_KEV7A400
help
Say Y here if you are using the Sharp KEV7A400 development
board. This hardware is discontinued, so I'd be very
- suprised if you wanted this option.
+ surprised if you wanted this option.
config MACH_LPD7A400
bool "LPD7A400 Card Engine"
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee..9d2346fb68f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209f..cbe909bad79 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
.rows = 8,
.cols = 8,
.keymap = nokia770_keymap,
- .keymapsize = ARRAY_SIZE(nokia770_keymap)
+ .keymapsize = ARRAY_SIZE(nokia770_keymap),
.delay = 4,
};
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
printk("HP connected\n");
}
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
{
down(&audio_pwr_sem);
if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
up(&audio_pwr_sem);
}
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
static void nokia770_audio_pwr_down(void)
{
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index a611c3b6395..6dcd10ab449 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -55,7 +55,7 @@ static inline void omap_init_irda(void) {}
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC)
+#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
#define OMAP_RTC_BASE 0xfffb4800
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6..0cbf1b0071f 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state;
static u8 tps_leds_change;
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
{
for (;;) {
u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
}
}
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
#ifdef CONFIG_OMAP_OSK_MISTRAL
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad..3b1ad1d981a 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab5..12d2fe0ceff 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
static int max7310_write(struct i2c_client *client, int address, int data);
static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
static struct device *akita_ioexp_device;
static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
/*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
EXPORT_SYMBOL(akita_set_ioexp);
EXPORT_SYMBOL(akita_reset_ioexp);
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
{
if (akita_ioexp_device)
max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index 08b2f300eb7..9f46bf330bc 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -98,7 +98,7 @@ config SMDK2440_CPU2442
config MACH_S3C2413
bool
help
- Internal node for S3C2413 verison of SMDK2413, so that
+ Internal node for S3C2413 version of SMDK2413, so that
machine_is_s3c2413() will work when MACH_SMDK2413 is
selected
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 3d211dc2f2f..01abb0ace23 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -40,7 +40,7 @@
/* io map for dma */
static void __iomem *dma_base;
-static kmem_cache_t *dma_kmem;
+static struct kmem_cache *dma_kmem;
struct s3c24xx_dma_selection dma_sel;
@@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = {
/* kmem cache implementation */
-static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
+static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
{
memset(p, 0, sizeof(struct s3c2410_dma_buf));
}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 125cb3ff558..aade2f72c92 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -197,7 +197,7 @@ config CPU_ARM940T
select CPU_CP15_MPU
help
ARM940T is a member of the ARM9TDMI family of general-
- purpose microprocessors with MPU and seperate 4KB
+ purpose microprocessors with MPU and separate 4KB
instruction and 4KB data cases, each with a 4-word line
length.
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5e658a87449..9fd6d2eafb4 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -230,7 +230,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
/*
diff --git a/arch/arm26/kernel/ecard.c b/arch/arm26/kernel/ecard.c
index 047d0a408b9..43dd41be71f 100644
--- a/arch/arm26/kernel/ecard.c
+++ b/arch/arm26/kernel/ecard.c
@@ -620,12 +620,10 @@ ecard_probe(int slot, card_type_t type)
struct ex_ecid cid;
int i, rc = -ENOMEM;
- ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
+ ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
if (!ec)
goto nomem;
- memset(ec, 0, sizeof(ecard_t));
-
ec->slot_no = slot;
ec->type = type;
ec->irq = NO_IRQ;
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index a1f6d8a9cc3..93c0cee0fb5 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -215,7 +215,7 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/arm26/mm/memc.c b/arch/arm26/mm/memc.c
index 34def6397c3..f2901581d4d 100644
--- a/arch/arm26/mm/memc.c
+++ b/arch/arm26/mm/memc.c
@@ -24,7 +24,7 @@
#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
-kmem_cache_t *pte_cache, *pgd_cache;
+struct kmem_cache *pte_cache, *pgd_cache;
int page_nr;
/*
@@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi)
{
}
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
{
memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}
-static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
+static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
{
memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index ca41fc1edbe..d0abbcaf1c1 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -154,6 +154,7 @@ ss_probe:
return 1;
no_kprobe:
+ preempt_enable_no_resched();
return ret;
}
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 33096651c24..0ec14854a20 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -15,7 +15,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 44ab8a7bdae..b68d669f823 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -11,7 +11,7 @@
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
/*
* No need to sync an uncached area
diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig
index 44eb1b9accb..c7ea9efd010 100644
--- a/arch/cris/arch-v10/Kconfig
+++ b/arch/cris/arch-v10/Kconfig
@@ -323,7 +323,7 @@ config ETRAX_DEF_R_WAITSTATES
depends on ETRAX_ARCH_V10
default "95a6"
help
- Waitstates for SRAM, Flash and peripherials (not DRAM). 95f8 is a
+ Waitstates for SRAM, Flash and peripherals (not DRAM). 95f8 is a
good choice for most Axis products...
config ETRAX_DEF_R_BUS_CONFIG
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index 734d5f3a530..e7e724bc0ba 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -839,7 +839,7 @@ config ETRAX_DS1302_TRICKLE_CHARGE
default "0"
help
This controls the initial value of the trickle charge register.
- 0 = disabled (use this if you are unsure or have a non rechargable battery)
+ 0 = disabled (use this if you are unsure or have a non rechargeable battery)
Otherwise the following values can be OR:ed together to control the
charge current:
1 = 2kohm, 2 = 4kohm, 3 = 4kohm
diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c
index 6e1f191a71e..284ebfda03f 100644
--- a/arch/cris/arch-v10/drivers/eeprom.c
+++ b/arch/cris/arch-v10/drivers/eeprom.c
@@ -1,7 +1,7 @@
/*!*****************************************************************************
*!
-*! Implements an interface for i2c compatible eeproms to run under linux.
-*! Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustents by
+*! Implements an interface for i2c compatible eeproms to run under Linux.
+*! Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustments by
*! Johan.Adolfsson@axis.com
*!
*! Probing results:
@@ -51,7 +51,7 @@
*! Revision 1.8 2001/06/15 13:24:29 jonashg
*! * Added verification of pointers from userspace in read and write.
*! * Made busy counter volatile.
-*! * Added define for inital write delay.
+*! * Added define for initial write delay.
*! * Removed warnings by using loff_t instead of unsigned long.
*!
*! Revision 1.7 2001/06/14 15:26:54 jonashg
diff --git a/arch/cris/arch-v10/drivers/i2c.c b/arch/cris/arch-v10/drivers/i2c.c
index 6114596c3b3..092c724a645 100644
--- a/arch/cris/arch-v10/drivers/i2c.c
+++ b/arch/cris/arch-v10/drivers/i2c.c
@@ -47,7 +47,7 @@
*! Update Port B register and shadow even when running with hardware support
*! to avoid glitches when reading bits
*! Never set direction to out in i2c_inbyte
-*! Removed incorrect clock togling at end of i2c_inbyte
+*! Removed incorrect clock toggling at end of i2c_inbyte
*!
*! Revision 1.8 2002/08/13 06:31:53 starvik
*! Made SDA and SCL line configurable
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index 34528da9881..07628a13c6c 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -33,7 +33,7 @@
*!
*! Revision 1.2 2002/11/19 14:35:24 starvik
*! Changes from linux 2.4
-*! Changed struct initializer syntax to the currently prefered notation
+*! Changed struct initializer syntax to the currently preferred notation
*!
*! Revision 1.1 2001/12/17 13:59:27 bjornw
*! Initial revision
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
index 22a6f0aa9ce..497634a6482 100644
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ b/arch/cris/arch-v10/lib/old_checksum.c
@@ -47,39 +47,41 @@
#include <asm/delay.h>
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *p, int len, __wsum __sum)
{
- /*
- * Experiments with ethernet and slip connections show that buff
- * is aligned on either a 2-byte or 4-byte boundary.
- */
- const unsigned char *endMarker = buff + len;
- const unsigned char *marker = endMarker - (len % 16);
+ u32 sum = (__force u32)__sum;
+ const u16 *buff = p;
+ /*
+ * Experiments with ethernet and slip connections show that buff
+ * is aligned on either a 2-byte or 4-byte boundary.
+ */
+ const void *endMarker = p + len;
+ const void *marker = endMarker - (len % 16);
#if 0
- if((int)buff & 0x3)
- printk("unaligned buff %p\n", buff);
- __delay(900); /* extra delay of 90 us to test performance hit */
+ if((int)buff & 0x3)
+ printk("unaligned buff %p\n", buff);
+ __delay(900); /* extra delay of 90 us to test performance hit */
#endif
- BITON;
- while (buff < marker) {
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- sum += *((unsigned short *)buff)++;
- }
- marker = endMarker - (len % 2);
- while(buff < marker) {
- sum += *((unsigned short *)buff)++;
- }
- if(endMarker - buff > 0) {
- sum += *buff; /* add extra byte seperately */
- }
- BITOFF;
- return(sum);
+ BITON;
+ while (buff < marker) {
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ sum += *buff++;
+ }
+ marker = endMarker - (len % 2);
+ while (buff < marker)
+ sum += *buff++;
+
+ if (endMarker > buff)
+ sum += *(const u8 *)buff; /* add extra byte seperately */
+
+ BITOFF;
+ return (__force __wsum)sum;
}
EXPORT_SYMBOL(csum_partial);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index a33097f9536..f64624fc450 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -88,7 +88,7 @@ config ETRAX_SERIAL_PORT0_DMA7_IN
help
Enables the DMA7 input channel for ser0 (ttyS0).
If you do not enable DMA, an interrupt for each character will be
- used when receiveing data.
+ used when receiving data.
Normally you want to use DMA, unless you use the DMA channel for
something else.
@@ -157,7 +157,7 @@ config ETRAX_SERIAL_PORT1_DMA5_IN
help
Enables the DMA5 input channel for ser1 (ttyS1).
If you do not enable DMA, an interrupt for each character will be
- used when receiveing data.
+ used when receiving data.
Normally you want this on, unless you use the DMA channel for
something else.
@@ -228,7 +228,7 @@ config ETRAX_SERIAL_PORT2_DMA3_IN
help
Enables the DMA3 input channel for ser2 (ttyS2).
If you do not enable DMA, an interrupt for each character will be
- used when receiveing data.
+ used when receiving data.
Normally you want to use DMA, unless you use the DMA channel for
something else.
@@ -297,7 +297,7 @@ config ETRAX_SERIAL_PORT3_DMA9_IN
help
Enables the DMA9 input channel for ser3 (ttyS3).
If you do not enable DMA, an interrupt for each character will be
- used when receiveing data.
+ used when receiving data.
Normally you want to use DMA, unless you use the DMA channel for
something else.
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 934c51078cc..c73e91f1299 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -232,7 +232,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index eae874a970c..14f64b054c7 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -10,9 +10,9 @@
*/
#include <linux/futex.h>
+#include <linux/uaccess.h>
#include <asm/futex.h>
#include <asm/errno.h>
-#include <asm/uaccess.h>
/*
* the various futex operations; MMU fault checking is ignored under no-MMU
@@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
break;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index a8c61dac1ce..1a5eb6c301c 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -947,7 +947,7 @@ static void __init setup_linux_memory(void)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START, INITRD_SIZE);
- initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+ initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
}
else {
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index b8a5882b862..85baeae9666 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -21,7 +21,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
diff --git a/arch/frv/lib/checksum.c b/arch/frv/lib/checksum.c
index 20e7dfc474e..44e16d59bc1 100644
--- a/arch/frv/lib/checksum.c
+++ b/arch/frv/lib/checksum.c
@@ -32,7 +32,6 @@
of the assembly has to go. */
#include <net/checksum.h>
-#include <asm/checksum.h>
#include <linux/module.h>
static inline unsigned short from32to16(unsigned long x)
@@ -105,15 +104,15 @@ out:
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
- if (sum > result)
+ result += (__force u32)sum;
+ if ((__force u32)sum > result)
result += 1;
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -122,9 +121,9 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
- return ~do_csum(buff, len);
+ return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
@@ -132,9 +131,9 @@ EXPORT_SYMBOL(ip_compute_csum);
/*
* copy from fs while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst,
- int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err)
{
int rem;
@@ -157,11 +156,11 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
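The frv csum_partial() above keeps the running sum correct with an end-around carry: if adding the old sum wraps the 32-bit result, the lost carry bit is added back in. A minimal sketch of that idiom on its own (not the kernel code):

#include <stdint.h>

/* Add two 32-bit partial checksums with end-around carry. */
static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint32_t r = a + b;

	if (r < a)	/* the addition wrapped, so re-add the carry bit */
		r += 1;
	return r;
}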
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 8b3eb50c510..3f12296c368 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index f76dd03ddd9..19b13be114a 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
+struct kmem_cache *pgd_cache;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
@@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long) pprev);
}
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 9b4be053de3..d1b15267ac8 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 1077b71d522..6adf8f41d2a 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -116,7 +116,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#else
if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) &&
- (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)
+ (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
/* overlap userarea */
memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS;
#endif
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 7787f70a05b..02955604d76 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -38,7 +38,7 @@
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 756325dd480..f05288be887 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -70,6 +70,7 @@ SECTIONS
#endif
.text :
{
+ _text = .;
#if defined(CONFIG_ROMKERNEL)
*(.int_redirect)
#endif
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
index 5aa688d9242..bdc5b032acd 100644
--- a/arch/h8300/lib/checksum.c
+++ b/arch/h8300/lib/checksum.c
@@ -96,9 +96,9 @@ out:
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
- return ~do_csum(iph,ihl*4);
+ return (__force __sum16)~do_csum(iph,ihl*4);
}
/*
@@ -113,15 +113,19 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+/*
+ * Egads... That thing apparently assumes that *all* checksums it ever sees will
+ * be folded. Very likely a bug.
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
+ result += (__force u32)sum;
/* 16+c bits -> 16 bits */
result = (result & 0xffff) + (result >> 16);
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -130,20 +134,21 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
- return ~do_csum(buff,len);
+ return (__force __sum16)~do_csum(buff,len);
}
/*
* copy from fs while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *csum_err)
{
if (csum_err) *csum_err = 0;
- memcpy(dst, src, len);
+ memcpy(dst, (__force const void *)src, len);
return csum_partial(dst, len, sum);
}
@@ -151,8 +156,8 @@ csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *c
* copy from ds while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
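The note added above ("apparently assumes that *all* checksums it ever sees will be folded") points at the fact that the h8300 csum_partial() folds its result down to 16 bits before returning, while callers that keep accumulating a 32-bit __wsum across buffers expect the full-width value. The fold is normally performed once, at the very end. A hedged sketch of that final fold, modelled on the common csum_fold() pattern (assumption: 32-bit partial sum in, inverted 16-bit checksum out):

#include <stdint.h>

/* Fold a 32-bit partial sum to the final 16-bit one's-complement checksum. */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* 16+c bits -> 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any remaining carry */
	return (uint16_t)~sum;
}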
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8ff1c6fb5aa..ea70359b02d 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -182,6 +182,17 @@ config X86_ES7000
endchoice
+config PARAVIRT
+ bool "Paravirtualization support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ Paravirtualization is a way of running multiple instances of
+ Linux on the same machine, under a hypervisor. This option
+ changes the kernel so it can modify itself when it is run
+ under a hypervisor, improving performance significantly.
+ However, when run without a hypervisor the kernel is
+ theoretically slower. If in doubt, say N.
+
config ACPI_SRAT
bool
default y
@@ -443,7 +454,8 @@ source "drivers/firmware/Kconfig"
choice
prompt "High Memory Support"
- default NOHIGHMEM
+ default HIGHMEM4G if !X86_NUMAQ
+ default HIGHMEM64G if X86_NUMAQ
config NOHIGHMEM
bool "off"
@@ -710,20 +722,6 @@ config BOOT_IOREMAP
depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
default y
-config REGPARM
- bool "Use register arguments"
- default y
- help
- Compile the kernel with -mregparm=3. This instructs gcc to use
- a more efficient function call ABI which passes the first three
- arguments of a function call via registers, which results in denser
- and faster code.
-
- If this option is disabled, then the default ABI of passing
- arguments via the stack is used.
-
- If unsure, say Y.
-
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -773,23 +771,39 @@ config CRASH_DUMP
PHYSICAL_START.
For more details see Documentation/kdump/kdump.txt
-config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+config RELOCATABLE
+ bool "Build a relocatable kernel(EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded someplace besides the default 1MB.
+ The relocations tend to make the kernel binary about 10% larger,
+ but are discarded at runtime.
+
+ One use is for the kexec on panic case where the recovery kernel
+ must live at a different physical address than the primary
+ kernel.
- default "0x1000000" if CRASH_DUMP
+config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned"
default "0x100000"
+ range 0x2000 0x400000
help
- This gives the physical address where the kernel is loaded. Normally
- for regular kernels this value is 0x100000 (1MB). But in the case
- of kexec on panic the fail safe kernel needs to run at a different
- address than the panic-ed kernel. This option is used to set the load
- address for kernels used to capture crash dump on being kexec'ed
- after panic. The default value for crash dump kernels is
- 0x1000000 (16MB). This can also be set based on the "X" value as
- specified in the "crashkernel=YM@XM" command line boot parameter
- passed to the panic-ed kernel. Typically this parameter is set as
- crashkernel=64M@16M. Please take a look at
- Documentation/kdump/kdump.txt for more details about crash dumps.
+ This value puts alignment restrictions on the physical address
+ where the kernel is loaded and run from. The kernel is compiled
+ for an address that meets this alignment restriction.
+
+ If the bootloader loads the kernel at a non-aligned address and
+ CONFIG_RELOCATABLE is set, the kernel will move itself to the
+ nearest address aligned to the above value and run from there.
+
+ If the bootloader loads the kernel at a non-aligned address and
+ CONFIG_RELOCATABLE is not set, the kernel will ignore the run-time
+ load address, decompress itself to the address it was compiled
+ for, and run from there. Since that address already meets the
+ alignment restriction, the kernel always ends up running from a
+ physical address that satisfies it.
Don't change this unless you know what you are doing.
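The CONFIG_PHYSICAL_ALIGN help text above describes rounding a non-aligned load address up to the next alignment boundary; the decompressor does exactly this later with an add-and-mask pair in assembly. As a worked illustration of that power-of-two align-up (the 0x400000 alignment and 0x1234000 load address below are made-up example values, not anything taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next 'align' boundary; align must be a power of two. */
static uint32_t align_up(uint32_t addr, uint32_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Hypothetical values, purely for illustration. */
	printf("0x%x\n", (unsigned)align_up(0x1234000, 0x400000));	/* 0x1400000 */
	return 0;
}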
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index fc4f2abccf0..821fd269ca5 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -103,8 +103,15 @@ config MPENTIUMM
Select this for Intel Pentium M (not Pentium-4 M)
notebook chips.
+config MCORE2
+ bool "Core 2/newer Xeon"
+ help
+ Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
+ CPUs. You can distinguish newer from older Xeons by the CPU family
+ in /proc/cpuinfo. Newer ones have 6.
+
config MPENTIUM4
- bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+ bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
help
Select this for Intel Pentium 4 chips. This includes the
Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
@@ -229,7 +236,7 @@ config X86_L1_CACHE_SHIFT
default "7" if MPENTIUM4 || X86_GENERIC
default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
- default "6" if MK7 || MK8 || MPENTIUMM
+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2
config RWSEM_GENERIC_SPINLOCK
bool
@@ -287,17 +294,17 @@ config X86_ALIGNMENT_16
config X86_GOOD_APIC
bool
- depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
+ depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2
default y
config X86_INTEL_USERCOPY
bool
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
default y
config X86_USE_PPRO_CHECKSUM
bool
- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
+ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
default y
config X86_USE_3DNOW
@@ -312,5 +319,5 @@ config X86_OOSTORE
config X86_TSC
bool
- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
+ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
default y
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b31c0802e1c..f68cc6f215f 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -85,4 +85,14 @@ config DOUBLEFAULT
option saves about 4k and might cause you much additional grey
hair.
+config DEBUG_PARAVIRT
+ bool "Enable some paravirtualization debugging"
+ default y
+ depends on PARAVIRT && DEBUG_KERNEL
+ help
+ Currently deliberately clobbers regs which are allowed to be
+ clobbered in inlined paravirt hooks, even in native mode.
+ If turning this off solves a problem, then DISABLE_INTERRUPTS() or
+ ENABLE_INTERRUPTS() is lying about what registers can be clobbered.
+
endmenu
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 0677908dfa0..f7ac1aea1d8 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -26,10 +26,12 @@ endif
LDFLAGS := -m elf_i386
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
+ifdef CONFIG_RELOCATABLE
+LDFLAGS_vmlinux := --emit-relocs
+endif
CHECKFLAGS += -D__i386__
-CFLAGS += -pipe -msoft-float
+CFLAGS += -pipe -msoft-float -mregparm=3
# prevent gcc from keeping the stack 16 byte aligned
CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
@@ -37,8 +39,6 @@ CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
# CPU-specific tuning. Anything which can be shared with UML should go here.
include $(srctree)/arch/i386/Makefile.cpu
-cflags-$(CONFIG_REGPARM) += -mregparm=3
-
# temporary until string.h is fixed
cflags-y += -ffreestanding
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index a11befba26d..a32c031c90d 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -32,6 +32,7 @@ cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
+cflags-$(CONFIG_MCORE2) += -march=i686 $(call cc-option,-mtune=core2,$(call cc-option,-mtune=generic,-mtune=i686))
# AMD Elan support
cflags-$(CONFIG_X86_ELAN) += -march=i486
diff --git a/arch/i386/boot/compressed/Makefile b/arch/i386/boot/compressed/Makefile
index 258ea95224f..a661217f33e 100644
--- a/arch/i386/boot/compressed/Makefile
+++ b/arch/i386/boot/compressed/Makefile
@@ -4,22 +4,42 @@
# create a compressed vmlinux image from the original vmlinux
#
-targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o \
+ vmlinux.bin.all vmlinux.relocs
EXTRA_AFLAGS := -traditional
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32
+LDFLAGS_vmlinux := -T
+CFLAGS_misc.o += -fPIC
+hostprogs-y := relocs
-$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+$(obj)/vmlinux: $(src)/vmlinux.lds $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
$(call if_changed,ld)
@:
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+ $(call if_changed,relocs)
+
+vmlinux.bin.all-y := $(obj)/vmlinux.bin
+vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
+quiet_cmd_relocbin = BUILD $@
+ cmd_relocbin = cat $(filter-out FORCE,$^) > $@
+$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,relocbin)
+
+ifdef CONFIG_RELOCATABLE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
+ $(call if_changed,gzip)
+else
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
+endif
LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,ld)
diff --git a/arch/i386/boot/compressed/head.S b/arch/i386/boot/compressed/head.S
index b5893e4ecd3..f395a4bb38b 100644
--- a/arch/i386/boot/compressed/head.S
+++ b/arch/i386/boot/compressed/head.S
@@ -26,9 +26,11 @@
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
+#include <asm/boot.h>
+.section ".text.head"
.globl startup_32
-
+
startup_32:
cld
cli
@@ -37,93 +39,142 @@ startup_32:
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
+ movl %eax,%ss
- lss stack_start,%esp
- xorl %eax,%eax
-1: incl %eax # check that A20 really IS enabled
- movl %eax,0x000000 # loop forever if it isn't
- cmpl %eax,0x100000
- je 1b
+/* Calculate the delta between where we were compiled to run
+ * at and where we were actually loaded at. This can only be done
+ * with a short local call on x86. Nothing else will tell us what
+ * address we are running at. The reserved chunk of the real-mode
+ * data at 0x34-0x3f is used as the stack for this calculation.
+ * Only 4 bytes are needed.
+ */
+ leal 0x40(%esi), %esp
+ call 1f
+1: popl %ebp
+ subl $1b, %ebp
+
+/* %ebp contains the address we are loaded at by the boot loader and %ebx
+ * contains the address where we should move the kernel image temporarily
+ * for safe in-place decompression.
+ */
+
+#ifdef CONFIG_RELOCATABLE
+ movl %ebp, %ebx
+ addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
+ andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
+#else
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+#endif
+
+ /* Replace the compressed data size with the uncompressed size */
+ subl input_len(%ebp), %ebx
+ movl output_len(%ebp), %eax
+ addl %eax, %ebx
+ /* Add 8 bytes for every 32K input block */
+ shrl $12, %eax
+ addl %eax, %ebx
+ /* Add 32K + 18 bytes of extra slack */
+ addl $(32768 + 18), %ebx
+ /* Align on a 4K boundary */
+ addl $4095, %ebx
+ andl $~4095, %ebx
+
+/* Copy the compressed kernel to the end of our buffer
+ * where decompression in place becomes safe.
+ */
+ pushl %esi
+ leal _end(%ebp), %esi
+ leal _end(%ebx), %edi
+ movl $(_end - startup_32), %ecx
+ std
+ rep
+ movsb
+ cld
+ popl %esi
+
+/* Compute the kernel start address.
+ */
+#ifdef CONFIG_RELOCATABLE
+ addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
+ andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
+#else
+ movl $LOAD_PHYSICAL_ADDR, %ebp
+#endif
/*
- * Initialize eflags. Some BIOS's leave bits like NT set. This would
- * confuse the debugger if this code is traced.
- * XXX - best to initialize before switching to protected mode.
+ * Jump to the relocated address.
*/
- pushl $0
- popfl
+ leal relocated(%ebx), %eax
+ jmp *%eax
+.section ".text"
+relocated:
+
/*
* Clear BSS
*/
xorl %eax,%eax
- movl $_edata,%edi
- movl $_end,%ecx
+ leal _edata(%ebx),%edi
+ leal _end(%ebx), %ecx
subl %edi,%ecx
cld
rep
stosb
+
+/*
+ * Setup the stack for the decompressor
+ */
+ leal stack_end(%ebx), %esp
+
/*
* Do the decompression, and jump to the new kernel..
*/
- subl $16,%esp # place for structure on the stack
- movl %esp,%eax
+ movl output_len(%ebx), %eax
+ pushl %eax
+ pushl %ebp # output address
+ movl input_len(%ebx), %eax
+ pushl %eax # input_len
+ leal input_data(%ebx), %eax
+ pushl %eax # input_data
+ leal _end(%ebx), %eax
+ pushl %eax # end of the image as third argument
pushl %esi # real mode pointer as second arg
- pushl %eax # address of structure as first arg
call decompress_kernel
- orl %eax,%eax
- jnz 3f
- popl %esi # discard address
- popl %esi # real mode pointer
- xorl %ebx,%ebx
- ljmp $(__BOOT_CS), $__PHYSICAL_START
+ addl $20, %esp
+ popl %ecx
+#if CONFIG_RELOCATABLE
+/* Find the address of the relocations.
+ */
+ movl %ebp, %edi
+ addl %ecx, %edi
+
+/* Calculate the delta between where vmlinux was compiled to run
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+ subl $LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
/*
- * We come here, if we were loaded high.
- * We need to move the move-in-place routine down to 0x1000
- * and then start it with the buffer addresses in registers,
- * which we got from the stack.
+ * Process relocations.
*/
-3:
- movl $move_routine_start,%esi
- movl $0x1000,%edi
- movl $move_routine_end,%ecx
- subl %esi,%ecx
- addl $3,%ecx
- shrl $2,%ecx
- cld
- rep
- movsl
-
- popl %esi # discard the address
- popl %ebx # real mode pointer
- popl %esi # low_buffer_start
- popl %ecx # lcount
- popl %edx # high_buffer_start
- popl %eax # hcount
- movl $__PHYSICAL_START,%edi
- cli # make sure we don't get interrupted
- ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
+
+1: subl $4, %edi
+ movl 0(%edi), %ecx
+ testl %ecx, %ecx
+ jz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+2:
+#endif
/*
- * Routine (template) for moving the decompressed kernel in place,
- * if we were high loaded. This _must_ PIC-code !
+ * Jump to the decompressed kernel.
*/
-move_routine_start:
- movl %ecx,%ebp
- shrl $2,%ecx
- rep
- movsl
- movl %ebp,%ecx
- andl $3,%ecx
- rep
- movsb
- movl %edx,%esi
- movl %eax,%ecx # NOTE: rep movsb won't move if %ecx == 0
- addl $3,%ecx
- shrl $2,%ecx
- rep
- movsl
- movl %ebx,%esi # Restore setup pointer
xorl %ebx,%ebx
- ljmp $(__BOOT_CS), $__PHYSICAL_START
-move_routine_end:
+ jmp *%ebp
+
+.bss
+.balign 4
+stack:
+ .fill 4096, 1, 0
+stack_end:
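For CONFIG_RELOCATABLE the code above pops the decompressed length, locates the relocation table that the build appended after the kernel image, computes the delta between the runtime load address and LOAD_PHYSICAL_ADDR, and walks the table backwards, adjusting each recorded 32-bit word until it reads the zero terminator. A rough C model of that loop (illustrative only; the real work is the assembly above, and the __PAGE_OFFSET adjustment it applies is folded into the address translation here):

#include <stdint.h>

/* Model of the boot-time relocation walk: 'table_end' points just past the
 * 32-bit entries emitted by the relocs tool; they are scanned backwards until
 * the zero "stop" entry, and each one names a word that gets the load delta
 * added to it. */
static void apply_relocs(uint32_t *table_end, uint8_t *image,
			 uint32_t compiled_addr, uint32_t loaded_addr)
{
	uint32_t delta = loaded_addr - compiled_addr;
	uint32_t *p = table_end;

	if (delta == 0)		/* loaded at the compiled address: nothing to do */
		return;
	while (*--p != 0) {	/* zero entry terminates the table */
		uint32_t *site = (uint32_t *)(image + (*p - compiled_addr));
		*site += delta;
	}
}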
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index b2ccd543410..1ce7017fd62 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -9,11 +9,94 @@
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
+#undef CONFIG_PARAVIRT
#include <linux/linkage.h>
#include <linux/vmalloc.h>
#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/page.h>
+#include <asm/boot.h>
+
+/* WARNING!!
+ * This code is compiled with -fPIC and it is relocated dynamically
+ * at run time, but no relocation processing is performed.
+ * This means that it is not safe to place pointers in static structures.
+ */
+
+/*
+ * Getting to provably safe in-place decompression is hard.
+ * Worst case behaviours need to be analyzed.
+ * Background information:
+ *
+ * The file layout is:
+ * magic[2]
+ * method[1]
+ * flags[1]
+ * timestamp[4]
+ * extraflags[1]
+ * os[1]
+ * compressed data blocks[N]
+ * crc[4] orig_len[4]
+ *
+ * resulting in 18 bytes of non compressed data overhead.
+ *
+ * Files are divided into blocks:
+ * 1 bit (last block flag)
+ * 2 bits (block type)
+ *
+ * A block occurs every 32K - 1 bytes, or when 50% compression has been achieved.
+ * The smallest block type encoding is always used.
+ *
+ * stored:
+ * 32 bits length in bytes.
+ *
+ * fixed:
+ * magic fixed tree.
+ * symbols.
+ *
+ * dynamic:
+ * dynamic tree encoding.
+ * symbols.
+ *
+ *
+ * The buffer for decompression in place is the length of the
+ * uncompressed data, plus a small amount extra to keep the algorithm safe.
+ * The compressed data is placed at the end of the buffer. The output
+ * pointer is placed at the start of the buffer and the input pointer
+ * is placed where the compressed data starts. Problems will occur
+ * when the output pointer overruns the input pointer.
+ *
+ * The output pointer can only overrun the input pointer if the input
+ * pointer is moving faster than the output pointer. A condition only
+ * triggered by data whose compressed form is larger than the uncompressed
+ * form.
+ *
+ * The worst case at the block level is a growth of the compressed data
+ * of 5 bytes per 32767 bytes.
+ *
+ * The worst case internal to a compressed block is very hard to figure.
+ * The worst case can at least be bounded by having one bit that represents
+ * 32764 bytes and then all of the rest of the bytes representing the
+ * very last byte.
+ *
+ * All of which is enough to compute an amount of extra data that is required
+ * to be safe. To avoid problems at the block level allocating 5 extra bytes
+ * per 32767 bytes of data is sufficient. To avoid problems internal to a
+ * block, adding an extra 32767 bytes (the worst case uncompressed block size)
+ * is sufficient, to ensure that in the worst case the decompressed data for a
+ * block will stop the byte before the compressed data for a block begins.
+ * To avoid problems with the compressed data's meta information an extra 18
+ * bytes are needed. Leading to the formula:
+ *
+ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+ *
+ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+ * Adding 32768 instead of 32767 just makes for round numbers.
+ * Adding the decompressor_size is necessary as it must live after all
+ * of the data as well. Last I measured the decompressor is about 14K:
+ * 10K of actual data and 4K of bss.
+ *
+ */
/*
* gzip declarations
@@ -30,15 +113,20 @@ typedef unsigned char uch;
typedef unsigned short ush;
typedef unsigned long ulg;
-#define WSIZE 0x8000 /* Window size must be at least 32k, */
- /* and a power of two */
+#define WSIZE 0x80000000 /* Window size must be at least 32k,
+ * and a power of two.
+ * We don't actually have a window, just
+ * a huge output buffer, so I report
+ * a 2G window size, as that should
+ * always be larger than our output buffer.
+ */
-static uch *inbuf; /* input buffer */
-static uch window[WSIZE]; /* Sliding window buffer */
+static uch *inbuf; /* input buffer */
+static uch *window; /* Sliding window buffer, (and final output buffer) */
-static unsigned insize = 0; /* valid bytes in inbuf */
-static unsigned inptr = 0; /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0; /* bytes in output buffer */
+static unsigned insize; /* valid bytes in inbuf */
+static unsigned inptr; /* index of next byte to be processed in inbuf */
+static unsigned outcnt; /* bytes in output buffer */
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
@@ -89,8 +177,6 @@ extern unsigned char input_data[];
extern int input_len;
static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
static void *malloc(int size);
static void free(void *where);
@@ -100,24 +186,17 @@ static void *memcpy(void *dest, const void *src, unsigned n);
static void putstr(const char *);
-extern int end;
-static long free_mem_ptr = (long)&end;
-static long free_mem_end_ptr;
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
-#define INPLACE_MOVE_ROUTINE 0x1000
-#define LOW_BUFFER_START 0x2000
-#define LOW_BUFFER_MAX 0x90000
#define HEAP_SIZE 0x3000
-static unsigned int low_buffer_end, low_buffer_size;
-static int high_loaded =0;
-static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
static char *vidmem = (char *)0xb8000;
static int vidport;
static int lines, cols;
#ifdef CONFIG_X86_NUMAQ
-static void * xquad_portio = NULL;
+void *xquad_portio;
#endif
#include "../../../../lib/inflate.c"
@@ -151,7 +230,7 @@ static void gzip_mark(void **ptr)
static void gzip_release(void **ptr)
{
- free_mem_ptr = (long) *ptr;
+ free_mem_ptr = (unsigned long) *ptr;
}
static void scroll(void)
@@ -179,7 +258,7 @@ static void putstr(const char *s)
y--;
}
} else {
- vidmem [ ( x + cols * y ) * 2 ] = c;
+ vidmem [ ( x + cols * y ) * 2 ] = c;
if ( ++x >= cols ) {
x = 0;
if ( ++y >= lines ) {
@@ -224,58 +303,31 @@ static void* memcpy(void* dest, const void* src, unsigned n)
*/
static int fill_inbuf(void)
{
- if (insize != 0) {
- error("ran out of input data");
- }
-
- inbuf = input_data;
- insize = input_len;
- inptr = 1;
- return inbuf[0];
+ error("ran out of input data");
+ return 0;
}
/* ===========================================================================
* Write the output window window[0..outcnt-1] and update crc and bytes_out.
* (Used for the decompressed data only.)
*/
-static void flush_window_low(void)
-{
- ulg c = crc; /* temporary variable */
- unsigned n;
- uch *in, *out, ch;
-
- in = window;
- out = &output_data[output_ptr];
- for (n = 0; n < outcnt; n++) {
- ch = *out++ = *in++;
- c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
- }
- crc = c;
- bytes_out += (ulg)outcnt;
- output_ptr += (ulg)outcnt;
- outcnt = 0;
-}
-
-static void flush_window_high(void)
-{
- ulg c = crc; /* temporary variable */
- unsigned n;
- uch *in, ch;
- in = window;
- for (n = 0; n < outcnt; n++) {
- ch = *output_data++ = *in++;
- if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
- c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
- }
- crc = c;
- bytes_out += (ulg)outcnt;
- outcnt = 0;
-}
-
static void flush_window(void)
{
- if (high_loaded) flush_window_high();
- else flush_window_low();
+ /* With my window equal to my output buffer
+ * I only need to compute the crc here.
+ */
+ ulg c = crc; /* temporary variable */
+ unsigned n;
+ uch *in, ch;
+
+ in = window;
+ for (n = 0; n < outcnt; n++) {
+ ch = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ outcnt = 0;
}
static void error(char *x)
@@ -287,66 +339,8 @@ static void error(char *x)
while(1); /* Halt */
}
-#define STACK_SIZE (4096)
-
-long user_stack [STACK_SIZE];
-
-struct {
- long * a;
- short b;
- } stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
-
-static void setup_normal_output_buffer(void)
-{
-#ifdef STANDARD_MEMORY_BIOS_CALL
- if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
-#else
- if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
-#endif
- output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
- free_mem_end_ptr = (long)real_mode;
-}
-
-struct moveparams {
- uch *low_buffer_start; int lcount;
- uch *high_buffer_start; int hcount;
-};
-
-static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
-{
- high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
-#ifdef STANDARD_MEMORY_BIOS_CALL
- if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
-#else
- if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
-#endif
- mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
- low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
- ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
- low_buffer_size = low_buffer_end - LOW_BUFFER_START;
- high_loaded = 1;
- free_mem_end_ptr = (long)high_buffer_start;
- if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
- high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
- mv->hcount = 0; /* say: we need not to move high_buffer */
- }
- else mv->hcount = -1;
- mv->high_buffer_start = high_buffer_start;
-}
-
-static void close_output_buffer_if_we_run_high(struct moveparams *mv)
-{
- if (bytes_out > low_buffer_size) {
- mv->lcount = low_buffer_size;
- if (mv->hcount)
- mv->hcount = bytes_out - low_buffer_size;
- } else {
- mv->lcount = bytes_out;
- mv->hcount = 0;
- }
-}
-
-asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
+asmlinkage void decompress_kernel(void *rmode, unsigned long end,
+ uch *input_data, unsigned long input_len, uch *output)
{
real_mode = rmode;
@@ -361,13 +355,25 @@ asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
lines = RM_SCREEN_INFO.orig_video_lines;
cols = RM_SCREEN_INFO.orig_video_cols;
- if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
- else setup_output_buffer_if_we_run_high(mv);
+ window = output; /* Output buffer (Normally at 1M) */
+ free_mem_ptr = end; /* Heap */
+ free_mem_end_ptr = end + HEAP_SIZE;
+ inbuf = input_data; /* Input buffer */
+ insize = input_len;
+ inptr = 0;
+
+ if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
+ error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
+ if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
+ error("Destination address too large");
+#ifndef CONFIG_RELOCATABLE
+ if ((u32)output != LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+#endif
makecrc();
putstr("Uncompressing Linux... ");
gunzip();
putstr("Ok, booting the kernel.\n");
- if (high_loaded) close_output_buffer_if_we_run_high(mv);
- return high_loaded;
+ return;
}
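The long comment added to misc.c reduces to one sizing rule: place the compressed data at the end of a buffer sized as the uncompressed length plus (uncompressed_size >> 12) + 32768 + 18 bytes of slack, plus room for the decompressor itself. A small hedged helper that evaluates the same formula, using a made-up 4 MB image purely as a worked example:

#include <stdint.h>
#include <stdio.h>

/* Extra slack for safe in-place gunzip, per the misc.c comment: 8 bytes per
 * 32K of output (approximated by >> 12), one worst-case 32768-byte block,
 * and 18 bytes of gzip header/trailer overhead. */
static uint32_t inplace_slack(uint32_t uncompressed_size)
{
	return (uncompressed_size >> 12) + 32768 + 18;
}

int main(void)
{
	uint32_t usize = 4 * 1024 * 1024;	/* hypothetical 4 MB image */
	printf("%u bytes of slack\n", (unsigned)inplace_slack(usize));	/* 33810 */
	return 0;
}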
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
new file mode 100644
index 00000000000..468da89153c
--- /dev/null
+++ b/arch/i386/boot/compressed/relocs.c
@@ -0,0 +1,625 @@
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+
+#define MAX_SHDRS 100
+static Elf32_Ehdr ehdr;
+static Elf32_Shdr shdr[MAX_SHDRS];
+static Elf32_Sym *symtab[MAX_SHDRS];
+static Elf32_Rel *reltab[MAX_SHDRS];
+static char *strtab[MAX_SHDRS];
+static unsigned long reloc_count, reloc_idx;
+static unsigned long *relocs;
+
+/*
+ * The following symbols have been audited. Their values are constant and do
+ * not change if bzImage is loaded at a different physical address than
+ * the address for which it has been compiled. Don't warn the user about
+ * absolute relocations present w.r.t. these symbols.
+ */
+static const char* safe_abs_relocs[] = {
+ "__kernel_vsyscall",
+ "__kernel_rt_sigreturn",
+ "__kernel_sigreturn",
+ "SYSENTER_RETURN",
+};
+
+static int is_safe_abs_reloc(const char* sym_name)
+{
+ int i, array_size;
+
+ array_size = sizeof(safe_abs_relocs)/sizeof(char*);
+
+ for(i = 0; i < array_size; i++) {
+ if (!strcmp(sym_name, safe_abs_relocs[i]))
+ /* Match found */
+ return 1;
+ }
+ return 0;
+}
+
+static void die(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(1);
+}
+
+static const char *sym_type(unsigned type)
+{
+ static const char *type_name[] = {
+#define SYM_TYPE(X) [X] = #X
+ SYM_TYPE(STT_NOTYPE),
+ SYM_TYPE(STT_OBJECT),
+ SYM_TYPE(STT_FUNC),
+ SYM_TYPE(STT_SECTION),
+ SYM_TYPE(STT_FILE),
+ SYM_TYPE(STT_COMMON),
+ SYM_TYPE(STT_TLS),
+#undef SYM_TYPE
+ };
+ const char *name = "unknown sym type name";
+ if (type < sizeof(type_name)/sizeof(type_name[0])) {
+ name = type_name[type];
+ }
+ return name;
+}
+
+static const char *sym_bind(unsigned bind)
+{
+ static const char *bind_name[] = {
+#define SYM_BIND(X) [X] = #X
+ SYM_BIND(STB_LOCAL),
+ SYM_BIND(STB_GLOBAL),
+ SYM_BIND(STB_WEAK),
+#undef SYM_BIND
+ };
+ const char *name = "unknown sym bind name";
+ if (bind < sizeof(bind_name)/sizeof(bind_name[0])) {
+ name = bind_name[bind];
+ }
+ return name;
+}
+
+static const char *sym_visibility(unsigned visibility)
+{
+ static const char *visibility_name[] = {
+#define SYM_VISIBILITY(X) [X] = #X
+ SYM_VISIBILITY(STV_DEFAULT),
+ SYM_VISIBILITY(STV_INTERNAL),
+ SYM_VISIBILITY(STV_HIDDEN),
+ SYM_VISIBILITY(STV_PROTECTED),
+#undef SYM_VISIBILITY
+ };
+ const char *name = "unknown sym visibility name";
+ if (visibility < sizeof(visibility_name)/sizeof(visibility_name[0])) {
+ name = visibility_name[visibility];
+ }
+ return name;
+}
+
+static const char *rel_type(unsigned type)
+{
+ static const char *type_name[] = {
+#define REL_TYPE(X) [X] = #X
+ REL_TYPE(R_386_NONE),
+ REL_TYPE(R_386_32),
+ REL_TYPE(R_386_PC32),
+ REL_TYPE(R_386_GOT32),
+ REL_TYPE(R_386_PLT32),
+ REL_TYPE(R_386_COPY),
+ REL_TYPE(R_386_GLOB_DAT),
+ REL_TYPE(R_386_JMP_SLOT),
+ REL_TYPE(R_386_RELATIVE),
+ REL_TYPE(R_386_GOTOFF),
+ REL_TYPE(R_386_GOTPC),
+#undef REL_TYPE
+ };
+ const char *name = "unknown type rel type name";
+ if (type < sizeof(type_name)/sizeof(type_name[0])) {
+ name = type_name[type];
+ }
+ return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+ const char *sec_strtab;
+ const char *name;
+ sec_strtab = strtab[ehdr.e_shstrndx];
+ name = "<noname>";
+ if (shndx < ehdr.e_shnum) {
+ name = sec_strtab + shdr[shndx].sh_name;
+ }
+ else if (shndx == SHN_ABS) {
+ name = "ABSOLUTE";
+ }
+ else if (shndx == SHN_COMMON) {
+ name = "COMMON";
+ }
+ return name;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
+{
+ const char *name;
+ name = "<noname>";
+ if (sym->st_name) {
+ name = sym_strtab + sym->st_name;
+ }
+ else {
+ name = sec_name(shdr[sym->st_shndx].sh_name);
+ }
+ return name;
+}
+
+
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+ return le16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+ return le32_to_cpu(val);
+}
+
+static void read_ehdr(FILE *fp)
+{
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
+ die("Cannot read ELF header: %s\n",
+ strerror(errno));
+ }
+ if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) {
+ die("No ELF magic\n");
+ }
+ if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
+ die("Not a 32 bit executable\n");
+ }
+ if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
+ die("Not a LSB ELF executable\n");
+ }
+ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
+ die("Unknown ELF version\n");
+ }
+ /* Convert the fields to native endian */
+ ehdr.e_type = elf16_to_cpu(ehdr.e_type);
+ ehdr.e_machine = elf16_to_cpu(ehdr.e_machine);
+ ehdr.e_version = elf32_to_cpu(ehdr.e_version);
+ ehdr.e_entry = elf32_to_cpu(ehdr.e_entry);
+ ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff);
+ ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff);
+ ehdr.e_flags = elf32_to_cpu(ehdr.e_flags);
+ ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize);
+ ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
+ ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum);
+ ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
+ ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum);
+ ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx);
+
+ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+ die("Unsupported ELF header type\n");
+ }
+ if (ehdr.e_machine != EM_386) {
+ die("Not for x86\n");
+ }
+ if (ehdr.e_version != EV_CURRENT) {
+ die("Unknown ELF version\n");
+ }
+ if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
+ die("Bad Elf header size\n");
+ }
+ if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
+ die("Bad program header entry\n");
+ }
+ if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
+ die("Bad section header entry\n");
+ }
+ if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+ die("String table index out of bounds\n");
+ }
+}
+
+static void read_shdrs(FILE *fp)
+{
+ int i;
+ if (ehdr.e_shnum > MAX_SHDRS) {
+ die("%d section headers supported: %d\n",
+ ehdr.e_shnum, MAX_SHDRS);
+ }
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ ehdr.e_shoff, strerror(errno));
+ }
+ if (fread(&shdr, sizeof(shdr[0]), ehdr.e_shnum, fp) != ehdr.e_shnum) {
+ die("Cannot read ELF section headers: %s\n",
+ strerror(errno));
+ }
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ shdr[i].sh_name = elf32_to_cpu(shdr[i].sh_name);
+ shdr[i].sh_type = elf32_to_cpu(shdr[i].sh_type);
+ shdr[i].sh_flags = elf32_to_cpu(shdr[i].sh_flags);
+ shdr[i].sh_addr = elf32_to_cpu(shdr[i].sh_addr);
+ shdr[i].sh_offset = elf32_to_cpu(shdr[i].sh_offset);
+ shdr[i].sh_size = elf32_to_cpu(shdr[i].sh_size);
+ shdr[i].sh_link = elf32_to_cpu(shdr[i].sh_link);
+ shdr[i].sh_info = elf32_to_cpu(shdr[i].sh_info);
+ shdr[i].sh_addralign = elf32_to_cpu(shdr[i].sh_addralign);
+ shdr[i].sh_entsize = elf32_to_cpu(shdr[i].sh_entsize);
+ }
+
+}
+
+static void read_strtabs(FILE *fp)
+{
+ int i;
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ if (shdr[i].sh_type != SHT_STRTAB) {
+ continue;
+ }
+ strtab[i] = malloc(shdr[i].sh_size);
+ if (!strtab[i]) {
+ die("malloc of %d bytes for strtab failed\n",
+ shdr[i].sh_size);
+ }
+ if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ shdr[i].sh_offset, strerror(errno));
+ }
+ if (fread(strtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
+ }
+}
+
+static void read_symtabs(FILE *fp)
+{
+ int i,j;
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ if (shdr[i].sh_type != SHT_SYMTAB) {
+ continue;
+ }
+ symtab[i] = malloc(shdr[i].sh_size);
+ if (!symtab[i]) {
+ die("malloc of %d bytes for symtab failed\n",
+ shdr[i].sh_size);
+ }
+ if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ shdr[i].sh_offset, strerror(errno));
+ }
+ if (fread(symtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
+ for(j = 0; j < shdr[i].sh_size/sizeof(symtab[i][0]); j++) {
+ symtab[i][j].st_name = elf32_to_cpu(symtab[i][j].st_name);
+ symtab[i][j].st_value = elf32_to_cpu(symtab[i][j].st_value);
+ symtab[i][j].st_size = elf32_to_cpu(symtab[i][j].st_size);
+ symtab[i][j].st_shndx = elf16_to_cpu(symtab[i][j].st_shndx);
+ }
+ }
+}
+
+
+static void read_relocs(FILE *fp)
+{
+ int i,j;
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ if (shdr[i].sh_type != SHT_REL) {
+ continue;
+ }
+ reltab[i] = malloc(shdr[i].sh_size);
+ if (!reltab[i]) {
+ die("malloc of %d bytes for relocs failed\n",
+ shdr[i].sh_size);
+ }
+ if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ shdr[i].sh_offset, strerror(errno));
+ }
+ if (fread(reltab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
+ for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+ reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset);
+ reltab[i][j].r_info = elf32_to_cpu(reltab[i][j].r_info);
+ }
+ }
+}
+
+
+static void print_absolute_symbols(void)
+{
+ int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ int j;
+ if (shdr[i].sh_type != SHT_SYMTAB) {
+ continue;
+ }
+ sh_symtab = symtab[i];
+ sym_strtab = strtab[shdr[i].sh_link];
+ for(j = 0; j < shdr[i].sh_size/sizeof(symtab[0][0]); j++) {
+ Elf32_Sym *sym;
+ const char *name;
+ sym = &symtab[i][j];
+ name = sym_name(sym_strtab, sym);
+ if (sym->st_shndx != SHN_ABS) {
+ continue;
+ }
+ printf("%5d %08x %5d %10s %10s %12s %s\n",
+ j, sym->st_value, sym->st_size,
+ sym_type(ELF32_ST_TYPE(sym->st_info)),
+ sym_bind(ELF32_ST_BIND(sym->st_info)),
+ sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
+ name);
+ }
+ }
+ printf("\n");
+}
+
+static void print_absolute_relocs(void)
+{
+ int i, printed = 0;
+
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ unsigned sec_applies, sec_symtab;
+ int j;
+ if (shdr[i].sh_type != SHT_REL) {
+ continue;
+ }
+ sec_symtab = shdr[i].sh_link;
+ sec_applies = shdr[i].sh_info;
+ if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+ continue;
+ }
+ sh_symtab = symtab[sec_symtab];
+ sym_strtab = strtab[shdr[sec_symtab].sh_link];
+ for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+ Elf32_Rel *rel;
+ Elf32_Sym *sym;
+ const char *name;
+ rel = &reltab[i][j];
+ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+ name = sym_name(sym_strtab, sym);
+ if (sym->st_shndx != SHN_ABS) {
+ continue;
+ }
+
+ /* Absolute symbols are not relocated if bzImage is
+ * loaded at a non-compiled address. Display a warning
+ * to the user at compile time about the absolute
+ * relocations present.
+ *
+ * The user needs to audit the code to make sure that
+ * symbols which should have been section relative have
+ * not become absolute because of linker optimization
+ * or incorrect usage.
+ *
+ * Before warning check if this absolute symbol
+ * relocation is harmless.
+ */
+ if (is_safe_abs_reloc(name))
+ continue;
+
+ if (!printed) {
+ printf("WARNING: Absolute relocations"
+ " present\n");
+ printf("Offset Info Type Sym.Value "
+ "Sym.Name\n");
+ printed = 1;
+ }
+
+ printf("%08x %08x %10s %08x %s\n",
+ rel->r_offset,
+ rel->r_info,
+ rel_type(ELF32_R_TYPE(rel->r_info)),
+ sym->st_value,
+ name);
+ }
+ }
+
+ if (printed)
+ printf("\n");
+}
+
+static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+{
+ int i;
+ /* Walk through the relocations */
+ for(i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ unsigned sec_applies, sec_symtab;
+ int j;
+ if (shdr[i].sh_type != SHT_REL) {
+ continue;
+ }
+ sec_symtab = shdr[i].sh_link;
+ sec_applies = shdr[i].sh_info;
+ if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+ continue;
+ }
+ sh_symtab = symtab[sec_symtab];
+ sym_strtab = strtab[shdr[sec_symtab].sh_link];
+ for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+ Elf32_Rel *rel;
+ Elf32_Sym *sym;
+ unsigned r_type;
+ rel = &reltab[i][j];
+ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+ r_type = ELF32_R_TYPE(rel->r_info);
+ /* Don't visit relocations to absolute symbols */
+ if (sym->st_shndx == SHN_ABS) {
+ continue;
+ }
+ if (r_type == R_386_PC32) {
+ /* PC relative relocations don't need to be adjusted */
+ }
+ else if (r_type == R_386_32) {
+ /* Visit relocations that need to be adjusted */
+ visit(rel, sym);
+ }
+ else {
+ die("Unsupported relocation type: %d\n", r_type);
+ }
+ }
+ }
+}
+
+static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+ reloc_count += 1;
+}
+
+static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+ /* Remember the address that needs to be adjusted. */
+ relocs[reloc_idx++] = rel->r_offset;
+}
+
+static int cmp_relocs(const void *va, const void *vb)
+{
+ const unsigned long *a, *b;
+ a = va; b = vb;
+ return (*a == *b)? 0 : (*a > *b)? 1 : -1;
+}
+
+static void emit_relocs(int as_text)
+{
+ int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+ relocs = malloc(reloc_count * sizeof(relocs[0]));
+ if (!relocs) {
+ die("malloc of %d entries for relocs failed\n",
+ reloc_count);
+ }
+ /* Collect up the relocations */
+ reloc_idx = 0;
+ walk_relocs(collect_reloc);
+
+ /* Order the relocations for more efficient processing */
+ qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+
+ /* Print the relocations */
+ if (as_text) {
+ /* Print the relocations in a form that
+ * gas will accept.
+ */
+ printf(".section \".data.reloc\",\"a\"\n");
+ printf(".balign 4\n");
+ for(i = 0; i < reloc_count; i++) {
+ printf("\t .long 0x%08lx\n", relocs[i]);
+ }
+ printf("\n");
+ }
+ else {
+ unsigned char buf[4];
+ buf[0] = buf[1] = buf[2] = buf[3] = 0;
+ /* Print a stop */
+ printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+ /* Now print each relocation */
+ for(i = 0; i < reloc_count; i++) {
+ buf[0] = (relocs[i] >> 0) & 0xff;
+ buf[1] = (relocs[i] >> 8) & 0xff;
+ buf[2] = (relocs[i] >> 16) & 0xff;
+ buf[3] = (relocs[i] >> 24) & 0xff;
+ printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+ }
+ }
+}
+
+static void usage(void)
+{
+ die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+ int show_absolute_syms, show_absolute_relocs;
+ int as_text;
+ const char *fname;
+ FILE *fp;
+ int i;
+
+ show_absolute_syms = 0;
+ show_absolute_relocs = 0;
+ as_text = 0;
+ fname = NULL;
+ for(i = 1; i < argc; i++) {
+ char *arg = argv[i];
+ if (*arg == '-') {
+ if (strcmp(argv[1], "--abs-syms") == 0) {
+ show_absolute_syms = 1;
+ continue;
+ }
+
+ if (strcmp(argv[1], "--abs-relocs") == 0) {
+ show_absolute_relocs = 1;
+ continue;
+ }
+ else if (strcmp(argv[1], "--text") == 0) {
+ as_text = 1;
+ continue;
+ }
+ }
+ else if (!fname) {
+ fname = arg;
+ continue;
+ }
+ usage();
+ }
+ if (!fname) {
+ usage();
+ }
+ fp = fopen(fname, "r");
+ if (!fp) {
+ die("Cannot open %s: %s\n",
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+ read_relocs(fp);
+ if (show_absolute_syms) {
+ print_absolute_symbols();
+ return 0;
+ }
+ if (show_absolute_relocs) {
+ print_absolute_relocs();
+ return 0;
+ }
+ emit_relocs(as_text);
+ return 0;
+}
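In its default (binary) mode, emit_relocs() above writes a four-byte zero "stop" word followed by one little-endian 32-bit offset per relocation; the Makefile appends that blob after vmlinux.bin and head.S later scans it backwards from the end. A small hypothetical dump tool for inspecting such a blob (not part of the patch, just an aid that assumes the format described above):

#include <stdio.h>
#include <stdint.h>

/* Dump a relocation blob produced by 'relocs vmlinux > vmlinux.relocs':
 * a zero stop word, then one little-endian 32-bit offset per relocation. */
int main(int argc, char **argv)
{
	unsigned char b[4];
	unsigned long n = 0;
	FILE *fp;

	if (argc != 2 || !(fp = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: dumprelocs <file>\n");
		return 1;
	}
	while (fread(b, 1, 4, fp) == 4) {
		uint32_t off = b[0] | b[1] << 8 |
			       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
		if (n++ == 0 && off == 0)
			continue;	/* skip the leading stop word */
		printf("%08x\n", (unsigned)off);
	}
	fclose(fp);
	return 0;
}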
diff --git a/arch/i386/boot/compressed/vmlinux.lds b/arch/i386/boot/compressed/vmlinux.lds
new file mode 100644
index 00000000000..cc4854f6c6c
--- /dev/null
+++ b/arch/i386/boot/compressed/vmlinux.lds
@@ -0,0 +1,43 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(startup_32)
+SECTIONS
+{
+ /* Be careful parts of head.S assume startup_32 is at
+ * address 0.
+ */
+ . = 0 ;
+ .text.head : {
+ _head = . ;
+ *(.text.head)
+ _ehead = . ;
+ }
+ .data.compressed : {
+ *(.data.compressed)
+ }
+ .text : {
+ _text = .; /* Text */
+ *(.text)
+ *(.text.*)
+ _etext = . ;
+ }
+ .rodata : {
+ _rodata = . ;
+ *(.rodata) /* read-only data */
+ *(.rodata.*)
+ _erodata = . ;
+ }
+ .data : {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+ _edata = . ;
+ }
+ .bss : {
+ _bss = . ;
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ _end = . ;
+ }
+}
diff --git a/arch/i386/boot/compressed/vmlinux.scr b/arch/i386/boot/compressed/vmlinux.scr
index 1ed9d791f86..707a88f7f29 100644
--- a/arch/i386/boot/compressed/vmlinux.scr
+++ b/arch/i386/boot/compressed/vmlinux.scr
@@ -1,9 +1,10 @@
SECTIONS
{
- .data : {
+ .data.compressed : {
input_len = .;
LONG(input_data_end - input_data) input_data = .;
*(.data)
+ output_len = . - 4;
input_data_end = .;
}
}
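The output_len symbol added above works because of how gzip streams end: the last four bytes are the ISIZE trailer, the uncompressed length modulo 2^32, stored little-endian (RFC 1952). Defining output_len as ". - 4" therefore makes it point at the uncompressed size of the payload, which head.S reads when sizing its buffer. A tiny sketch that reads the same field from any .gz file (an illustrative helper, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Print the ISIZE trailer of a gzip file: its uncompressed length mod 2^32. */
int main(int argc, char **argv)
{
	unsigned char b[4];
	FILE *fp;

	if (argc != 2 || !(fp = fopen(argv[1], "rb")))
		return 1;
	if (fseek(fp, -4, SEEK_END) != 0 || fread(b, 1, 4, fp) != 4)
		return 1;
	printf("%u\n", (unsigned)(b[0] | b[1] << 8 |
				  (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24));
	fclose(fp);
	return 0;
}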
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 3aec4538a11..06edf1c6624 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -81,7 +81,7 @@ start:
# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
.ascii "HdrS" # header signature
- .word 0x0204 # header version number (>= 0x0105)
+ .word 0x0205 # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
start_sys_seg: .word SYSSEG
@@ -160,6 +160,17 @@ ramdisk_max: .long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff
# The highest safe address for
# the contents of an initrd
+kernel_alignment: .long CONFIG_PHYSICAL_ALIGN #physical addr alignment
+ #required for protected mode
+ #kernel
+#ifdef CONFIG_RELOCATABLE
+relocatable_kernel: .byte 1
+#else
+relocatable_kernel: .byte 0
+#endif
+pad2: .byte 0
+pad3: .word 0
+
trampoline: call start_of_setup
.align 16
# The offset at this point is 0x240
@@ -588,11 +599,6 @@ rmodeswtch_normal:
call default_switch
rmodeswtch_end:
-# we get the code32 start address and modify the below 'jmpi'
-# (loader may have changed it)
- movl %cs:code32_start, %eax
- movl %eax, %cs:code32
-
# Now we move the system to its rightful place ... but we check if we have a
# big-kernel. In that case we *must* not move it ...
testb $LOADED_HIGH, %cs:loadflags
@@ -788,11 +794,12 @@ a20_err_msg:
a20_done:
#endif /* CONFIG_X86_VOYAGER */
-# set up gdt and idt
+# set up gdt and idt and 32bit start address
lidt idt_48 # load idt with 0,0
xorl %eax, %eax # Compute gdt_base
movw %ds, %ax # (Convert %ds:gdt to a linear ptr)
shll $4, %eax
+ addl %eax, code32
addl $gdt, %eax
movl %eax, (gdt_48+2)
lgdt gdt_48 # load gdt with whatever is
@@ -851,9 +858,26 @@ flush_instr:
# Manual, Mixing 16-bit and 32-bit code, page 16-6)
.byte 0x66, 0xea # prefix + jmpi-opcode
-code32: .long 0x1000 # will be set to 0x100000
- # for big kernels
+code32: .long startup_32 # will be set to %cs+startup_32
.word __BOOT_CS
+.code32
+startup_32:
+ movl $(__BOOT_DS), %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movl %eax, %ss
+
+ xorl %eax, %eax
+1: incl %eax # check that A20 really IS enabled
+ movl %eax, 0x00000000 # loop forever if it isn't
+ cmpl %eax, 0x00100000
+ je 1b
+
+ # Jump to the 32bit entry point
+ jmpl *(code32_start - start + (DELTA_INITSEG << 4))(%esi)
+.code16
# Here's a bunch of information about your current kernel..
kernel_version: .ascii UTS_RELEASE
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 97aacd6bd7d..65891f11ace 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:56 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec 6 23:50:49 2006
#
CONFIG_X86_32=y
CONFIG_GENERIC_TIME=y
@@ -40,13 +40,14 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -110,6 +111,7 @@ CONFIG_SMP=y
# CONFIG_X86_VISWS is not set
CONFIG_X86_GENERICARCH=y
# CONFIG_X86_ES7000 is not set
+# CONFIG_PARAVIRT is not set
CONFIG_X86_CYCLONE_TIMER=y
# CONFIG_M386 is not set
# CONFIG_M486 is not set
@@ -120,6 +122,7 @@ CONFIG_X86_CYCLONE_TIMER=y
# CONFIG_MPENTIUMII is not set
CONFIG_MPENTIUMIII=y
# CONFIG_MPENTIUMM is not set
+# CONFIG_MCORE2 is not set
# CONFIG_MPENTIUM4 is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
@@ -197,7 +200,6 @@ CONFIG_RESOURCES_64BIT=y
CONFIG_MTRR=y
# CONFIG_EFI is not set
# CONFIG_IRQBALANCE is not set
-CONFIG_REGPARM=y
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
@@ -205,7 +207,8 @@ CONFIG_HZ_250=y
CONFIG_HZ=250
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
# CONFIG_HOTPLUG_CPU is not set
CONFIG_COMPAT_VDSO=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
@@ -367,6 +370,7 @@ CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -677,6 +681,7 @@ CONFIG_SATA_INTEL_COMBINED=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_JMICRON is not set
# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
@@ -850,6 +855,7 @@ CONFIG_BNX2=y
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
#
# Token Ring devices
@@ -984,10 +990,6 @@ CONFIG_RTC=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
CONFIG_AGP=y
# CONFIG_AGP_ALI is not set
# CONFIG_AGP_ATI is not set
@@ -1108,6 +1110,7 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_BANDWIDTH is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
# CONFIG_USB_OTG is not set
#
@@ -1185,6 +1188,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
# CONFIG_USB_USBNET is not set
CONFIG_USB_MON=y
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 1a884b6e6e5..1e8988e558c 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
- pci-dma.o i386_ksyms.o i387.o bootflag.o \
+ pci-dma.o i386_ksyms.o i387.o bootflag.o e820.o\
quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -40,6 +40,9 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_K8_NB) += k8.o
+# Make sure this is linked after any other paravirt_ops structs: see head.S
+obj-$(CONFIG_PARAVIRT) += paravirt.o
+
EXTRA_AFLAGS := -traditional
obj-$(CONFIG_SCx200) += scx200.o
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index d12fb97a533..c8f96cff07c 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -333,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -353,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigge
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ mp_override_legacy_irq(gsi, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
* and for later update of acpi_fadt
*/
- acpi_sci_override_gsi = bus_irq;
+ acpi_sci_override_gsi = gsi;
return;
}
@@ -377,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->bus_irq == acpi_fadt.sci_int) {
- acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
return 0;
@@ -880,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
+ acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
/* Fill in identity legacy mapings where no override */
mp_config_acpi_legacy_irqs();
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 20563e52c62..12e937c1ce4 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
+#include <linux/sched.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
@@ -155,10 +156,8 @@ static int __init ffh_cstate_init(void)
static void __exit ffh_cstate_exit(void)
{
- if (cpu_cstate_entry) {
- free_percpu(cpu_cstate_entry);
- cpu_cstate_entry = NULL;
- }
+ free_percpu(cpu_cstate_entry);
+ cpu_cstate_entry = NULL;
}
arch_initcall(ffh_cstate_init);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index c9841692bb7..4b60af7f91d 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -10,6 +10,7 @@
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
+#include <asm/irq.h>
#ifdef CONFIG_ACPI
@@ -49,6 +50,24 @@ static int __init check_bridge(int vendor, int device)
return 0;
}
+static void check_intel(void)
+{
+ u16 vendor, device;
+
+ vendor = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
+
+ if (vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+#ifdef CONFIG_SMP
+ if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+ quirk_intel_irqbalance();
+#endif
+}
+
void __init check_acpi_pci(void)
{
int num, slot, func;
@@ -60,6 +79,8 @@ void __init check_acpi_pci(void)
if (!early_pci_allowed())
return;
+ check_intel();
+
/* Poor man's PCI discovery */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
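
check_intel() runs before the PCI subsystem is up, so it uses the early read_pci_config_16() helper, which talks to PCI configuration mechanism #1 directly. Roughly, that access pattern is (a kernel-style sketch using the asm/io.h port helpers, not the kernel's actual helper):

        /* Select bus/dev/fn/offset through the address port 0xCF8, then read
         * the data window at 0xCFC.  The address must be dword-aligned; the
         * low offset bits pick the word within the dword. */
        static u16 pci_conf1_read16(u8 bus, u8 dev, u8 fn, u8 off)
        {
                u32 addr = 0x80000000u | (bus << 16) | (dev << 11) |
                           (fn << 8) | (off & 0xfc);

                outl(addr, 0xcf8);
                return inw(0xcfc + (off & 2));
        }
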
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 583c238e17f..9eca21b49f6 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
@@ -123,6 +124,20 @@ static unsigned char** find_nop_table(void)
#endif /* CONFIG_X86_64 */
+static void nop_out(void *insns, unsigned int len)
+{
+ unsigned char **noptable = find_nop_table();
+
+ while (len > 0) {
+ unsigned int noplen = len;
+ if (noplen > ASM_NOP_MAX)
+ noplen = ASM_NOP_MAX;
+ memcpy(insns, noptable[noplen], noplen);
+ insns += noplen;
+ len -= noplen;
+ }
+}
+
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -137,10 +152,9 @@ extern u8 __smp_alt_begin[], __smp_alt_end[];
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
- unsigned char **noptable = find_nop_table();
struct alt_instr *a;
u8 *instr;
- int diff, i, k;
+ int diff;
DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
for (a = start; a < end; a++) {
@@ -158,13 +172,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
#endif
memcpy(instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
- /* Pad the rest with nops */
- for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
- k = diff;
- if (k > ASM_NOP_MAX)
- k = ASM_NOP_MAX;
- memcpy(a->instr + i, noptable[k], k);
- }
+ nop_out(instr + a->replacementlen, diff);
}
}
@@ -208,7 +216,6 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
- unsigned char **noptable = find_nop_table();
u8 **ptr;
for (ptr = start; ptr < end; ptr++) {
@@ -216,7 +223,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
continue;
if (*ptr > text_end)
continue;
- **ptr = noptable[1][0];
+ nop_out(*ptr, 1);
};
}
@@ -342,6 +349,40 @@ void alternatives_smp_switch(int smp)
#endif
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{
+ struct paravirt_patch *p;
+
+ for (p = start; p < end; p++) {
+ unsigned int used;
+
+ used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
+ p->len);
+#ifdef CONFIG_DEBUG_PARAVIRT
+ {
+ int i;
+ /* Deliberately clobber regs using "not %reg" to find bugs. */
+ for (i = 0; i < 3; i++) {
+ if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
+ memcpy(p->instr + used, "\xf7\xd0", 2);
+ p->instr[used+1] |= i;
+ used += 2;
+ }
+ }
+ }
+#endif
+ /* Pad the rest with nops */
+ nop_out(p->instr + used, p->len - used);
+ }
+
+ /* Sync to be conservative, in case we patched following instructions */
+ sync_core();
+}
+extern struct paravirt_patch __start_parainstructions[],
+ __stop_parainstructions[];
+#endif /* CONFIG_PARAVIRT */
+
void __init alternative_instructions(void)
{
unsigned long flags;
@@ -389,5 +430,6 @@ void __init alternative_instructions(void)
alternatives_smp_switch(0);
}
#endif
+ apply_paravirt(__start_parainstructions, __stop_parainstructions);
local_irq_restore(flags);
}
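
The new nop_out() helper simply tiles a code region with the largest NOP sequences available, which lets both the alternatives padding and the SMP-lock removal share one loop. Stripped of the kernel specifics, the idea is (illustrative userspace sketch; nops[] is a stand-in for the kernel's per-vendor NOP tables and NOP_MAX for ASM_NOP_MAX):

        #include <string.h>

        #define NOP_MAX 3

        /* nops[n] is an n-byte NOP sequence; index 0 is unused. */
        static const unsigned char nops[NOP_MAX + 1][NOP_MAX] = {
                { 0 },
                { 0x90 },                   /* nop */
                { 0x66, 0x90 },             /* operand-size prefixed nop */
                { 0x8d, 0x76, 0x00 },       /* lea 0x0(%esi),%esi */
        };

        static void nop_out(unsigned char *insns, unsigned int len)
        {
                while (len > 0) {
                        unsigned int noplen = len > NOP_MAX ? NOP_MAX : len;

                        memcpy(insns, nops[noplen], noplen);
                        insns += noplen;
                        len -= noplen;
                }
        }
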
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 2fd4b7d927c..776d9be26af 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -647,23 +647,30 @@ static struct {
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
unsigned long flags;
+ int maxlvt;
if (!apic_pm_state.active)
return 0;
+ maxlvt = get_maxlvt();
+
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
- apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+ if (maxlvt >= 4)
+ apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
- apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+ if (maxlvt >= 5)
+ apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
local_irq_save(flags);
disable_local_APIC();
@@ -675,10 +682,13 @@ static int lapic_resume(struct sys_device *dev)
{
unsigned int l, h;
unsigned long flags;
+ int maxlvt;
if (!apic_pm_state.active)
return 0;
+ maxlvt = get_maxlvt();
+
local_irq_save(flags);
/*
@@ -700,8 +710,12 @@ static int lapic_resume(struct sys_device *dev)
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
- apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
- apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+ if (maxlvt >= 5)
+ apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+ if (maxlvt >= 4)
+ apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
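
get_maxlvt() derives the index of the highest implemented local vector table entry from the APIC version register, which is why suspend/resume now only touches LVTPC and LVTTHMR when the part actually has them. Ignoring the quirk handling for very old discrete APICs, the derivation is essentially (sketch):

        /* Bits 16-23 of the APIC version register hold the index of the
         * highest implemented LVT entry. */
        static int get_maxlvt(void)
        {
                unsigned int v = apic_read(APIC_LVR);

                return (v >> 16) & 0xff;
        }
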
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index a60358fe9a4..a97847da9ed 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -231,6 +231,7 @@
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/i8253.h>
+#include <asm/paravirt.h>
#include "io_ports.h"
@@ -2235,7 +2236,7 @@ static int __init apm_init(void)
dmi_check_system(apm_dmi_table);
- if (apm_info.bios.version == 0) {
+ if (apm_info.bios.version == 0 || paravirt_enabled()) {
printk(KERN_INFO "apm: BIOS not found.\n");
return -ENODEV;
}
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index c80271f8f08..1b2f3cd3327 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/elf.h>
+#include <asm/pda.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -51,13 +52,35 @@ void foo(void)
OFFSET(TI_exec_domain, thread_info, exec_domain);
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
- OFFSET(TI_cpu, thread_info, cpu);
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_restart_block, thread_info, restart_block);
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
BLANK();
+ OFFSET(GDS_size, Xgt_desc_struct, size);
+ OFFSET(GDS_address, Xgt_desc_struct, address);
+ OFFSET(GDS_pad, Xgt_desc_struct, pad);
+ BLANK();
+
+ OFFSET(PT_EBX, pt_regs, ebx);
+ OFFSET(PT_ECX, pt_regs, ecx);
+ OFFSET(PT_EDX, pt_regs, edx);
+ OFFSET(PT_ESI, pt_regs, esi);
+ OFFSET(PT_EDI, pt_regs, edi);
+ OFFSET(PT_EBP, pt_regs, ebp);
+ OFFSET(PT_EAX, pt_regs, eax);
+ OFFSET(PT_DS, pt_regs, xds);
+ OFFSET(PT_ES, pt_regs, xes);
+ OFFSET(PT_GS, pt_regs, xgs);
+ OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
+ OFFSET(PT_EIP, pt_regs, eip);
+ OFFSET(PT_CS, pt_regs, xcs);
+ OFFSET(PT_EFLAGS, pt_regs, eflags);
+ OFFSET(PT_OLDESP, pt_regs, esp);
+ OFFSET(PT_OLDSS, pt_regs, xss);
+ BLANK();
+
OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
BLANK();
@@ -74,4 +97,18 @@ void foo(void)
DEFINE(VDSO_PRELINK, VDSO_PRELINK);
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
+ BLANK();
+ OFFSET(PDA_cpu, i386_pda, cpu_number);
+ OFFSET(PDA_pcurrent, i386_pda, pcurrent);
+
+#ifdef CONFIG_PARAVIRT
+ BLANK();
+ OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
+ OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
+ OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
+ OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
+ OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+ OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+#endif
}
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e4758095d87..41cfea57232 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -104,10 +104,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
f_vide();
rdtscl(d2);
d = d2-d;
-
- /* Knock these two lines out if it debugs out ok */
- printk(KERN_INFO "AMD K6 stepping B detected - ");
- /* -- cut here -- */
+
if (d > 20*K6_BUG_LOOP)
printk("system stability may be impaired when more than 32 MB are used.\n");
else
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d9f3e3c31f0..1b34c56f812 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,14 +18,15 @@
#include <asm/apic.h>
#include <mach_apic.h>
#endif
+#include <asm/pda.h>
#include "cpu.h"
DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
-DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
+struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
@@ -235,29 +236,14 @@ static int __cpuinit have_cpuid_p(void)
return flag_is_changeable_p(X86_EFLAGS_ID);
}
-/* Do minimum CPU detection early.
- Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
- The others are not touched to avoid unwanted side effects.
-
- WARNING: this function is only called on the BP. Don't add code here
- that is supposed to run on all CPUs. */
-static void __init early_cpu_detect(void)
+void __init cpu_detect(struct cpuinfo_x86 *c)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- c->x86_cache_alignment = 32;
-
- if (!have_cpuid_p())
- return;
-
/* Get vendor name */
cpuid(0x00000000, &c->cpuid_level,
(int *)&c->x86_vendor_id[0],
(int *)&c->x86_vendor_id[8],
(int *)&c->x86_vendor_id[4]);
- get_cpu_vendor(c, 1);
-
c->x86 = 4;
if (c->cpuid_level >= 0x00000001) {
u32 junk, tfms, cap0, misc;
@@ -274,6 +260,26 @@ static void __init early_cpu_detect(void)
}
}
+/* Do minimum CPU detection early.
+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+ The others are not touched to avoid unwanted side effects.
+
+ WARNING: this function is only called on the BP. Don't add code here
+ that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ c->x86_cache_alignment = 32;
+
+ if (!have_cpuid_p())
+ return;
+
+ cpu_detect(c);
+
+ get_cpu_vendor(c, 1);
+}
+
static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
@@ -308,6 +314,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
#else
c->apicid = (ebx >> 24) & 0xFF;
#endif
+ if (c->x86_capability[0] & (1<<19))
+ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
} else {
/* Have CPUID level 0 only - unheard of */
c->x86 = 4;
@@ -372,6 +380,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_max_cores = 1;
+ c->x86_clflush_size = 32;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!have_cpuid_p()) {
@@ -591,42 +600,24 @@ void __init early_cpu_init(void)
disable_pse = 1;
#endif
}
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
+
+/* Make sure %gs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
- struct tss_struct * t = &per_cpu(init_tss, cpu);
- struct thread_struct *thread = &current->thread;
- struct desc_struct *gdt;
- __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
- struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->xgs = __KERNEL_PDA;
+ return regs;
+}
- if (cpu_test_and_set(cpu, cpu_initialized)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
- for (;;) local_irq_enable();
- }
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+static __cpuinit int alloc_gdt(int cpu)
+{
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ struct desc_struct *gdt;
+ struct i386_pda *pda;
- if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
- if (tsc_disable && cpu_has_tsc) {
- printk(KERN_NOTICE "Disabling TSC...\n");
- /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
- clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
- set_in_cr4(X86_CR4_TSD);
- }
+ gdt = (struct desc_struct *)cpu_gdt_descr->address;
+ pda = cpu_pda(cpu);
- /* The CPU hotplug case */
- if (cpu_gdt_descr->address) {
- gdt = (struct desc_struct *)cpu_gdt_descr->address;
- memset(gdt, 0, PAGE_SIZE);
- goto old_gdt;
- }
/*
* This is a horrible hack to allocate the GDT. The problem
* is that cpu_init() is called really early for the boot CPU
@@ -634,43 +625,130 @@ void __cpuinit cpu_init(void)
* CPUs, when bootmem will have gone away
*/
if (NODE_DATA(0)->bdata->node_bootmem_map) {
- gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
- /* alloc_bootmem_pages panics on failure, so no check */
+ BUG_ON(gdt != NULL || pda != NULL);
+
+ gdt = alloc_bootmem_pages(PAGE_SIZE);
+ pda = alloc_bootmem(sizeof(*pda));
+ /* alloc_bootmem(_pages) panics on failure, so no check */
+
memset(gdt, 0, PAGE_SIZE);
+ memset(pda, 0, sizeof(*pda));
} else {
- gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
- if (unlikely(!gdt)) {
- printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
- for (;;)
- local_irq_enable();
+ /* GDT and PDA might already have been allocated if
+ this is a CPU hotplug re-insertion. */
+ if (gdt == NULL)
+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+
+ if (pda == NULL)
+ pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+ if (unlikely(!gdt || !pda)) {
+ free_pages((unsigned long)gdt, 0);
+ kfree(pda);
+ return 0;
}
}
-old_gdt:
+
+ cpu_gdt_descr->address = (unsigned long)gdt;
+ cpu_pda(cpu) = pda;
+
+ return 1;
+}
+
+/* Initial PDA used by boot CPU */
+struct i386_pda boot_pda = {
+ ._pda = &boot_pda,
+ .cpu_number = 0,
+ .pcurrent = &init_task,
+};
+
+static inline void set_kernel_gs(void)
+{
+ /* Set %gs for this CPU's PDA. Memory clobber is to create a
+ barrier with respect to any PDA operations, so the compiler
+ doesn't move any before here. */
+ asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+
+/* Initialize the CPU's GDT and PDA. The boot CPU does this for
+ itself, but secondaries find this done for them. */
+__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+{
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ struct desc_struct *gdt;
+ struct i386_pda *pda;
+
+ /* For non-boot CPUs, the GDT and PDA should already have been
+ allocated. */
+ if (!alloc_gdt(cpu)) {
+ printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+ return 0;
+ }
+
+ gdt = (struct desc_struct *)cpu_gdt_descr->address;
+ pda = cpu_pda(cpu);
+
+ BUG_ON(gdt == NULL || pda == NULL);
+
/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+ cpu_gdt_descr->size = GDT_SIZE - 1;
- /* Set up GDT entry for 16bit stack */
- *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
- ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
- ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
- (CPU_16BIT_STACK_SIZE - 1);
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+ (u32 *)&gdt[GDT_ENTRY_PDA].b,
+ (unsigned long)pda, sizeof(*pda) - 1,
+ 0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
- cpu_gdt_descr->size = GDT_SIZE - 1;
- cpu_gdt_descr->address = (unsigned long)gdt;
+ memset(pda, 0, sizeof(*pda));
+ pda->_pda = pda;
+ pda->cpu_number = cpu;
+ pda->pcurrent = idle;
+
+ return 1;
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+ struct thread_struct *thread = &curr->thread;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ /* Reinit these anyway, even if they've already been done (on
+ the boot CPU, this will transition from the boot gdt+pda to
+ the real ones). */
load_gdt(cpu_gdt_descr);
+ set_kernel_gs();
+
+ if (cpu_test_and_set(cpu, cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+ for (;;) local_irq_enable();
+ }
+
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+ if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ if (tsc_disable && cpu_has_tsc) {
+ printk(KERN_NOTICE "Disabling TSC...\n");
+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+ set_in_cr4(X86_CR4_TSD);
+ }
+
load_idt(&idt_descr);
/*
* Set up and load the per-CPU TSS and LDT
*/
atomic_inc(&init_mm.mm_count);
- current->active_mm = &init_mm;
- BUG_ON(current->mm);
- enter_lazy_tlb(&init_mm, current);
+ curr->active_mm = &init_mm;
+ if (curr->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, curr);
load_esp0(t, thread);
set_tss_desc(cpu,t);
@@ -682,8 +760,8 @@ old_gdt:
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif
- /* Clear %fs and %gs. */
- asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+ /* Clear %fs. */
+ asm volatile ("mov %0, %%fs" : : "r" (0));
/* Clear all 6 debug registers: */
set_debugreg(0, 0);
@@ -701,6 +779,37 @@ old_gdt:
mxcsr_feature_mask_init();
}
+/* Entrypoint to initialize secondary CPU */
+void __cpuinit secondary_cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+
+ _cpu_init(cpu, curr);
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+
+ /* Set up the real GDT and PDA, so we can transition from the
+ boot versions. */
+ if (!init_gdt(cpu, curr)) {
+ /* failed to allocate something; not much we can do... */
+ for (;;)
+ local_irq_enable();
+ }
+
+ _cpu_init(cpu, curr);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
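
The PDA replaces the old per-CPU 16-bit stack slot in the GDT: each CPU gets a descriptor covering its struct i386_pda, %gs is loaded with __KERNEL_PDA, and per-CPU fields become a single segment-relative access. Conceptually, a field read looks like this (a sketch of the addressing idea, not the kernel's actual accessor macros):

        /* With %gs pointing at this CPU's PDA descriptor, reading a field is
         * one %gs-relative load; offsetof() supplies the displacement. */
        static inline int pda_cpu_number(void)
        {
                int cpu;

                asm("movl %%gs:%c1, %0"
                    : "=r" (cpu)
                    : "i" (offsetof(struct i386_pda, cpu_number)));
                return cpu;
        }
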
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 94a95aa5227..56fe2658495 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -107,7 +107,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
* Note that the workaround only should be initialized once...
*/
c->f00f_bug = 0;
- if ( c->x86 == 5 ) {
+ if (!paravirt_enabled() && c->x86 == 5) {
static int f00f_workaround_enabled = 0;
c->f00f_bug = 1;
@@ -195,8 +195,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-}
+ if (cpu_has_ds) {
+ unsigned int l1;
+ rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+ if (!(l1 & (1<<11)))
+ set_bit(X86_FEATURE_BTS, c->x86_capability);
+ if (!(l1 & (1<<12)))
+ set_bit(X86_FEATURE_PEBS, c->x86_capability);
+ }
+}
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
{
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 5c43be47587..80b4c5d421b 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -480,12 +480,10 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
if (num_cache_leaves == 0)
return -ENOENT;
- cpuid4_info[cpu] = kmalloc(
+ cpuid4_info[cpu] = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
if (unlikely(cpuid4_info[cpu] == NULL))
return -ENOMEM;
- memset(cpuid4_info[cpu], 0,
- sizeof(struct _cpuid4_info) * num_cache_leaves);
oldmask = current->cpus_allowed;
retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -658,17 +656,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
return -ENOENT;
/* Allocate all required memory */
- cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
+ cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (unlikely(cache_kobject[cpu] == NULL))
goto err_out;
- memset(cache_kobject[cpu], 0, sizeof(struct kobject));
- index_kobject[cpu] = kmalloc(
+ index_kobject[cpu] = kzalloc(
sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
if (unlikely(index_kobject[cpu] == NULL))
goto err_out;
- memset(index_kobject[cpu], 0,
- sizeof(struct _index_kobject) * num_cache_leaves);
return 0;
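
kzalloc() is kmalloc() plus a guaranteed zeroing of the returned memory, so each conversion here drops an easy-to-miss step. The pattern being replaced and its replacement look like (minimal sketch; ptr and count are placeholders):

        /* Before: allocate, then remember to zero. */
        ptr = kmalloc(count * sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;
        memset(ptr, 0, count * sizeof(*ptr));

        /* After: one call that returns zeroed memory, or NULL on failure. */
        ptr = kzalloc(count * sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;
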
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b0..6b5d3518a1c 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
}
}
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
schedule_delayed_work(&mce_work, MCE_RATE);
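
This follows the workqueue rework in which handlers receive the struct work_struct pointer itself and delayed work gets its own type. A self-rearming poller in the new style looks like the following (minimal sketch; do_the_check() and POLL_INTERVAL are placeholders):

        static void poll_fn(struct work_struct *work);
        static DECLARE_DELAYED_WORK(poll_work, poll_fn);

        static void poll_fn(struct work_struct *work)
        {
                /* If per-instance data is needed, recover the container: */
                struct delayed_work *dwork =
                        container_of(work, struct delayed_work, work);

                do_the_check();
                schedule_delayed_work(dwork, POLL_INTERVAL);
        }
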
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 2d8703b7ce6..065005c3f16 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <linux/notifier.h>
+#include <linux/jiffies.h>
#include <asm/therm_throt.h>
/* How long to wait between reporting thermal events */
@@ -115,7 +116,6 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}
-#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -152,7 +152,6 @@ static struct notifier_block thermal_throttle_cpu_notifier =
{
.notifier_call = thermal_throttle_cpu_callback,
};
-#endif /* CONFIG_HOTPLUG_CPU */
static __init int thermal_throttle_init_device(void)
{
diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
index a25b701ab84..191fc053364 100644
--- a/arch/i386/kernel/cpu/mtrr/Makefile
+++ b/arch/i386/kernel/cpu/mtrr/Makefile
@@ -1,5 +1,3 @@
obj-y := main.o if.o generic.o state.o
-obj-y += amd.o
-obj-y += cyrix.o
-obj-y += centaur.o
+obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
index 1a1e04b6fd0..0949cdbf848 100644
--- a/arch/i386/kernel/cpu/mtrr/amd.c
+++ b/arch/i386/kernel/cpu/mtrr/amd.c
@@ -7,7 +7,7 @@
static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type)
+ unsigned long *size, mtrr_type * type)
{
unsigned long low, high;
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
index 33f00ac314e..cb9aa3a7a7a 100644
--- a/arch/i386/kernel/cpu/mtrr/centaur.c
+++ b/arch/i386/kernel/cpu/mtrr/centaur.c
@@ -17,7 +17,7 @@ static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
*/
static int
-centaur_get_free_region(unsigned long base, unsigned long size)
+centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
@@ -26,10 +26,11 @@ centaur_get_free_region(unsigned long base, unsigned long size)
{
int i, max;
mtrr_type ltype;
- unsigned long lbase;
- unsigned int lsize;
+ unsigned long lbase, lsize;
max = num_var_ranges;
+ if (replace_reg >= 0 && replace_reg < max)
+ return replace_reg;
for (i = 0; i < max; ++i) {
if (centaur_mcr_reserved & (1 << i))
continue;
@@ -49,7 +50,7 @@ mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
static void
centaur_get_mcr(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type)
+ unsigned long *size, mtrr_type * type)
{
*base = centaur_mcr[reg].high >> PAGE_SHIFT;
*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 9027a987006..0737a596db4 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -9,7 +9,7 @@ int arr3_protected;
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type)
+ unsigned long *size, mtrr_type * type)
{
unsigned long flags;
unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
}
static int
-cyrix_get_free_region(unsigned long base, unsigned long size)
+cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free ARR.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@ cyrix_get_free_region(unsigned long base, unsigned long size)
{
int i;
mtrr_type ltype;
- unsigned long lbase;
- unsigned int lsize;
+ unsigned long lbase, lsize;
+ switch (replace_reg) {
+ case 7:
+ if (size < 0x40)
+ break;
+ case 6:
+ case 5:
+ case 4:
+ return replace_reg;
+ case 3:
+ if (arr3_protected)
+ break;
+ case 2:
+ case 1:
+ case 0:
+ return replace_reg;
+ }
/* If we are to set up a region >32M then look at ARR7 immediately */
if (size > 0x2000) {
cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -214,7 +229,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
typedef struct {
unsigned long base;
- unsigned int size;
+ unsigned long size;
mtrr_type type;
} arr_state_t;
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 0b61eed8bbd..f77fc53db65 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
@@ -15,12 +16,19 @@ struct mtrr_state {
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
+ unsigned char have_fixed;
mtrr_type def_type;
};
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "mtrr."
+
+static __initdata int mtrr_show;
+module_param_named(show, mtrr_show, bool, 0);
+
/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -43,6 +51,14 @@ get_fixed_ranges(mtrr_type * frs)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
+static void __init print_fixed(unsigned base, unsigned step, const mtrr_type*types)
+{
+ unsigned i;
+
+ for (i = 0; i < 8; ++i, ++types, base += step)
+ printk(KERN_INFO "MTRR %05X-%05X %s\n", base, base + step - 1, mtrr_attrib_to_str(*types));
+}
+
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
@@ -58,13 +74,49 @@ void __init get_mtrr_state(void)
}
vrs = mtrr_state.var_ranges;
+ rdmsr(MTRRcap_MSR, lo, dummy);
+ mtrr_state.have_fixed = (lo >> 8) & 1;
+
for (i = 0; i < num_var_ranges; i++)
get_mtrr_var_range(i, &vrs[i]);
- get_fixed_ranges(mtrr_state.fixed_ranges);
+ if (mtrr_state.have_fixed)
+ get_fixed_ranges(mtrr_state.fixed_ranges);
rdmsr(MTRRdefType_MSR, lo, dummy);
mtrr_state.def_type = (lo & 0xff);
mtrr_state.enabled = (lo & 0xc00) >> 10;
+
+ if (mtrr_show) {
+ int high_width;
+
+ printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
+ if (mtrr_state.have_fixed) {
+ printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
+ mtrr_state.enabled & 1 ? "en" : "dis");
+ print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
+ for (i = 0; i < 2; ++i)
+ print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
+ for (i = 0; i < 8; ++i)
+ print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
+ }
+ printk(KERN_INFO "MTRR variable ranges %sabled:\n",
+ mtrr_state.enabled & 2 ? "en" : "dis");
+ high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+ for (i = 0; i < num_var_ranges; ++i) {
+ if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
+ printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+ i,
+ high_width,
+ mtrr_state.var_ranges[i].base_hi,
+ mtrr_state.var_ranges[i].base_lo >> 12,
+ high_width,
+ mtrr_state.var_ranges[i].mask_hi,
+ mtrr_state.var_ranges[i].mask_lo >> 12,
+ mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+ else
+ printk(KERN_INFO "MTRR %u disabled\n", i);
+ }
+ }
}
/* Some BIOS's are fucked and don't set all MTRRs the same! */
@@ -95,7 +147,7 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
smp_processor_id(), msr, a, b);
}
-int generic_get_free_region(unsigned long base, unsigned long size)
+int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
@@ -104,10 +156,11 @@ int generic_get_free_region(unsigned long base, unsigned long size)
{
int i, max;
mtrr_type ltype;
- unsigned long lbase;
- unsigned lsize;
+ unsigned long lbase, lsize;
max = num_var_ranges;
+ if (replace_reg >= 0 && replace_reg < max)
+ return replace_reg;
for (i = 0; i < max; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
if (lsize == 0)
@@ -117,7 +170,7 @@ int generic_get_free_region(unsigned long base, unsigned long size)
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type)
+ unsigned long *size, mtrr_type *type)
{
unsigned int mask_lo, mask_hi, base_lo, base_hi;
@@ -202,7 +255,9 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
return changed;
}
-static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
+static u32 deftype_lo, deftype_hi;
+
+static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
@@ -217,14 +272,14 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
- if (set_fixed_ranges(mtrr_state.fixed_ranges))
+ if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
/* Set_mtrr_restore restores the old value of MTRRdefType,
so to set it we fiddle with the saved value */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
- deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
+ deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
@@ -233,7 +288,6 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
static unsigned long cr4 = 0;
-static u32 deftype_lo, deftype_hi;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
@@ -271,7 +325,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
/* Disable MTRRs, and set the default type to uncached */
- mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+ mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
@@ -300,7 +354,7 @@ static void generic_set_all(void)
prepare_set();
/* Actually set the state */
- mask = set_mtrr_state(deftype_lo,deftype_hi);
+ mask = set_mtrr_state();
post_set();
local_irq_restore(flags);
@@ -366,7 +420,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, unsigned i
printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
return -EINVAL;
}
- if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+ if (!(base + size < 0x70000 || base > 0x7003F) &&
(type == MTRR_TYPE_WRCOMB
|| type == MTRR_TYPE_WRBACK)) {
printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
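
The new ~0xcff mask touches exactly the fields that matter in the MTRRdefType MSR: bits 0-7 hold the default memory type, bit 10 the fixed-range enable and bit 11 the global enable, while all other bits are preserved. Spelled out (sketch; the macro names are only for illustration):

        #define MTRR_DEF_TYPE_MASK   0x000000ffu   /* default memory type  */
        #define MTRR_DEF_FE          (1u << 10)    /* fixed-range MTRRs on */
        #define MTRR_DEF_E           (1u << 11)    /* MTRRs globally on    */

        /* Disable MTRRs and force the default type to uncached (type 0),
         * leaving any reserved bits alone. */
        deftype_lo &= ~(MTRR_DEF_TYPE_MASK | MTRR_DEF_FE | MTRR_DEF_E);
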
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 5ac051bb9d5..5ae1705eafa 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -17,7 +17,7 @@ extern unsigned int *usage_table;
#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
-static char *mtrr_strings[MTRR_NUM_TYPES] =
+static const char *const mtrr_strings[MTRR_NUM_TYPES] =
{
"uncachable", /* 0 */
"write-combining", /* 1 */
@@ -28,7 +28,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
"write-back", /* 6 */
};
-char *mtrr_attrib_to_str(int x)
+const char *mtrr_attrib_to_str(int x)
{
return (x <= 6) ? mtrr_strings[x] : "?";
}
@@ -44,10 +44,9 @@ mtrr_file_add(unsigned long base, unsigned long size,
max = num_var_ranges;
if (fcount == NULL) {
- fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL);
+ fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
if (!fcount)
return -ENOMEM;
- memset(fcount, 0, max * sizeof *fcount);
FILE_FCOUNT(file) = fcount;
}
if (!page) {
@@ -155,6 +154,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
{
int err = 0;
mtrr_type type;
+ unsigned long size;
struct mtrr_sentry sentry;
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
@@ -235,15 +235,15 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
case MTRRIOC_GET_ENTRY:
if (gentry.regnum >= num_var_ranges)
return -EINVAL;
- mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
+ mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
/* Hide entries that go above 4GB */
- if (gentry.base + gentry.size > 0x100000
- || gentry.size == 0x100000)
+ if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+ || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
gentry.base = gentry.size = gentry.type = 0;
else {
gentry.base <<= PAGE_SHIFT;
- gentry.size <<= PAGE_SHIFT;
+ gentry.size = size << PAGE_SHIFT;
gentry.type = type;
}
@@ -273,8 +273,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
case MTRRIOC_GET_PAGE_ENTRY:
if (gentry.regnum >= num_var_ranges)
return -EINVAL;
- mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
- gentry.type = type;
+ mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+ /* Hide entries that would overflow */
+ if (size != (__typeof__(gentry.size))size)
+ gentry.base = gentry.size = gentry.type = 0;
+ else {
+ gentry.size = size;
+ gentry.type = type;
+ }
break;
}
@@ -353,8 +359,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
char factor;
int i, max, len;
mtrr_type type;
- unsigned long base;
- unsigned int size;
+ unsigned long base, size;
len = 0;
max = num_var_ranges;
@@ -373,7 +378,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
}
/* RED-PEN: base can be > 32bit */
len += seq_printf(seq,
- "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n",
+ "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
i, base, base >> (20 - PAGE_SHIFT), size, factor,
mtrr_attrib_to_str(type), usage_table[i]);
}
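
Sizes are now kept internally in pages as unsigned long, but the legacy byte-granular ioctl structure still has a 32-bit size field, so any region whose size or end cannot be expressed below 4 GiB has to be hidden rather than truncated. The fit test reduces to (userspace sketch assuming the i386 PAGE_SHIFT of 12; the function name is illustrative):

        #include <stdbool.h>

        /* True if a region of 'pages' 4 KiB pages starting at page 'base'
         * can be reported through a 32-bit byte-sized interface. */
        static bool fits_in_u32_bytes(unsigned long base, unsigned long pages)
        {
                unsigned long limit = 1UL << (32 - 12);   /* 4 GiB, in pages */

                return pages && pages < limit && base + pages - 1 < limit;
        }
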
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index fff90bda473..16bb7ea8714 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -59,7 +59,11 @@ struct mtrr_ops * mtrr_if = NULL;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
+#ifndef CONFIG_X86_64
extern int arr3_protected;
+#else
+#define arr3_protected 0
+#endif
void set_mtrr_ops(struct mtrr_ops * ops)
{
@@ -168,6 +172,13 @@ static void ipi_handler(void *info)
#endif
+static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+ return type1 == MTRR_TYPE_UNCACHABLE ||
+ type2 == MTRR_TYPE_UNCACHABLE ||
+ (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+ (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+}
+
/**
* set_mtrr - update mtrrs on all processors
* @reg: mtrr in question
@@ -263,8 +274,8 @@ static void set_mtrr(unsigned int reg, unsigned long base,
/**
* mtrr_add_page - Add a memory type region
- * @base: Physical base address of region in pages (4 KB)
- * @size: Physical size of region in pages (4 KB)
+ * @base: Physical base address of region in pages (in units of 4 kB!)
+ * @size: Physical size of region in pages (4 kB)
* @type: Type of MTRR desired
* @increment: If this is true do usage counting on the region
*
@@ -300,11 +311,9 @@ static void set_mtrr(unsigned int reg, unsigned long base,
int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, char increment)
{
- int i;
+ int i, replace, error;
mtrr_type ltype;
- unsigned long lbase;
- unsigned int lsize;
- int error;
+ unsigned long lbase, lsize;
if (!mtrr_if)
return -ENXIO;
@@ -324,12 +333,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
return -ENOSYS;
}
+ if (!size) {
+ printk(KERN_WARNING "mtrr: zero sized request\n");
+ return -EINVAL;
+ }
+
if (base & size_or_mask || size & size_or_mask) {
printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
error = -EINVAL;
+ replace = -1;
/* No CPU hotplug when we change MTRR entries */
lock_cpu_hotplug();
@@ -337,21 +352,28 @@ int mtrr_add_page(unsigned long base, unsigned long size,
mutex_lock(&mtrr_mutex);
for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (base >= lbase + lsize)
- continue;
- if ((base < lbase) && (base + size <= lbase))
+ if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
continue;
/* At this point we know there is some kind of overlap/enclosure */
- if ((base < lbase) || (base + size > lbase + lsize)) {
+ if (base < lbase || base + size - 1 > lbase + lsize - 1) {
+ if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+ /* New region encloses an existing region */
+ if (type == ltype) {
+ replace = replace == -1 ? i : -2;
+ continue;
+ }
+ else if (types_compatible(type, ltype))
+ continue;
+ }
printk(KERN_WARNING
"mtrr: 0x%lx000,0x%lx000 overlaps existing"
- " 0x%lx000,0x%x000\n", base, size, lbase,
+ " 0x%lx000,0x%lx000\n", base, size, lbase,
lsize);
goto out;
}
/* New region is enclosed by an existing region */
if (ltype != type) {
- if (type == MTRR_TYPE_UNCACHABLE)
+ if (types_compatible(type, ltype))
continue;
printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
base, size, mtrr_attrib_to_str(ltype),
@@ -364,10 +386,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
goto out;
}
/* Search for an empty MTRR */
- i = mtrr_if->get_free_region(base, size);
+ i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
set_mtrr(i, base, size, type);
- usage_table[i] = 1;
+ if (likely(replace < 0))
+ usage_table[i] = 1;
+ else {
+ usage_table[i] = usage_table[replace] + !!increment;
+ if (unlikely(replace != i)) {
+ set_mtrr(replace, 0, 0, 0);
+ usage_table[replace] = 0;
+ }
+ }
} else
printk(KERN_INFO "mtrr: no more MTRRs available\n");
error = i;
@@ -455,8 +485,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
int i, max;
mtrr_type ltype;
- unsigned long lbase;
- unsigned int lsize;
+ unsigned long lbase, lsize;
int error = -EINVAL;
if (!mtrr_if)
@@ -544,9 +573,11 @@ extern void centaur_init_mtrr(void);
static void __init init_ifs(void)
{
+#ifndef CONFIG_X86_64
amd_init_mtrr();
cyrix_init_mtrr();
centaur_init_mtrr();
+#endif
}
/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
@@ -555,7 +586,7 @@ static void __init init_ifs(void)
struct mtrr_value {
mtrr_type ltype;
unsigned long lbase;
- unsigned int lsize;
+ unsigned long lsize;
};
static struct mtrr_value * mtrr_state;
@@ -565,10 +596,8 @@ static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
int i;
int size = num_var_ranges * sizeof(struct mtrr_value);
- mtrr_state = kmalloc(size,GFP_ATOMIC);
- if (mtrr_state)
- memset(mtrr_state,0,size);
- else
+ mtrr_state = kzalloc(size,GFP_ATOMIC);
+ if (!mtrr_state)
return -ENOMEM;
for (i = 0; i < num_var_ranges; i++) {
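
types_compatible() encodes the only overlaps the replace/overlap logic tolerates: anything may overlap an uncachable region (uncachable wins), and write-through may overlap write-back. As a standalone check with a tiny self-test (userspace sketch; the MTRR_TYPE_* values follow the table in if.c):

        #include <assert.h>

        enum { MTRR_TYPE_UNCACHABLE = 0, MTRR_TYPE_WRCOMB = 1,
               MTRR_TYPE_WRTHROUGH = 4, MTRR_TYPE_WRPROT = 5,
               MTRR_TYPE_WRBACK = 6 };

        static int types_compatible(int t1, int t2)
        {
                return t1 == MTRR_TYPE_UNCACHABLE ||
                       t2 == MTRR_TYPE_UNCACHABLE ||
                       (t1 == MTRR_TYPE_WRTHROUGH && t2 == MTRR_TYPE_WRBACK) ||
                       (t1 == MTRR_TYPE_WRBACK && t2 == MTRR_TYPE_WRTHROUGH);
        }

        int main(void)
        {
                assert(types_compatible(MTRR_TYPE_WRBACK, MTRR_TYPE_UNCACHABLE));
                assert(types_compatible(MTRR_TYPE_WRTHROUGH, MTRR_TYPE_WRBACK));
                assert(!types_compatible(MTRR_TYPE_WRCOMB, MTRR_TYPE_WRBACK));
                return 0;
        }
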
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
index 99c9f268204..d61ea9db6cf 100644
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -43,15 +43,16 @@ struct mtrr_ops {
void (*set_all)(void);
void (*get)(unsigned int reg, unsigned long *base,
- unsigned int *size, mtrr_type * type);
- int (*get_free_region) (unsigned long base, unsigned long size);
-
+ unsigned long *size, mtrr_type * type);
+ int (*get_free_region)(unsigned long base, unsigned long size,
+ int replace_reg);
int (*validate_add_page)(unsigned long base, unsigned long size,
unsigned int type);
int (*have_wrcomb)(void);
};
-extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg);
extern int generic_validate_add_page(unsigned long base, unsigned long size,
unsigned int type);
@@ -62,17 +63,17 @@ extern int positive_have_wrcomb(void);
/* library functions for processor-specific routines */
struct set_mtrr_context {
unsigned long flags;
- unsigned long deftype_lo;
- unsigned long deftype_hi;
unsigned long cr4val;
- unsigned long ccr3;
+ u32 deftype_lo;
+ u32 deftype_hi;
+ u32 ccr3;
};
struct mtrr_var_range {
- unsigned long base_lo;
- unsigned long base_hi;
- unsigned long mask_lo;
- unsigned long mask_hi;
+ u32 base_lo;
+ u32 base_hi;
+ u32 mask_lo;
+ u32 mask_hi;
};
void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -92,6 +93,6 @@ extern struct mtrr_ops * mtrr_if;
extern unsigned int num_var_ranges;
void mtrr_state_warn(void);
-char *mtrr_attrib_to_str(int x);
+const char *mtrr_attrib_to_str(int x);
void mtrr_wrmsr(unsigned, unsigned, unsigned);
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 76aac088a32..6624d8583c4 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -152,9 +152,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, " [%d]", i);
}
- seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
+ seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
return 0;
}
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index fde8bea85ce..db6dd20c358 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -34,7 +34,6 @@
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
-#include <linux/fs.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
@@ -156,28 +155,27 @@ static struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static int cpuid_class_device_create(int i)
+static int cpuid_device_create(int i)
{
int err = 0;
- struct class_device *class_err;
+ struct device *dev;
- class_err = class_device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i);
- if (IS_ERR(class_err))
- err = PTR_ERR(class_err);
+ dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
+ if (IS_ERR(dev))
+ err = PTR_ERR(dev);
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
- cpuid_class_device_create(cpu);
+ cpuid_device_create(cpu);
break;
case CPU_DEAD:
- class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
break;
}
return NOTIFY_OK;
@@ -187,7 +185,6 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
{
.notifier_call = cpuid_class_cpu_callback,
};
-#endif /* !CONFIG_HOTPLUG_CPU */
static int __init cpuid_init(void)
{
@@ -206,7 +203,7 @@ static int __init cpuid_init(void)
goto out_chrdev;
}
for_each_online_cpu(i) {
- err = cpuid_class_device_create(i);
+ err = cpuid_device_create(i);
if (err != 0)
goto out_class;
}
@@ -218,7 +215,7 @@ static int __init cpuid_init(void)
out_class:
i = 0;
for_each_online_cpu(i) {
- class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
+ device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
}
class_destroy(cpuid_class);
out_chrdev:
@@ -232,7 +229,7 @@ static void __exit cpuid_exit(void)
int cpu = 0;
for_each_online_cpu(cpu)
- class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
class_destroy(cpuid_class);
unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 144b4328896..a5e0e990ea9 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -31,68 +31,6 @@
/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
- size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) +3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
- struct elf_prstatus prstatus;
- u32 *buf;
-
- if ((cpu < 0) || (cpu >= NR_CPUS))
- return;
-
- /* Using ELF notes here is opportunistic.
- * I need a well defined structure format
- * for the data I pass, and I need tags
- * on the data to indicate what information I have
- * squirrelled away. ELF notes happen to provide
- * all of that, so there is no need to invent something new.
- */
- buf = (u32*)per_cpu_ptr(crash_notes, cpu);
- if (!buf)
- return;
- memset(&prstatus, 0, sizeof(prstatus));
- prstatus.pr_pid = current->pid;
- elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
- sizeof(prstatus));
- final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
- int cpu;
-
- cpu = safe_smp_processor_id();
- crash_save_this_cpu(regs, cpu);
-}
-
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;
@@ -121,7 +59,7 @@ static int crash_nmi_callback(struct notifier_block *self,
crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs;
}
- crash_save_this_cpu(regs, cpu);
+ crash_save_cpu(regs, cpu);
disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@@ -195,5 +133,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
#if defined(CONFIG_X86_IO_APIC)
disable_IO_APIC();
#endif
- crash_save_self(regs);
+ crash_save_cpu(regs, safe_smp_processor_id());
}
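
The removed helpers built per-CPU ELF notes by hand; that work now lives in the generic crash_save_cpu(). For reference, packing one ELF note into a 32-bit-word buffer works like this (standalone sketch of the layout: header, then name and payload, each padded to 4 bytes):

        #include <elf.h>
        #include <string.h>

        /* Append one ELF note to 'buf' and return the next free position. */
        static Elf32_Word *append_note(Elf32_Word *buf, const char *name,
                                       Elf32_Word type, const void *data,
                                       size_t data_len)
        {
                Elf32_Nhdr note = {
                        .n_namesz = strlen(name) + 1,
                        .n_descsz = data_len,
                        .n_type   = type,
                };

                memcpy(buf, &note, sizeof(note));
                buf += (sizeof(note) + 3) / 4;
                memcpy(buf, name, note.n_namesz);
                buf += (note.n_namesz + 3) / 4;
                memcpy(buf, data, note.n_descsz);
                buf += (note.n_descsz + 3) / 4;
                return buf;
        }
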
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
new file mode 100644
index 00000000000..2f7d0a92fd7
--- /dev/null
+++ b/arch/i386/kernel/e820.c
@@ -0,0 +1,894 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/efi.h>
+#include <linux/pfn.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/e820.h>
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+struct e820map e820;
+struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+};
+static struct change_member change_point_list[2*E820MAX] __initdata;
+static struct change_member *change_point[2*E820MAX] __initdata;
+static struct e820entry *overlap_list[E820MAX] __initdata;
+static struct e820entry new_bios[E820MAX] __initdata;
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+extern int user_defined_memmap;
+struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+ .name = "Adapter ROM",
+ .start = 0xc8000,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x001f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic1",
+ .start = 0x0020,
+ .end = 0x0021,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer0",
+ .start = 0x0040,
+ .end = 0x0043,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "timer1",
+ .start = 0x0050,
+ .end = 0x0053,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "keyboard",
+ .start = 0x0060,
+ .end = 0x006f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma page reg",
+ .start = 0x0080,
+ .end = 0x008f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "pic2",
+ .start = 0x00a0,
+ .end = 0x00a1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+ .name = "fpu",
+ .start = 0x00f0,
+ .end = 0x00ff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
+static int romsignature(const unsigned char *x)
+{
+ unsigned short sig;
+ int ret = 0;
+ if (probe_kernel_address((const unsigned short *)x, sig) == 0)
+ ret = (sig == 0xaa55);
+ return ret;
+}
+
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+}
+
+static void __init probe_roms(void)
+{
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+}
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+ int i;
+
+ probe_roms();
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+#ifndef CONFIG_RESOURCES_64BIT
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+#endif
+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, res)) {
+ kfree(res);
+ continue;
+ }
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, code_resource);
+ request_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
+ }
+ }
+}
+
+/*
+ * Request address space for all standard resources
+ *
+ * This is called just before pcibios_init(), which is also a
+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
+ */
+static int __init request_standard_resources(void)
+{
+ int i;
+
+ printk("Setting up standard PCI resources\n");
+ if (efi_enabled)
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+ else
+ legacy_init_iomem_resources(&code_resource, &data_resource);
+
+ /* EFI systems may still have VGA */
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+ return 0;
+}
+
+subsys_initcall(request_standard_resources);
+
+void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type)
+{
+ int x;
+
+ if (!efi_enabled) {
+ x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+ }
+} /* add_memory_region */
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+ printk("sanitize start\n");
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2) {
+ printk("sanitize bail 0\n");
+ return -1;
+ }
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
+ printk("sanitize bail 1\n");
+ return -1;
+ }
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx; /* true number of change-points */
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+ /* loop through change-points, determining effect on the new bios map */
+ for (chgidx=0; chgidx < chg_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ printk("sanitize end\n");
+ return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long long start = biosmap->addr;
+ unsigned long long size = biosmap->size;
+ unsigned long long end = start + size;
+ unsigned long type = biosmap->type;
+ printk("copy_e820_map() start: %016Lx size: %016Lx end: %016Lx type: %ld\n", start, size, end, type);
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ */
+ if (type == E820_RAM) {
+ printk("copy_e820_map() type is E820_RAM\n");
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ printk("copy_e820_map() lies in range...\n");
+ if (start < 0xA0000ULL) {
+ printk("copy_e820_map() start < 0xA0000ULL\n");
+ add_memory_region(start, 0xA0000ULL-start, type);
+ }
+ if (end <= 0x100000ULL) {
+ printk("copy_e820_map() end <= 0x100000ULL\n");
+ continue;
+ }
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+}
+
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long *max_pfn = arg, pfn;
+
+ if (start < end) {
+ pfn = PFN_UP(end -1);
+ if (pfn > *max_pfn)
+ *max_pfn = pfn;
+ }
+ return 0;
+}
+
+static int __init
+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+{
+ memory_present(0, PFN_UP(start), PFN_DOWN(end));
+ return 0;
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+ int i;
+
+ max_pfn = 0;
+ if (efi_enabled) {
+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+ efi_memmap_walk(efi_memory_present_wrapper, NULL);
+ return;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ start = PFN_UP(e820.map[i].addr);
+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ memory_present(0, start, end);
+ }
+}
+
+/*
+ * Free all available memory for boot time allocation. Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+ /* check max_low_pfn */
+ if (start >= (max_low_pfn << PAGE_SHIFT))
+ return 0;
+ if (end >= (max_low_pfn << PAGE_SHIFT))
+ end = max_low_pfn << PAGE_SHIFT;
+ if (start < end)
+ free_bootmem(start, end - start);
+
+ return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+ int i;
+
+ if (efi_enabled) {
+ efi_memmap_walk(free_available_memory, NULL);
+ return;
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long curr_pfn, last_pfn, size;
+ /*
+ * Reserve usable low memory
+ */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(e820.map[i].addr);
+ if (curr_pfn >= max_low_pfn)
+ continue;
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+}
+
+void __init register_memory(void)
+{
+ unsigned long gapstart, gapsize, round;
+ unsigned long long last;
+ int i;
+
+ /*
+ * Search for the biggest gap in the low 32 bits of the e820
+ * memory space.
+ */
+ last = 0x100000000ull;
+ gapstart = 0x10000000;
+ gapsize = 0x400000;
+ i = e820.nr_map;
+ while (--i >= 0) {
+ unsigned long long start = e820.map[i].addr;
+ unsigned long long end = start + e820.map[i].size;
+
+ /*
+ * Since "last" is at most 4GB, we know we'll
+ * fit in 32 bits if this condition is true
+ */
+ if (last > end) {
+ unsigned long gap = last - end;
+
+ if (gap > gapsize) {
+ gapsize = gap;
+ gapstart = end;
+ }
+ }
+ if (start < last)
+ last = start;
+ }
+
+ /*
+ * See how much we want to round up: start off with
+ * rounding to the next 1MB area.
+ */
+ round = 0x100000;
+ while ((gapsize >> 4) > round)
+ round += round;
+ /* Fun with two's complement */
+ pci_mem_start = (gapstart + round) & -round;
+
+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+ pci_mem_start, gapstart, gapsize);
+}
+
+void __init print_memory_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %lu\n", e820.map[i].type);
+ break;
+ }
+ }
+}
+
+static __init __always_inline void efi_limit_regions(unsigned long long size)
+{
+ unsigned long long current_addr = 0;
+ efi_memory_desc_t *md, *next_md;
+ void *p, *p1;
+ int i, j;
+
+ j = 0;
+ p1 = memmap.map;
+ for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
+ md = p;
+ next_md = p1;
+ current_addr = md->phys_addr +
+ PFN_PHYS(md->num_pages);
+ if (is_available_memory(md)) {
+ if (md->phys_addr >= size) continue;
+ memcpy(next_md, md, memmap.desc_size);
+ if (current_addr >= size) {
+ next_md->num_pages -=
+ PFN_UP(current_addr-size);
+ }
+ p1 += memmap.desc_size;
+ next_md = p1;
+ j++;
+ } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
+ EFI_MEMORY_RUNTIME) {
+ /* In order to make runtime services
+ * available we have to include runtime
+ * memory regions in the memory map */
+ memcpy(next_md, md, memmap.desc_size);
+ p1 += memmap.desc_size;
+ next_md = p1;
+ j++;
+ }
+ }
+ memmap.nr_map = j;
+ memmap.map_end = memmap.map +
+ (memmap.nr_map * memmap.desc_size);
+}
+
+void __init limit_regions(unsigned long long size)
+{
+ unsigned long long current_addr;
+ int i;
+
+ print_memory_map("limit_regions start");
+ if (efi_enabled) {
+ efi_limit_regions(size);
+ return;
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ current_addr = e820.map[i].addr + e820.map[i].size;
+ if (current_addr < size)
+ continue;
+
+ if (e820.map[i].type != E820_RAM)
+ continue;
+
+ if (e820.map[i].addr >= size) {
+ /*
+ * This region starts past the end of the
+ * requested size, skip it completely.
+ */
+ e820.nr_map = i;
+ } else {
+ e820.nr_map = i + 1;
+ e820.map[i].size -= current_addr - size;
+ }
+ print_memory_map("limit_regions endfor");
+ return;
+ }
+ print_memory_map("limit_regions endfunc");
+}
+
+ /*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+ * Note: this function only works correctly if the e820 table is sorted and
+ * non-overlapping, which is the case
+ */
+int __init
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
+{
+ u64 start = s;
+ u64 end = e;
+ int i;
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ if (type && ei->type != type)
+ continue;
+ /* does the region (at least partly) overlap the range being checked? */
+ if (ei->addr >= end || ei->addr + ei->size <= start)
+ continue;
+ /* if the region is at the beginning of <start,end> we move
+ * start to the end of the region since it's ok until there
+ */
+ if (ei->addr <= start)
+ start = ei->addr + ei->size;
+ /* if start is now at or beyond end, we're done, full
+ * coverage */
+ if (start >= end)
+ return 1; /* we're done */
+ }
+ return 0;
+}
+
+static int __init parse_memmap(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "exactmap") == 0) {
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we
+ * still need to know the real mem
+ * size before the original memory map is
+ * reset.
+ */
+ find_max_pfn();
+ saved_max_pfn = max_pfn;
+#endif
+ e820.nr_map = 0;
+ user_defined_memmap = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(arg, &arg);
+ if (*arg == '@') {
+ start_at = memparse(arg+1, &arg);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*arg == '#') {
+ start_at = memparse(arg+1, &arg);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*arg == '$') {
+ start_at = memparse(arg+1, &arg);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ limit_regions(mem_size);
+ user_defined_memmap = 1;
+ }
+ }
+ return 0;
+}
+early_param("memmap", parse_memmap);
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 8b40648d0ef..b92c7f0a358 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -194,17 +194,24 @@ inline int efi_set_rtc_mmss(unsigned long nowtime)
return 0;
}
/*
- * This should only be used during kernel init and before runtime
- * services have been remapped, therefore, we'll need to call in physical
- * mode. Note, this call isn't used later, so mark it __init.
+ * This is used during kernel init before runtime
+ * services have been remapped and also during suspend, therefore,
+ * we'll need to call both in physical and virtual modes.
*/
-inline unsigned long __init efi_get_time(void)
+inline unsigned long efi_get_time(void)
{
efi_status_t status;
efi_time_t eft;
efi_time_cap_t cap;
- status = phys_efi_get_time(&eft, &cap);
+ if (efi.get_time) {
+ /* if we are in virtual mode use remapped function */
+ status = efi.get_time(&eft, &cap);
+ } else {
+ /* we are in physical mode */
+ status = phys_efi_get_time(&eft, &cap);
+ }
+
if (status != EFI_SUCCESS)
printk("Oops: efitime: can't read time status: 0x%lx\n",status);
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 5a63d6fdb70..de34b7fed3c 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -30,12 +30,13 @@
* 18(%esp) - %eax
* 1C(%esp) - %ds
* 20(%esp) - %es
- * 24(%esp) - orig_eax
- * 28(%esp) - %eip
- * 2C(%esp) - %cs
- * 30(%esp) - %eflags
- * 34(%esp) - %oldesp
- * 38(%esp) - %oldss
+ * 24(%esp) - %gs
+ * 28(%esp) - orig_eax
+ * 2C(%esp) - %eip
+ * 30(%esp) - %cs
+ * 34(%esp) - %eflags
+ * 38(%esp) - %oldesp
+ * 3C(%esp) - %oldss
*
* "current" is in register %ebx during any slow entries.
*/
@@ -48,26 +49,24 @@
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
+#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"
-#define nr_syscalls ((syscall_table_size)/4)
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization. The following will never clobber any registers:
+ * INTERRUPT_RETURN (aka. "iret")
+ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
-EBX = 0x00
-ECX = 0x04
-EDX = 0x08
-ESI = 0x0C
-EDI = 0x10
-EBP = 0x14
-EAX = 0x18
-DS = 0x1C
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-EFLAGS = 0x30
-OLDESP = 0x34
-OLDSS = 0x38
+#define nr_syscalls ((syscall_table_size)/4)
CF_MASK = 0x00000001
TF_MASK = 0x00000100
@@ -76,23 +75,16 @@ DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000
-/* These are replaces for paravirtualization */
-#define DISABLE_INTERRUPTS cli
-#define ENABLE_INTERRUPTS sti
-#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-#define INTERRUPT_RETURN iret
-#define GET_CR0_INTO_EAX movl %cr0, %eax
-
#ifdef CONFIG_PREEMPT
-#define preempt_stop DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
-#define preempt_stop
+#define preempt_stop(clobbers)
#define resume_kernel restore_nocheck
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
- testl $IF_MASK,EFLAGS(%esp) # interrupts off?
+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
jz 1f
TRACE_IRQS_ON
1:
@@ -107,6 +99,9 @@ VM_MASK = 0x00020000
#define SAVE_ALL \
cld; \
+ pushl %gs; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET gs, 0;*/\
pushl %es; \
CFI_ADJUST_CFA_OFFSET 4;\
/*CFI_REL_OFFSET es, 0;*/\
@@ -136,7 +131,9 @@ VM_MASK = 0x00020000
CFI_REL_OFFSET ebx, 0;\
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
- movl %edx, %es;
+ movl %edx, %es; \
+ movl $(__KERNEL_PDA), %edx; \
+ movl %edx, %gs
#define RESTORE_INT_REGS \
popl %ebx; \
@@ -169,17 +166,22 @@ VM_MASK = 0x00020000
2: popl %es; \
CFI_ADJUST_CFA_OFFSET -4;\
/*CFI_RESTORE es;*/\
-.section .fixup,"ax"; \
-3: movl $0,(%esp); \
- jmp 1b; \
+3: popl %gs; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE gs;*/\
+.pushsection .fixup,"ax"; \
4: movl $0,(%esp); \
+ jmp 1b; \
+5: movl $0,(%esp); \
jmp 2b; \
-.previous; \
+6: movl $0,(%esp); \
+ jmp 3b; \
.section __ex_table,"a";\
.align 4; \
- .long 1b,3b; \
- .long 2b,4b; \
-.previous
+ .long 1b,4b; \
+ .long 2b,5b; \
+ .long 3b,6b; \
+.popsection
#define RING0_INT_FRAME \
CFI_STARTPROC simple;\
@@ -198,18 +200,18 @@ VM_MASK = 0x00020000
#define RING0_PTREGS_FRAME \
CFI_STARTPROC simple;\
CFI_SIGNAL_FRAME;\
- CFI_DEF_CFA esp, OLDESP-EBX;\
- /*CFI_OFFSET cs, CS-OLDESP;*/\
- CFI_OFFSET eip, EIP-OLDESP;\
- /*CFI_OFFSET es, ES-OLDESP;*/\
- /*CFI_OFFSET ds, DS-OLDESP;*/\
- CFI_OFFSET eax, EAX-OLDESP;\
- CFI_OFFSET ebp, EBP-OLDESP;\
- CFI_OFFSET edi, EDI-OLDESP;\
- CFI_OFFSET esi, ESI-OLDESP;\
- CFI_OFFSET edx, EDX-OLDESP;\
- CFI_OFFSET ecx, ECX-OLDESP;\
- CFI_OFFSET ebx, EBX-OLDESP
+ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
+ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
+ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
+ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
+ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
+ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
+ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
+ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
+ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
+ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
+ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
ENTRY(ret_from_fork)
CFI_STARTPROC
@@ -237,17 +239,18 @@ ENTRY(ret_from_fork)
ALIGN
RING0_PTREGS_FRAME
ret_from_exception:
- preempt_stop
+ preempt_stop(CLBR_ANY)
ret_from_intr:
GET_THREAD_INFO(%ebp)
check_userspace:
- movl EFLAGS(%esp), %eax # mix EFLAGS and CS
- movb CS(%esp), %al
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
+
ENTRY(resume_userspace)
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
@@ -258,14 +261,14 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- DISABLE_INTERRUPTS
+ DISABLE_INTERRUPTS(CLBR_ANY)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_nocheck
need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
- testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
jmp need_resched
@@ -287,7 +290,7 @@ sysenter_past_esp:
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
*/
- ENABLE_INTERRUPTS
+ ENABLE_INTERRUPTS(CLBR_NONE)
pushl $(__USER_DS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ss, 0*/
@@ -331,20 +334,27 @@ sysenter_past_esp:
cmpl $(nr_syscalls), %eax
jae syscall_badsys
call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp)
- DISABLE_INTERRUPTS
+ movl %eax,PT_EAX(%esp)
+ DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx
jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
- movl EIP(%esp), %edx
- movl OLDESP(%esp), %ecx
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
xorl %ebp,%ebp
TRACE_IRQS_ON
+1: mov PT_GS(%esp), %gs
ENABLE_INTERRUPTS_SYSEXIT
CFI_ENDPROC
-
+.pushsection .fixup,"ax"
+2: movl $0,PT_GS(%esp)
+ jmp 1b
+.section __ex_table,"a"
+ .align 4
+ .long 1b,2b
+.popsection
# system call handler stub
ENTRY(system_call)
@@ -353,7 +363,7 @@ ENTRY(system_call)
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
- testl $TF_MASK,EFLAGS(%esp)
+ testl $TF_MASK,PT_EFLAGS(%esp)
jz no_singlestep
orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
@@ -365,9 +375,9 @@ no_singlestep:
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp) # store the return value
+ movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
@@ -376,12 +386,12 @@ syscall_exit:
jne syscall_exit_work
restore_all:
- movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
- # Warning: OLDSS(%esp) contains the wrong/random values if we
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
# See comments in process.c:copy_thread() for details.
- movb OLDSS(%esp), %ah
- movb CS(%esp), %al
+ movb PT_OLDSS(%esp), %ah
+ movb PT_CS(%esp), %al
andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
@@ -390,13 +400,13 @@ restore_nocheck:
TRACE_IRQS_IRET
restore_nocheck_notrace:
RESTORE_REGS
- addl $4, %esp
+ addl $4, %esp # skip orig_eax/error_code
CFI_ADJUST_CFA_OFFSET -4
1: INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
TRACE_IRQS_ON
- ENABLE_INTERRUPTS
+ ENABLE_INTERRUPTS(CLBR_NONE)
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
@@ -408,33 +418,42 @@ iret_exc:
CFI_RESTORE_STATE
ldt_ss:
- larl OLDSS(%esp), %eax
+ larl PT_OLDSS(%esp), %eax
jnz restore_nocheck
testl $0x00400000, %eax # returning to 32bit stack?
jnz restore_nocheck # allright, normal return
+
+#ifdef CONFIG_PARAVIRT
+ /*
+ * The kernel can't run on a non-flat stack if paravirt mode
+ * is active. Rather than try to fix up the high bits of
+ * ESP, bypass this code entirely. This may break DOSemu
+ * and/or Wine support in a paravirt VM, although the option
+ * is still available to implement the setting of the high
+ * 16-bits in the INTERRUPT_RETURN paravirt-op.
+ */
+ cmpl $0, paravirt_ops+PARAVIRT_enabled
+ jne restore_nocheck
+#endif
+
/* If returning to userspace with 16bit stack,
* try to fix the higher word of ESP, as the CPU
* won't restore it.
* This is an "official" bug of all the x86-compatible
* CPUs, which we can try to work around to make
* dosemu and wine happy. */
- subl $8, %esp # reserve space for switch16 pointer
- CFI_ADJUST_CFA_OFFSET 8
- DISABLE_INTERRUPTS
+ movl PT_OLDESP(%esp), %eax
+ movl %esp, %edx
+ call patch_espfix_desc
+ pushl $__ESPFIX_SS
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ DISABLE_INTERRUPTS(CLBR_EAX)
TRACE_IRQS_OFF
- movl %esp, %eax
- /* Set up the 16bit stack frame with switch32 pointer on top,
- * and a switch16 pointer on top of the current frame. */
- call setup_x86_bogus_stack
- CFI_ADJUST_CFA_OFFSET -8 # frame has moved
- TRACE_IRQS_IRET
- RESTORE_REGS
- lss 20+4(%esp), %esp # switch to 16bit stack
-1: INTERRUPT_RETURN
-.section __ex_table,"a"
- .align 4
- .long 1b,iret_exc
-.previous
+ lss (%esp), %esp
+ CFI_ADJUST_CFA_OFFSET -8
+ jmp restore_nocheck
CFI_ENDPROC
# perform work that needs to be done immediately before resumption
@@ -445,7 +464,7 @@ work_pending:
jz work_notifysig
work_resched:
call schedule
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
@@ -458,7 +477,8 @@ work_resched:
work_notifysig: # deal with pending signals and
# notify-resume requests
- testl $VM_MASK, EFLAGS(%esp)
+#ifdef CONFIG_VM86
+ testl $VM_MASK, PT_EFLAGS(%esp)
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
@@ -468,29 +488,30 @@ work_notifysig: # deal with pending signals and
ALIGN
work_notifysig_v86:
-#ifdef CONFIG_VM86
pushl %ecx # save ti_flags for do_notify_resume
CFI_ADJUST_CFA_OFFSET 4
call save_v86_state # %eax contains pt_regs pointer
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
movl %eax, %esp
+#else
+ movl %esp, %eax
+#endif
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace_sig
-#endif
# perform syscall exit tracing
ALIGN
syscall_trace_entry:
- movl $-ENOSYS,EAX(%esp)
+ movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
xorl %edx,%edx
call do_syscall_trace
cmpl $0, %eax
jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
# so must skip actual syscall
- movl ORIG_EAX(%esp), %eax
+ movl PT_ORIG_EAX(%esp), %eax
cmpl $(nr_syscalls), %eax
jnae syscall_call
jmp syscall_exit
@@ -501,7 +522,7 @@ syscall_exit_work:
testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
jz work_pending
TRACE_IRQS_ON
- ENABLE_INTERRUPTS # could let do_syscall_trace() call
+ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
@@ -515,39 +536,38 @@ syscall_fault:
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
- movl $-EFAULT,EAX(%esp)
+ movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
syscall_badsys:
- movl $-ENOSYS,EAX(%esp)
+ movl $-ENOSYS,PT_EAX(%esp)
jmp resume_userspace
CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
- movl %esp, %eax; \
- /* switch to 32bit stack using the pointer on top of 16bit stack */ \
- lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
- /* copy data from 16bit stack to 32bit stack */ \
- call fixup_x86_bogus_stack; \
- /* put ESP to the proper location */ \
- movl %eax, %esp;
-#define UNWIND_ESPFIX_STACK \
+ /* since we are on the wrong stack, we can't do this in C :( */
+ movl %gs:PDA_cpu, %ebx; \
+ PER_CPU(cpu_gdt_descr, %ebx); \
+ movl GDS_address(%ebx), %ebx; \
+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+ addl %esp, %eax; \
+ pushl $__KERNEL_DS; \
+ CFI_ADJUST_CFA_OFFSET 4; \
pushl %eax; \
CFI_ADJUST_CFA_OFFSET 4; \
+ lss (%esp), %esp; \
+ CFI_ADJUST_CFA_OFFSET -8;
+#define UNWIND_ESPFIX_STACK \
movl %ss, %eax; \
- /* see if on 16bit stack */ \
+ /* see if on espfix stack */ \
cmpw $__ESPFIX_SS, %ax; \
- je 28f; \
-27: popl %eax; \
- CFI_ADJUST_CFA_OFFSET -4; \
-.section .fixup,"ax"; \
-28: movl $__KERNEL_DS, %eax; \
+ jne 27f; \
+ movl $__KERNEL_DS, %eax; \
movl %eax, %ds; \
movl %eax, %es; \
- /* switch to 32bit stack */ \
+ /* switch to normal stack */ \
FIXUP_ESPFIX_STACK; \
- jmp 27b; \
-.previous
+27:;
/*
* Build the entry stubs and pointer table with
@@ -608,13 +628,16 @@ KPROBE_ENTRY(page_fault)
CFI_ADJUST_CFA_OFFSET 4
ALIGN
error_code:
+ /* the function address is in %gs's slot on the stack */
+ pushl %es
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET es, 0*/
pushl %ds
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ds, 0*/
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eax, 0
- xorl %eax, %eax
pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp, 0
@@ -627,7 +650,6 @@ error_code:
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx, 0
- decl %eax # eax = -1
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx, 0
@@ -635,18 +657,20 @@ error_code:
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0
cld
- pushl %es
+ pushl %gs
CFI_ADJUST_CFA_OFFSET 4
- /*CFI_REL_OFFSET es, 0*/
+ /*CFI_REL_OFFSET gs, 0*/
+ movl $(__KERNEL_PDA), %ecx
+ movl %ecx, %gs
UNWIND_ESPFIX_STACK
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
/*CFI_REGISTER es, ecx*/
- movl ES(%esp), %edi # get the function address
- movl ORIG_EAX(%esp), %edx # get the error code
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- /*CFI_REL_OFFSET es, ES*/
+ movl PT_GS(%esp), %edi # get the function address
+ movl PT_ORIG_EAX(%esp), %edx # get the error code
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ mov %ecx, PT_GS(%esp)
+ /*CFI_REL_OFFSET gs, ES*/
movl $(__USER_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
@@ -682,7 +706,7 @@ ENTRY(device_not_available)
GET_CR0_INTO_EAX
testl $0x4, %eax # EM (math emulation bit)
jne device_not_available_emulate
- preempt_stop
+ preempt_stop(CLBR_ANY)
call math_state_restore
jmp ret_from_exception
device_not_available_emulate:
@@ -754,7 +778,7 @@ KPROBE_ENTRY(nmi)
cmpw $__ESPFIX_SS, %ax
popl %eax
CFI_ADJUST_CFA_OFFSET -4
- je nmi_16bit_stack
+ je nmi_espfix_stack
cmpl $sysenter_entry,(%esp)
je nmi_stack_fixup
pushl %eax
@@ -797,7 +821,7 @@ nmi_debug_stack_check:
FIX_STACK(24,nmi_stack_correct, 1)
jmp nmi_stack_correct
-nmi_16bit_stack:
+nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
* create the pointer to lss back
@@ -806,7 +830,6 @@ nmi_16bit_stack:
CFI_ADJUST_CFA_OFFSET 4
pushl %esp
CFI_ADJUST_CFA_OFFSET 4
- movzwl %sp, %esp
addw $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
@@ -817,11 +840,11 @@ nmi_16bit_stack:
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
- CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
- lss 12+4(%esp), %esp # back to 16bit stack
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
1: INTERRUPT_RETURN
CFI_ENDPROC
.section __ex_table,"a"
@@ -830,6 +853,19 @@ nmi_16bit_stack:
.previous
KPROBE_END(nmi)
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+1: iret
+.section __ex_table,"a"
+ .align 4
+ .long 1b,iret_exc
+.previous
+
+ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+#endif
+
KPROBE_ENTRY(int3)
RING0_INT_FRAME
pushl $-1 # mark this as an int
@@ -949,26 +985,27 @@ ENTRY(arch_unwind_init_running)
movl 4(%esp), %edx
movl (%esp), %ecx
leal 4(%esp), %eax
- movl %ebx, EBX(%edx)
+ movl %ebx, PT_EBX(%edx)
xorl %ebx, %ebx
- movl %ebx, ECX(%edx)
- movl %ebx, EDX(%edx)
- movl %esi, ESI(%edx)
- movl %edi, EDI(%edx)
- movl %ebp, EBP(%edx)
- movl %ebx, EAX(%edx)
- movl $__USER_DS, DS(%edx)
- movl $__USER_DS, ES(%edx)
- movl %ebx, ORIG_EAX(%edx)
- movl %ecx, EIP(%edx)
+ movl %ebx, PT_ECX(%edx)
+ movl %ebx, PT_EDX(%edx)
+ movl %esi, PT_ESI(%edx)
+ movl %edi, PT_EDI(%edx)
+ movl %ebp, PT_EBP(%edx)
+ movl %ebx, PT_EAX(%edx)
+ movl $__USER_DS, PT_DS(%edx)
+ movl $__USER_DS, PT_ES(%edx)
+ movl $0, PT_GS(%edx)
+ movl %ebx, PT_ORIG_EAX(%edx)
+ movl %ecx, PT_EIP(%edx)
movl 12(%esp), %ecx
- movl $__KERNEL_CS, CS(%edx)
- movl %ebx, EFLAGS(%edx)
- movl %eax, OLDESP(%edx)
+ movl $__KERNEL_CS, PT_CS(%edx)
+ movl %ebx, PT_EFLAGS(%edx)
+ movl %eax, PT_OLDESP(%edx)
movl 8(%esp), %eax
movl %ecx, 8(%esp)
- movl EBX(%edx), %ebx
- movl $__KERNEL_DS, OLDSS(%edx)
+ movl PT_EBX(%edx), %ebx
+ movl $__KERNEL_DS, PT_OLDSS(%edx)
jmpl *%eax
CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
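The hard-coded EBX/ECX/.../OLDSS byte constants that entry.S used to carry are replaced above by PT_* symbols, so adding the new %gs slot to the saved frame shifts every later offset automatically. Those symbols come from the kernel's asm-offsets machinery (not shown in this excerpt), which derives them from struct pt_regs with offsetof. A schematic sketch of that idea follows; the struct name, field layout and DEFINE/OFFSET helpers here are simplified assumptions, not the kernel's exact source:

/* Schematic sketch of build-time offset generation for assembly code. */
#include <stddef.h>

struct pt_regs_sketch {
	long ebx, ecx, edx, esi, edi, ebp, eax;
	int  xds, xes, xgs;		/* %gs is now part of the frame */
	long orig_eax, eip;
	int  xcs;
	long eflags, esp;
	int  xss;
};

/* Each DEFINE() emits a "->SYMBOL value" marker into the compiler's
 * assembly output; the build turns those markers into
 *   #define PT_EAX <offset>
 * in a generated header that entry.S can use. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void generate_offsets(void)
{
	OFFSET(PT_EBX,      pt_regs_sketch, ebx);
	OFFSET(PT_EAX,      pt_regs_sketch, eax);
	OFFSET(PT_GS,       pt_regs_sketch, xgs);
	OFFSET(PT_ORIG_EAX, pt_regs_sketch, orig_eax);
	OFFSET(PT_EIP,      pt_regs_sketch, eip);
	OFFSET(PT_OLDESP,   pt_regs_sketch, esp);
}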
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index ca31f18d277..edef5084ce1 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -55,6 +55,12 @@
*/
ENTRY(startup_32)
+#ifdef CONFIG_PARAVIRT
+ movl %cs, %eax
+ testl $0x3, %eax
+ jnz startup_paravirt
+#endif
+
/*
* Set segments to known values.
*/
@@ -302,6 +308,7 @@ is386: movl $2,%ecx # set MP
movl %eax,%cr0
call check_x87
+ call setup_pda
lgdt cpu_gdt_descr
lidt idt_descr
ljmp $(__KERNEL_CS),$1f
@@ -312,10 +319,13 @@ is386: movl $2,%ecx # set MP
movl %eax,%ds
movl %eax,%es
- xorl %eax,%eax # Clear FS/GS and LDT
+ xorl %eax,%eax # Clear FS and LDT
movl %eax,%fs
- movl %eax,%gs
lldt %ax
+
+ movl $(__KERNEL_PDA),%eax
+ mov %eax,%gs
+
cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
@@ -346,6 +356,23 @@ check_x87:
ret
/*
+ * Point the GDT at this CPU's PDA. On boot this will be
+ * cpu_gdt_table and boot_pda; for secondary CPUs, these will be
+ * that CPU's GDT and PDA.
+ */
+setup_pda:
+ /* get the PDA pointer */
+ movl start_pda, %eax
+
+ /* slot the PDA address into the GDT */
+ mov cpu_gdt_descr+2, %ecx
+ mov %ax, (__KERNEL_PDA+0+2)(%ecx) /* base & 0x0000ffff */
+ shr $16, %eax
+ mov %al, (__KERNEL_PDA+4+0)(%ecx) /* base & 0x00ff0000 */
+ mov %ah, (__KERNEL_PDA+4+3)(%ecx) /* base & 0xff000000 */
+ ret
+
+/*
* setup_idt
*
* sets up a idt with 256 entries pointing to
@@ -465,6 +492,33 @@ ignore_int:
#endif
iret
+#ifdef CONFIG_PARAVIRT
+startup_paravirt:
+ cld
+ movl $(init_thread_union+THREAD_SIZE),%esp
+
+ /* We take pains to preserve all the regs. */
+ pushl %edx
+ pushl %ecx
+ pushl %eax
+
+ /* paravirt.o is last in link, and that probe fn never returns */
+ pushl $__start_paravirtprobe
+1:
+ movl 0(%esp), %eax
+ pushl (%eax)
+ movl 8(%esp), %eax
+ call *(%esp)
+ popl %eax
+
+ movl 4(%esp), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+
+ addl $4, (%esp)
+ jmp 1b
+#endif
+
/*
* Real beginning of normal "text" segment
*/
@@ -484,6 +538,8 @@ ENTRY(empty_zero_page)
* This starts the data section.
*/
.data
+ENTRY(start_pda)
+ .long boot_pda
ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE
@@ -525,7 +581,7 @@ idt_descr:
# boot GDT descriptor (later on used by CPU#0):
.word 0 # 32 bit align gdt_desc.address
-cpu_gdt_descr:
+ENTRY(cpu_gdt_descr)
.word GDT_ENTRIES*8-1
.long cpu_gdt_table
@@ -584,8 +640,8 @@ ENTRY(cpu_gdt_table)
.quad 0x00009a000000ffff /* 0xc0 APM CS 16 code (16 bit) */
.quad 0x004092000000ffff /* 0xc8 APM DS data */
- .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */
- .quad 0x0000000000000000 /* 0xd8 - unused */
+ .quad 0x00c0920000000000 /* 0xd0 - ESPFIX SS */
+ .quad 0x00cf92000000ffff /* 0xd8 - PDA */
.quad 0x0000000000000000 /* 0xe0 - unused */
.quad 0x0000000000000000 /* 0xe8 - unused */
.quad 0x0000000000000000 /* 0xf0 - unused */
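setup_pda above splices the PDA's 32-bit address into the base field of the __KERNEL_PDA descriptor; the x86 GDT format scatters that base across bytes 2-4 and 7 of the 8-byte entry. A small C sketch of the same packing, for illustration only (the function name is made up):

#include <stdint.h>

/* Patch a 32-bit base address into an 8-byte GDT descriptor in place,
 * mirroring the byte stores done by setup_pda in head.S. */
static void set_descriptor_base(uint8_t *desc, uint32_t base)
{
	desc[2] = base & 0xff;		/* base bits  7..0  */
	desc[3] = (base >> 8) & 0xff;	/* base bits 15..8  */
	desc[4] = (base >> 16) & 0xff;	/* base bits 23..16 */
	desc[7] = (base >> 24) & 0xff;	/* base bits 31..24 */
}

Because the 0xd8 template added to cpu_gdt_table above (0x00cf92000000ffff) already describes a flat 4 GB read/write data segment with base 0, patching the base is the only per-CPU work needed to point %gs at that CPU's PDA.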
diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
index 17647a530b2..45a8685bb60 100644
--- a/arch/i386/kernel/hpet.c
+++ b/arch/i386/kernel/hpet.c
@@ -34,6 +34,7 @@ static int __init init_hpet_clocksource(void)
unsigned long hpet_period;
void __iomem* hpet_base;
u64 tmp;
+ int err;
if (!is_hpet_enabled())
return -ENODEV;
@@ -61,7 +62,11 @@ static int __init init_hpet_clocksource(void)
do_div(tmp, FSEC_PER_NSEC);
clocksource_hpet.mult = (u32)tmp;
- return clocksource_register(&clocksource_hpet);
+ err = clocksource_register(&clocksource_hpet);
+ if (err)
+ iounmap(hpet_base);
+
+ return err;
}
module_init(init_hpet_clocksource);
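The hpet.c change follows the usual init-path rule: anything acquired before a failing step must be released on that step's error path, so the HPET mapping is now unmapped when clocksource registration fails. A generic sketch of the shape of that fix, with hypothetical device constants and a hypothetical registration hook standing in for the real ones:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>

/* Hypothetical base/size and registration step, for illustration only. */
#define EXAMPLE_PHYS_BASE	0xfed00000UL
#define EXAMPLE_SIZE		1024
extern int example_register(void __iomem *base);

static int __init example_init(void)
{
	void __iomem *base;
	int err;

	base = ioremap_nocache(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE);
	if (!base)
		return -ENOMEM;

	err = example_register(base);
	if (err)
		iounmap(base);	/* undo the mapping if the later step fails */

	return err;
}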
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 62996cd1708..c8d45821c78 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -381,7 +381,10 @@ void __init init_ISA_irqs (void)
}
}
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
{
int i;
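The init_IRQ() change relies on GCC's weak-alias mechanism: the generic symbol is declared as a weak alias of the native implementation, so a backend (here paravirt.c) that provides a strong init_IRQ() wins at link time, while bare-metal kernels keep the native path. A tiny standalone illustration of the mechanism, with made-up function names:

#include <stdio.h>

void native_do_setup(void)
{
	printf("native setup\n");
}

/* Weak alias: do_setup() resolves to native_do_setup() unless another
 * object file in the link provides a strong definition of do_setup(). */
void do_setup(void) __attribute__((weak, alias("native_do_setup")));

int main(void)
{
	do_setup();
	return 0;
}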
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 3b7a63e0ed1..e21dcde0790 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/htirq.h>
+#include <linux/freezer.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -153,14 +154,20 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
* the interrupt, and we need to make sure the entry is fully populated
* before that happens.
*/
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
- unsigned long flags;
union entry_union eu;
eu.entry = e;
- spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(apic, pin, e);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -836,8 +843,7 @@ static int __init find_isa_irq_pin(int irq, int type)
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
- mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
- mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+ mp_bus_id_to_type[lbus] == MP_BUS_MCA
) &&
(mp_irqs[i].mpc_irqtype == type) &&
(mp_irqs[i].mpc_srcbusirq == irq))
@@ -856,8 +862,7 @@ static int __init find_isa_irq_apic(int irq, int type)
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
- mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
- mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+ mp_bus_id_to_type[lbus] == MP_BUS_MCA
) &&
(mp_irqs[i].mpc_irqtype == type) &&
(mp_irqs[i].mpc_srcbusirq == irq))
@@ -987,12 +992,6 @@ static int EISA_ELCR(unsigned int irq)
#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) (0)
-/* NEC98 interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_NEC98_trigger(idx) (0)
-#define default_NEC98_polarity(idx) (0)
-
static int __init MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
@@ -1027,11 +1026,6 @@ static int __init MPBIOS_polarity(int idx)
polarity = default_MCA_polarity(idx);
break;
}
- case MP_BUS_NEC98: /* NEC 98 pin */
- {
- polarity = default_NEC98_polarity(idx);
- break;
- }
default:
{
printk(KERN_WARNING "broken BIOS!!\n");
@@ -1101,11 +1095,6 @@ static int MPBIOS_trigger(int idx)
trigger = default_MCA_trigger(idx);
break;
}
- case MP_BUS_NEC98: /* NEC 98 pin */
- {
- trigger = default_NEC98_trigger(idx);
- break;
- }
default:
{
printk(KERN_WARNING "broken BIOS!!\n");
@@ -1167,7 +1156,6 @@ static int pin_2_irq(int idx, int apic, int pin)
case MP_BUS_ISA: /* ISA pin */
case MP_BUS_EISA:
case MP_BUS_MCA:
- case MP_BUS_NEC98:
{
irq = mp_irqs[idx].mpc_srcbusirq;
break;
@@ -1235,7 +1223,7 @@ static inline int IO_APIC_irq_trigger(int irq)
}
/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
+static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
static int __assign_irq_vector(int irq)
{
@@ -1360,8 +1348,8 @@ static void __init setup_IO_APIC_irqs(void)
if (!apic && (irq < 16))
disable_8259A_irq(irq);
}
- ioapic_write_entry(apic, pin, entry);
spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(apic, pin, entry);
set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1926,6 +1914,15 @@ static void __init setup_ioapic_ids_from_mpc(void)
static void __init setup_ioapic_ids_from_mpc(void) { }
#endif
+static int no_timer_check __initdata;
+
+static int __init notimercheck(char *s)
+{
+ no_timer_check = 1;
+ return 1;
+}
+__setup("no_timer_check", notimercheck);
+
/*
* There is a nasty bug in some older SMP boards, their mptable lies
* about the timer IRQ. We do the following to work around the situation:
@@ -1934,10 +1931,13 @@ static void __init setup_ioapic_ids_from_mpc(void) { }
* - if this function detects that timer IRQs are defunct, then we fall
* back to ISA timer IRQs
*/
-static int __init timer_irq_works(void)
+int __init timer_irq_works(void)
{
unsigned long t1 = jiffies;
+ if (no_timer_check)
+ return 1;
+
local_irq_enable();
/* Let ten ticks pass... */
mdelay((10 * 1000) / HZ);
@@ -2161,9 +2161,15 @@ static inline void unlock_ExtINT_logic(void)
unsigned char save_control, save_freq_select;
pin = find_isa_irq_pin(8, mp_INT);
+ if (pin == -1) {
+ WARN_ON_ONCE(1);
+ return;
+ }
apic = find_isa_irq_apic(8, mp_INT);
- if (pin == -1)
+ if (apic == -1) {
+ WARN_ON_ONCE(1);
return;
+ }
entry0 = ioapic_read_entry(apic, pin);
clear_IO_APIC_pin(apic, pin);
@@ -2208,7 +2214,7 @@ int timer_uses_ioapic_pin_0;
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
*/
-static inline void check_timer(void)
+static inline void __init check_timer(void)
{
int apic1, pin1, apic2, pin2;
int vector;
@@ -2856,8 +2862,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
- ioapic_write_entry(ioapic, pin, entry);
spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(ioapic, pin, entry);
set_native_irq_info(irq, TARGET_CPUS);
spin_unlock_irqrestore(&ioapic_lock, flags);
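The ioapic_write_entry() split above follows a common kernel locking convention: a double-underscore helper assumes the caller already holds the lock, and a thin wrapper takes the lock for everyone else. The two call sites changed above also need set_native_irq_info() under the same lock, so they now take ioapic_lock once and use the unlocked helper, making the whole update a single critical section. A user-space sketch of the idiom with placeholder names (pthread mutexes standing in for the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

/* __set_state: the caller must already hold state_lock. */
static void __set_state(int v)
{
	shared_state = v;
}

/* set_state: convenience wrapper that takes the lock itself. */
static void set_state(int v)
{
	pthread_mutex_lock(&state_lock);
	__set_state(v);
	pthread_mutex_unlock(&state_lock);
}

/* A caller needing two updates in one critical section takes the lock
 * once and uses the unlocked helper, like setup_IO_APIC_irqs() above. */
static void set_state_and_flag(int v, int *flag)
{
	pthread_mutex_lock(&state_lock);
	__set_state(v);
	*flag = 1;
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	int flag = 0;

	set_state(1);
	set_state_and_flag(2, &flag);
	printf("state=%d flag=%d\n", shared_state, flag);
	return 0;
}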
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index fc79e1e859c..af1d5334499 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -184,7 +184,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
mutex_unlock(&kprobe_mutex);
}
@@ -333,7 +333,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
ss_probe:
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
if (p->ainsn.boostable == 1 && !p->post_handler){
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
diff --git a/arch/i386/kernel/ldt.c b/arch/i386/kernel/ldt.c
index 445211eb2d5..b410e5fb034 100644
--- a/arch/i386/kernel/ldt.c
+++ b/arch/i386/kernel/ldt.c
@@ -160,16 +160,14 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
int err;
unsigned long size;
- void *address;
err = 0;
- address = &default_ldt[0];
size = 5*sizeof(struct desc_struct);
if (size > bytecount)
size = bytecount;
err = size;
- if (copy_to_user(ptr, address, size))
+ if (clear_user(ptr, size))
err = -EFAULT;
return err;
diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c
index eb57a851789..b83672b8952 100644
--- a/arch/i386/kernel/mca.c
+++ b/arch/i386/kernel/mca.c
@@ -283,10 +283,9 @@ static int __init mca_init(void)
bus->f.mca_transform_memory = mca_dummy_transform_memory;
/* get the motherboard device */
- mca_dev = kmalloc(sizeof(struct mca_device), GFP_KERNEL);
+ mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL);
if(unlikely(!mca_dev))
goto out_nomem;
- memset(mca_dev, 0, sizeof(struct mca_device));
/*
* We do not expect many MCA interrupts during initialization,
@@ -310,11 +309,9 @@ static int __init mca_init(void)
mca_dev->slot = MCA_MOTHERBOARD;
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
- mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+ mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if(unlikely(!mca_dev))
goto out_unlock_nomem;
- memset(mca_dev, 0, sizeof(struct mca_device));
-
/* Put motherboard into video setup mode, read integrated video
* POS registers, and turn motherboard setup off.
@@ -349,10 +346,9 @@ static int __init mca_init(void)
}
if(which_scsi) {
/* found a scsi card */
- mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+ mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if(unlikely(!mca_dev))
goto out_unlock_nomem;
- memset(mca_dev, 0, sizeof(struct mca_device));
for(j = 0; j < 8; j++)
mca_dev->pos[j] = pos[j];
@@ -378,10 +374,9 @@ static int __init mca_init(void)
if(!mca_read_and_store_pos(pos))
continue;
- mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+ mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if(unlikely(!mca_dev))
goto out_unlock_nomem;
- memset(mca_dev, 0, sizeof(struct mca_device));
for(j=0; j<8; j++)
mca_dev->pos[j]=pos[j];
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 23f5984d065..972346604f9 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -703,7 +703,6 @@ static struct sysdev_driver mc_sysdev_driver = {
.resume = mc_sysdev_resume,
};
-#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit int
mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
@@ -726,7 +725,6 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
static struct notifier_block mc_cpu_notifier = {
.notifier_call = mc_cpu_callback,
};
-#endif
static int __init microcode_init (void)
{
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 470cf97e7cd..d7d9c8b23f7 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -108,7 +108,8 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
+ const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
+ *para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -118,6 +119,8 @@ int module_finalize(const Elf_Ehdr *hdr,
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s;
+ if (!strcmp(".parainstructions", secstrings + s->sh_name))
+ para = s;
}
if (alt) {
@@ -132,6 +135,12 @@ int module_finalize(const Elf_Ehdr *hdr,
lseg, lseg + locks->sh_size,
tseg, tseg + text->sh_size);
}
+
+ if (para) {
+ void *pseg = (void *)para->sh_addr;
+ apply_paravirt(pseg, pseg + para->sh_size);
+ }
+
return 0;
}
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 442aaf8c77e..2ce67228dff 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -249,8 +249,6 @@ static void __init MP_bus_info (struct mpc_config_bus *m)
mp_current_pci_id++;
} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
- } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
- mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
} else {
printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index d535cdbbfd2..1d1a56cae34 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -195,7 +195,6 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
{
const u32 __user *tmp = (const u32 __user *)buf;
u32 data[2];
- size_t rv;
u32 reg = *ppos;
int cpu = iminor(file->f_dentry->d_inode);
int err;
@@ -203,7 +202,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
if (count % 8)
return -EINVAL; /* Invalid chunk size */
- for (rv = 0; count; count -= 8) {
+ for (; count; count -= 8) {
if (copy_from_user(&data, tmp, 8))
return -EFAULT;
err = do_wrmsr(cpu, reg, data[0], data[1]);
@@ -239,18 +238,17 @@ static struct file_operations msr_fops = {
.open = msr_open,
};
-static int msr_class_device_create(int i)
+static int msr_device_create(int i)
{
int err = 0;
- struct class_device *class_err;
+ struct device *dev;
- class_err = class_device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i);
- if (IS_ERR(class_err))
- err = PTR_ERR(class_err);
+ dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, i), "msr%d",i);
+ if (IS_ERR(dev))
+ err = PTR_ERR(dev);
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
static int msr_class_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -258,10 +256,10 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
- msr_class_device_create(cpu);
+ msr_device_create(cpu);
break;
case CPU_DEAD:
- class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
break;
}
return NOTIFY_OK;
@@ -271,7 +269,6 @@ static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
{
.notifier_call = msr_class_cpu_callback,
};
-#endif
static int __init msr_init(void)
{
@@ -290,7 +287,7 @@ static int __init msr_init(void)
goto out_chrdev;
}
for_each_online_cpu(i) {
- err = msr_class_device_create(i);
+ err = msr_device_create(i);
if (err != 0)
goto out_class;
}
@@ -302,7 +299,7 @@ static int __init msr_init(void)
out_class:
i = 0;
for_each_online_cpu(i)
- class_device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, i));
class_destroy(msr_class);
out_chrdev:
unregister_chrdev(MSR_MAJOR, "cpu/msr");
@@ -314,7 +311,7 @@ static void __exit msr_exit(void)
{
int cpu = 0;
for_each_online_cpu(cpu)
- class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
class_destroy(msr_class);
unregister_chrdev(MSR_MAJOR, "cpu/msr");
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index eaafe233a5d..f5bc7e1be80 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -22,6 +22,7 @@
#include <linux/percpu.h>
#include <linux/dmi.h>
#include <linux/kprobes.h>
+#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/nmi.h>
@@ -42,6 +43,8 @@ int nmi_watchdog_enabled;
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
@@ -867,14 +870,16 @@ static unsigned int
void touch_nmi_watchdog (void)
{
- int i;
+ if (nmi_watchdog > 0) {
+ unsigned cpu;
- /*
- * Just reset the alert counters, (other CPUs might be
- * spinning on locks we hold):
- */
- for_each_possible_cpu(i)
- alert_counter[i] = 0;
+ /*
+ * Just reset the alert counters, (other CPUs might be
+ * spinning on locks we hold):
+ */
+ for_each_present_cpu (cpu)
+ alert_counter[cpu] = 0;
+ }
/*
* Tickle the softlockup detector too:
@@ -907,6 +912,16 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
touched = 1;
}
+ if (cpu_isset(cpu, backtrace_mask)) {
+ static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+
+ spin_lock(&lock);
+ printk("NMI backtrace for cpu %d\n", cpu);
+ dump_stack();
+ spin_unlock(&lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+
sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
/* if the apic timer isn't firing, this cpu isn't doing much */
@@ -1033,6 +1048,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
#endif
+void __trigger_all_cpu_backtrace(void)
+{
+ int i;
+
+ backtrace_mask = cpu_online_map;
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpus_empty(backtrace_mask))
+ break;
+ mdelay(1);
+ }
+}
+
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
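[Annotation, not part of the patch] The backtrace facility added to nmi.c is a simple handshake: the trigger copies cpu_online_map into backtrace_mask and polls for up to ten seconds, while each CPU's NMI tick prints a stack trace and clears its own bit. A condensed sketch of that handshake; the demo_* names are illustrative.

static cpumask_t demo_mask = CPU_MASK_NONE;

static void demo_trigger_all(void)
{
        int i;

        demo_mask = cpu_online_map;
        /* wait up to 10 seconds for every CPU to respond */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpus_empty(demo_mask))
                        break;
                mdelay(1);
        }
}

/* called from each CPU's NMI tick */
static void demo_tick(int cpu)
{
        if (cpu_isset(cpu, demo_mask)) {
                dump_stack();
                cpu_clear(cpu, demo_mask);
        }
}
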
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
new file mode 100644
index 00000000000..3dceab5828f
--- /dev/null
+++ b/arch/i386/kernel/paravirt.c
@@ -0,0 +1,569 @@
+/* Paravirtualization interfaces
+ Copyright (C) 2006 Rusty Russell IBM Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/bcd.h>
+#include <linux/start_kernel.h>
+
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+#include <asm/fixmap.h>
+#include <asm/apic.h>
+#include <asm/tlbflush.h>
+
+/* nop stub */
+static void native_nop(void)
+{
+}
+
+static void __init default_banner(void)
+{
+ printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+ paravirt_ops.name);
+}
+
+char *memory_setup(void)
+{
+ return paravirt_ops.memory_setup();
+}
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(name, code) \
+ extern const char start_##name[], end_##name[]; \
+ asm("start_" #name ": " code "; end_" #name ":")
+DEF_NATIVE(cli, "cli");
+DEF_NATIVE(sti, "sti");
+DEF_NATIVE(popf, "push %eax; popf");
+DEF_NATIVE(pushf, "pushf; pop %eax");
+DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
+DEF_NATIVE(iret, "iret");
+DEF_NATIVE(sti_sysexit, "sti; sysexit");
+
+static const struct native_insns
+{
+ const char *start, *end;
+} native_insns[] = {
+ [PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
+ [PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
+ [PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
+ [PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
+ [PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
+ [PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
+ [PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
+};
+
+static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+ unsigned int insn_len;
+
+ /* Don't touch it if we don't have a replacement */
+ if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
+ return len;
+
+ insn_len = native_insns[type].end - native_insns[type].start;
+
+ /* Similarly if we can't fit replacement. */
+ if (len < insn_len)
+ return len;
+
+ memcpy(insns, native_insns[type].start, insn_len);
+ return insn_len;
+}
+
+static fastcall unsigned long native_get_debugreg(int regno)
+{
+ unsigned long val = 0; /* Damn you, gcc! */
+
+ switch (regno) {
+ case 0:
+ asm("movl %%db0, %0" :"=r" (val)); break;
+ case 1:
+ asm("movl %%db1, %0" :"=r" (val)); break;
+ case 2:
+ asm("movl %%db2, %0" :"=r" (val)); break;
+ case 3:
+ asm("movl %%db3, %0" :"=r" (val)); break;
+ case 6:
+ asm("movl %%db6, %0" :"=r" (val)); break;
+ case 7:
+ asm("movl %%db7, %0" :"=r" (val)); break;
+ default:
+ BUG();
+ }
+ return val;
+}
+
+static fastcall void native_set_debugreg(int regno, unsigned long value)
+{
+ switch (regno) {
+ case 0:
+ asm("movl %0,%%db0" : /* no output */ :"r" (value));
+ break;
+ case 1:
+ asm("movl %0,%%db1" : /* no output */ :"r" (value));
+ break;
+ case 2:
+ asm("movl %0,%%db2" : /* no output */ :"r" (value));
+ break;
+ case 3:
+ asm("movl %0,%%db3" : /* no output */ :"r" (value));
+ break;
+ case 6:
+ asm("movl %0,%%db6" : /* no output */ :"r" (value));
+ break;
+ case 7:
+ asm("movl %0,%%db7" : /* no output */ :"r" (value));
+ break;
+ default:
+ BUG();
+ }
+}
+
+void init_IRQ(void)
+{
+ paravirt_ops.init_IRQ();
+}
+
+static fastcall void native_clts(void)
+{
+ asm volatile ("clts");
+}
+
+static fastcall unsigned long native_read_cr0(void)
+{
+ unsigned long val;
+ asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+ return val;
+}
+
+static fastcall void native_write_cr0(unsigned long val)
+{
+ asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr2(void)
+{
+ unsigned long val;
+ asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+ return val;
+}
+
+static fastcall void native_write_cr2(unsigned long val)
+{
+ asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr3(void)
+{
+ unsigned long val;
+ asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+ return val;
+}
+
+static fastcall void native_write_cr3(unsigned long val)
+{
+ asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr4(void)
+{
+ unsigned long val;
+ asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+ return val;
+}
+
+static fastcall unsigned long native_read_cr4_safe(void)
+{
+ unsigned long val;
+ /* This could fault if %cr4 does not exist */
+ asm("1: movl %%cr4, %0 \n"
+ "2: \n"
+ ".section __ex_table,\"a\" \n"
+ ".long 1b,2b \n"
+ ".previous \n"
+ : "=r" (val): "0" (0));
+ return val;
+}
+
+static fastcall void native_write_cr4(unsigned long val)
+{
+ asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static fastcall unsigned long native_save_fl(void)
+{
+ unsigned long f;
+ asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+ return f;
+}
+
+static fastcall void native_restore_fl(unsigned long f)
+{
+ asm volatile("pushl %0 ; popfl": /* no output */
+ :"g" (f)
+ :"memory", "cc");
+}
+
+static fastcall void native_irq_disable(void)
+{
+ asm volatile("cli": : :"memory");
+}
+
+static fastcall void native_irq_enable(void)
+{
+ asm volatile("sti": : :"memory");
+}
+
+static fastcall void native_safe_halt(void)
+{
+ asm volatile("sti; hlt": : :"memory");
+}
+
+static fastcall void native_halt(void)
+{
+ asm volatile("hlt": : :"memory");
+}
+
+static fastcall void native_wbinvd(void)
+{
+ asm volatile("wbinvd": : :"memory");
+}
+
+static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
+{
+ unsigned long long val;
+
+ asm volatile("2: rdmsr ; xorl %0,%0\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: movl %3,%0 ; jmp 1b\n\t"
+ ".previous\n\t"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n\t"
+ " .long 2b,3b\n\t"
+ ".previous"
+ : "=r" (*err), "=A" (val)
+ : "c" (msr), "i" (-EFAULT));
+
+ return val;
+}
+
+static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
+{
+ int err;
+ asm volatile("2: wrmsr ; xorl %0,%0\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: movl %4,%0 ; jmp 1b\n\t"
+ ".previous\n\t"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n\t"
+ " .long 2b,3b\n\t"
+ ".previous"
+ : "=a" (err)
+ : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+ "i" (-EFAULT));
+ return err;
+}
+
+static fastcall unsigned long long native_read_tsc(void)
+{
+ unsigned long long val;
+ asm volatile("rdtsc" : "=A" (val));
+ return val;
+}
+
+static fastcall unsigned long long native_read_pmc(void)
+{
+ unsigned long long val;
+ asm volatile("rdpmc" : "=A" (val));
+ return val;
+}
+
+static fastcall void native_load_tr_desc(void)
+{
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+ asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+ asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+ asm ("sgdt %0":"=m" (*dtr));
+}
+
+static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+ asm ("sidt %0":"=m" (*dtr));
+}
+
+static fastcall unsigned long native_store_tr(void)
+{
+ unsigned long tr;
+ asm ("str %0":"=r" (tr));
+ return tr;
+}
+
+static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+ C(0); C(1); C(2);
+#undef C
+}
+
+static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
+{
+ u32 *lp = (u32 *)((char *)dt + entry*8);
+ lp[0] = entry_low;
+ lp[1] = entry_high;
+}
+
+static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+ native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+ native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+ native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_load_esp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ tss->esp0 = thread->esp0;
+
+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+ tss->ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+}
+
+static fastcall void native_io_delay(void)
+{
+ asm volatile("outb %al,$0x80");
+}
+
+static fastcall void native_flush_tlb(void)
+{
+ __native_flush_tlb();
+}
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+static fastcall void native_flush_tlb_global(void)
+{
+ __native_flush_tlb_global();
+}
+
+static fastcall void native_flush_tlb_single(u32 addr)
+{
+ __native_flush_tlb_single(addr);
+}
+
+#ifndef CONFIG_X86_PAE
+static fastcall void native_set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+}
+
+#else /* CONFIG_X86_PAE */
+
+static fastcall void native_set_pte(pte_t *ptep, pte_t pte)
+{
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
+{
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+ set_64bit((unsigned long long *)ptep,pte_val(pteval));
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
+}
+
+static fastcall void native_set_pud(pud_t *pudp, pud_t pudval)
+{
+ *pudp = pudval;
+}
+
+static fastcall void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = 0;
+}
+
+static fastcall void native_pmd_clear(pmd_t *pmd)
+{
+ u32 *tmp = (u32 *)pmd;
+ *tmp = 0;
+ smp_wmb();
+ *(tmp + 1) = 0;
+}
+#endif /* CONFIG_X86_PAE */
+
+/* These are in entry.S */
+extern fastcall void native_iret(void);
+extern fastcall void native_irq_enable_sysexit(void);
+
+static int __init print_banner(void)
+{
+ paravirt_ops.banner();
+ return 0;
+}
+core_initcall(print_banner);
+
+/* We simply declare start_kernel to be the paravirt probe of last resort. */
+paravirt_probe(start_kernel);
+
+struct paravirt_ops paravirt_ops = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+ .kernel_rpl = 0,
+
+ .patch = native_patch,
+ .banner = default_banner,
+ .arch_setup = native_nop,
+ .memory_setup = machine_specific_memory_setup,
+ .get_wallclock = native_get_wallclock,
+ .set_wallclock = native_set_wallclock,
+ .time_init = time_init_hook,
+ .init_IRQ = native_init_IRQ,
+
+ .cpuid = native_cpuid,
+ .get_debugreg = native_get_debugreg,
+ .set_debugreg = native_set_debugreg,
+ .clts = native_clts,
+ .read_cr0 = native_read_cr0,
+ .write_cr0 = native_write_cr0,
+ .read_cr2 = native_read_cr2,
+ .write_cr2 = native_write_cr2,
+ .read_cr3 = native_read_cr3,
+ .write_cr3 = native_write_cr3,
+ .read_cr4 = native_read_cr4,
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = native_write_cr4,
+ .save_fl = native_save_fl,
+ .restore_fl = native_restore_fl,
+ .irq_disable = native_irq_disable,
+ .irq_enable = native_irq_enable,
+ .safe_halt = native_safe_halt,
+ .halt = native_halt,
+ .wbinvd = native_wbinvd,
+ .read_msr = native_read_msr,
+ .write_msr = native_write_msr,
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+ .load_tr_desc = native_load_tr_desc,
+ .set_ldt = native_set_ldt,
+ .load_gdt = native_load_gdt,
+ .load_idt = native_load_idt,
+ .store_gdt = native_store_gdt,
+ .store_idt = native_store_idt,
+ .store_tr = native_store_tr,
+ .load_tls = native_load_tls,
+ .write_ldt_entry = native_write_ldt_entry,
+ .write_gdt_entry = native_write_gdt_entry,
+ .write_idt_entry = native_write_idt_entry,
+ .load_esp0 = native_load_esp0,
+
+ .set_iopl_mask = native_set_iopl_mask,
+ .io_delay = native_io_delay,
+ .const_udelay = __const_udelay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ .apic_write = native_apic_write,
+ .apic_write_atomic = native_apic_write_atomic,
+ .apic_read = native_apic_read,
+#endif
+
+ .flush_tlb_user = native_flush_tlb,
+ .flush_tlb_kernel = native_flush_tlb_global,
+ .flush_tlb_single = native_flush_tlb_single,
+
+ .set_pte = native_set_pte,
+ .set_pte_at = native_set_pte_at,
+ .set_pmd = native_set_pmd,
+ .pte_update = (void *)native_nop,
+ .pte_update_defer = (void *)native_nop,
+#ifdef CONFIG_X86_PAE
+ .set_pte_atomic = native_set_pte_atomic,
+ .set_pte_present = native_set_pte_present,
+ .set_pud = native_set_pud,
+ .pte_clear = native_pte_clear,
+ .pmd_clear = native_pmd_clear,
+#endif
+
+ .irq_enable_sysexit = native_irq_enable_sysexit,
+ .iret = native_iret,
+};
+EXPORT_SYMBOL(paravirt_ops);
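[Annotation, not part of the patch] Everything in the new paravirt.c funnels through the exported paravirt_ops table, initialised above to the native implementations. A hypervisor backend would overwrite selected entries before they are first used; the sketch below is purely hypothetical (there is no "demo" backend in this patch) and only assigns fields that the structure above actually defines.

static fastcall void demo_io_delay(void)
{
        /* a virtual machine has no ISA port 0x80 worth poking */
}

static void __init demo_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on demo backend\n");
}

static void __init demo_activate(void)
{
        paravirt_ops.name = "demo backend";
        paravirt_ops.paravirt_enabled = 1;
        paravirt_ops.io_delay = demo_io_delay;
        paravirt_ops.banner = demo_banner;
}
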
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 25fe6685393..41af692c158 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(dma_free_coherent);
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
{
- void __iomem *mem_base;
+ void __iomem *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = (pages + 31)/32;
@@ -92,14 +92,12 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
if (!mem_base)
goto out;
- dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
if (!dev->dma_mem)
goto out;
- memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
- dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!dev->dma_mem->bitmap)
goto free1_out;
- memset(dev->dma_mem->bitmap, 0, bitmap_size);
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
@@ -114,6 +112,8 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
free1_out:
kfree(dev->dma_mem->bitmap);
out:
+ if (mem_base)
+ iounmap(mem_base);
return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
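[Annotation, not part of the patch] Two small cleanups in pci-dma.c above: kzalloc() replaces the kmalloc()/memset() pairs, and the failure path now iounmap()s the region it mapped instead of leaking it. The allocation idiom in isolation; demo_state is an illustrative type.

struct demo_state {
        u32 flags;
        void *buf;
};

static struct demo_state *demo_alloc(void)
{
        /* allocates and zero-fills in one call */
        return kzalloc(sizeof(struct demo_state), GFP_KERNEL);
}
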
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index dd53c58f64f..99308510a17 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -56,6 +56,7 @@
#include <asm/tlbflush.h>
#include <asm/cpu.h>
+#include <asm/pda.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -99,22 +100,18 @@ EXPORT_SYMBOL(enable_hlt);
*/
void default_idle(void)
{
- local_irq_enable();
-
if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
- while (!need_resched()) {
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
- }
+ local_irq_disable();
+ if (!need_resched())
+ safe_halt(); /* enables interrupts racelessly */
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
} else {
- while (!need_resched())
- cpu_relax();
+ /* loop is done by the caller */
+ cpu_relax();
}
}
#ifdef CONFIG_APM_MODULE
@@ -128,14 +125,7 @@ EXPORT_SYMBOL(default_idle);
*/
static void poll_idle (void)
{
- local_irq_enable();
-
- asm volatile(
- "2:"
- "testl %0, %1;"
- "rep; nop;"
- "je 2b;"
- : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+ cpu_relax();
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -256,8 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
static void mwait_idle(void)
{
local_irq_enable();
- while (!need_resched())
- mwait_idle_with_hints(0, 0);
+ mwait_idle_with_hints(0, 0);
}
void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
@@ -314,8 +303,8 @@ void show_regs(struct pt_regs * regs)
regs->eax,regs->ebx,regs->ecx,regs->edx);
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
regs->esi, regs->edi, regs->ebp);
- printk(" DS: %04x ES: %04x\n",
- 0xffff & regs->xds,0xffff & regs->xes);
+ printk(" DS: %04x ES: %04x GS: %04x\n",
+ 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
cr0 = read_cr0();
cr2 = read_cr2();
@@ -346,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
regs.xds = __USER_DS;
regs.xes = __USER_DS;
+ regs.xgs = __KERNEL_PDA;
regs.orig_eax = -1;
regs.eip = (unsigned long) kernel_thread_helper;
regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -431,7 +421,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
p->thread.eip = (unsigned long) ret_from_fork;
savesegment(fs,p->thread.fs);
- savesegment(gs,p->thread.gs);
tsk = current;
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -508,7 +497,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->regs.ds = regs->xds;
dump->regs.es = regs->xes;
savesegment(fs,dump->regs.fs);
- savesegment(gs,dump->regs.gs);
+ dump->regs.gs = regs->xgs;
dump->regs.orig_eax = regs->orig_eax;
dump->regs.eip = regs->eip;
dump->regs.cs = regs->xcs;
@@ -648,22 +637,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
__unlazy_fpu(prev_p);
+
+ /* we're going to use this soon, after a few expensive things */
+ if (next_p->fpu_counter > 5)
+ prefetch(&next->i387.fxsave);
+
/*
* Reload esp0.
*/
load_esp0(tss, next);
/*
- * Save away %fs and %gs. No need to save %es and %ds, as
- * those are always kernel segments while inside the kernel.
- * Doing this before setting the new TLS descriptors avoids
- * the situation where we temporarily have non-reloadable
- * segments in %fs and %gs. This could be an issue if the
- * NMI handler ever used %fs or %gs (it does not today), or
- * if the kernel is running inside of a hypervisor layer.
+ * Save away %fs. No need to save %gs, as it was saved on the
+ * stack on entry. No need to save %es and %ds, as those are
+ * always kernel segments while inside the kernel. Doing this
+ * before setting the new TLS descriptors avoids the situation
+ * where we temporarily have non-reloadable segments in %fs
+ * and %gs. This could be an issue if the NMI handler ever
+ * used %fs or %gs (it does not today), or if the kernel is
+ * running inside of a hypervisor layer.
*/
savesegment(fs, prev->fs);
- savesegment(gs, prev->gs);
/*
* Load the per-thread Thread-Local Storage descriptor.
@@ -671,22 +665,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
load_TLS(next, cpu);
/*
- * Restore %fs and %gs if needed.
+ * Restore %fs if needed.
*
- * Glibc normally makes %fs be zero, and %gs is one of
- * the TLS segments.
+ * Glibc normally makes %fs be zero.
*/
if (unlikely(prev->fs | next->fs))
loadsegment(fs, next->fs);
- if (prev->gs | next->gs)
- loadsegment(gs, next->gs);
-
- /*
- * Restore IOPL if needed.
- */
- if (unlikely(prev->iopl != next->iopl))
- set_iopl_mask(next->iopl);
+ write_pda(pcurrent, next_p);
/*
* Now maybe handle debug registers and/or IO bitmaps
@@ -697,6 +683,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
disable_tsc(prev_p, next_p);
+ /* If the task has used fpu the last 5 timeslices, just do a full
+ * restore of the math state immediately to avoid the trap; the
+ * chances of needing FPU soon are obviously high now
+ */
+ if (next_p->fpu_counter > 5)
+ math_state_restore();
+
return prev_p;
}
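[Annotation, not part of the patch] The __switch_to() changes above add a heuristic for eager FPU restore: a task that used the FPU in more than five consecutive timeslices gets its math state prefetched early in the switch and reloaded before returning, instead of waiting for the first device-not-available trap. Condensed into one block for illustration; the real code splits the two steps as shown in the hunks.

        if (next_p->fpu_counter > 5) {
                /* warm the cache while the rest of the switch is still running */
                prefetch(&next_p->thread.i387.fxsave);
                /* then restore immediately rather than trapping on first FPU use */
                math_state_restore();
        }
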
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 775f50e9395..f3f94ac5736 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -94,13 +94,9 @@ static int putreg(struct task_struct *child,
return -EIO;
child->thread.fs = value;
return 0;
- case GS:
- if (value && (value & 3) != 3)
- return -EIO;
- child->thread.gs = value;
- return 0;
case DS:
case ES:
+ case GS:
if (value && (value & 3) != 3)
return -EIO;
value &= 0xffff;
@@ -116,8 +112,8 @@ static int putreg(struct task_struct *child,
value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
break;
}
- if (regno > GS*4)
- regno -= 2*4;
+ if (regno > ES*4)
+ regno -= 1*4;
put_stack_long(child, regno - sizeof(struct pt_regs), value);
return 0;
}
@@ -131,18 +127,16 @@ static unsigned long getreg(struct task_struct *child,
case FS:
retval = child->thread.fs;
break;
- case GS:
- retval = child->thread.gs;
- break;
case DS:
case ES:
+ case GS:
case SS:
case CS:
retval = 0xffff;
/* fall through */
default:
- if (regno > GS*4)
- regno -= 2*4;
+ if (regno > ES*4)
+ regno -= 1*4;
regno = regno - sizeof(struct pt_regs);
retval &= get_stack_long(child, regno);
}
diff --git a/arch/i386/kernel/quirks.c b/arch/i386/kernel/quirks.c
index 9f6ab1789bb..a01320a7b63 100644
--- a/arch/i386/kernel/quirks.c
+++ b/arch/i386/kernel/quirks.c
@@ -3,10 +3,23 @@
*/
#include <linux/pci.h>
#include <linux/irq.h>
+#include <asm/pci-direct.h>
+#include <asm/genapic.h>
+#include <asm/cpu.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+{
+#ifdef CONFIG_X86_64
+ if (genapic != &apic_flat)
+ panic("APIC mode must be flat on this system\n");
+#elif defined(CONFIG_X86_GENERICARCH)
+ if (genapic != &apic_default)
+ panic("APIC mode must be default(flat) on this system. Use apic=default\n");
+#endif
+}
-static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
+void __init quirk_intel_irqbalance(void)
{
u8 config, rev;
u32 word;
@@ -16,18 +29,18 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
* based platforms.
* Disable SW irqbalance/affinity on those platforms.
*/
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+ rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
if (rev > 0x9)
return;
printk(KERN_INFO "Intel E7520/7320/7525 detected.");
- /* enable access to config space*/
- pci_read_config_byte(dev, 0xf4, &config);
- pci_write_config_byte(dev, 0xf4, config|0x2);
+ /* enable access to config space */
+ config = read_pci_config_byte(0, 0, 0, 0xf4);
+ write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);
/* read xTPR register */
- raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
+ word = read_pci_config_16(0, 0, 0x40, 0x4c);
if (!(word & (1 << 13))) {
printk(KERN_INFO "Disabling irq balancing and affinity\n");
@@ -38,13 +51,24 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
#ifdef CONFIG_PROC_FS
no_irq_affinity = 1;
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ printk(KERN_INFO "Disabling cpu hotplug control\n");
+ enable_cpu_hotplug = 0;
+#endif
+#ifdef CONFIG_X86_64
+ /* force the genapic selection to flat mode so that
+ * interrupts can be redirected to more than one CPU.
+ */
+ genapic_force = &apic_flat;
+#endif
}
- /* put back the original value for config space*/
+ /* put back the original value for config space */
if (!(config & 0x2))
- pci_write_config_byte(dev, 0xf4, config);
+ write_pci_config_byte(0, 0, 0, 0xf4, config);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, verify_quirk_intel_irqbalance);
+
#endif
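[Annotation, not part of the patch] The quirk above is reworked to use the direct type-1 config accessors from asm/pci-direct.h, so it no longer needs a struct pci_dev and can run earlier in boot. The access pattern in isolation, mirroring the calls in the hunks; demo_read_xtpr is an illustrative wrapper.

static void __init demo_read_xtpr(void)
{
        u8 config, rev;
        u32 word;

        /* direct (type 1) config cycles: bus 0, device 0, function 0 */
        rev    = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
        config = read_pci_config_byte(0, 0, 0, 0xf4);
        write_pci_config_byte(0, 0, 0, 0xf4, config | 0x2);
        word   = read_pci_config_16(0, 0, 0x40, 0x4c);  /* xTPR, as above */

        if (rev <= 0x9 && !(word & (1 << 13)))
                printk(KERN_INFO "xTPR not set on this chipset\n");
}
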
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 84278e0093a..3514b4153f7 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <asm/apic.h>
#include <asm/desc.h>
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 141041dde74..79df6e612db 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -63,9 +63,6 @@
#include <setup_arch.h>
#include <bios_ebda.h>
-/* Forward Declaration. */
-void __init find_max_pfn(void);
-
/* This value is set up by the early boot code to point to the value
immediately after the boot time page tables. It contains a *physical*
address, and must not be in the .bss segment! */
@@ -76,11 +73,8 @@ int disable_pse __devinitdata = 0;
/*
* Machine setup..
*/
-
-#ifdef CONFIG_EFI
-int efi_enabled = 0;
-EXPORT_SYMBOL(efi_enabled);
-#endif
+extern struct resource code_resource;
+extern struct resource data_resource;
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -99,12 +93,6 @@ unsigned int machine_submodel_id;
unsigned int BIOS_revision;
unsigned int mca_pentium_flag;
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;
@@ -134,7 +122,6 @@ struct ist_info ist_info;
defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
EXPORT_SYMBOL(ist_info);
#endif
-struct e820map e820;
extern void early_cpu_init(void);
extern int root_mountflags;
@@ -149,516 +136,6 @@ static char command_line[COMMAND_LINE_SIZE];
unsigned char __initdata boot_params[PARAM_SIZE];
-static struct resource data_resource = {
- .name = "Kernel data",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource system_rom_resource = {
- .name = "System ROM",
- .start = 0xf0000,
- .end = 0xfffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource extension_rom_resource = {
- .name = "Extension ROM",
- .start = 0xe0000,
- .end = 0xeffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource adapter_rom_resources[] = { {
- .name = "Adapter ROM",
- .start = 0xc8000,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
- .name = "Adapter ROM",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-} };
-
-static struct resource video_rom_resource = {
- .name = "Video ROM",
- .start = 0xc0000,
- .end = 0xc7fff,
- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource video_ram_resource = {
- .name = "Video RAM area",
- .start = 0xa0000,
- .end = 0xbffff,
- .flags = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource standard_io_resources[] = { {
- .name = "dma1",
- .start = 0x0000,
- .end = 0x001f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "pic1",
- .start = 0x0020,
- .end = 0x0021,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "timer0",
- .start = 0x0040,
- .end = 0x0043,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "timer1",
- .start = 0x0050,
- .end = 0x0053,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "keyboard",
- .start = 0x0060,
- .end = 0x006f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "dma page reg",
- .start = 0x0080,
- .end = 0x008f,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "pic2",
- .start = 0x00a0,
- .end = 0x00a1,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "dma2",
- .start = 0x00c0,
- .end = 0x00df,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
- .name = "fpu",
- .start = 0x00f0,
- .end = 0x00ff,
- .flags = IORESOURCE_BUSY | IORESOURCE_IO
-} };
-
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
- unsigned char *p, sum = 0;
-
- for (p = rom; p < rom + length; p++)
- sum += *p;
- return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
- unsigned long start, length, upper;
- unsigned char *rom;
- int i;
-
- /* video rom */
- upper = adapter_rom_resources[0].start;
- for (start = video_rom_resource.start; start < upper; start += 2048) {
- rom = isa_bus_to_virt(start);
- if (!romsignature(rom))
- continue;
-
- video_rom_resource.start = start;
-
- /* 0 < length <= 0x7f * 512, historically */
- length = rom[2] * 512;
-
- /* if checksum okay, trust length byte */
- if (length && romchecksum(rom, length))
- video_rom_resource.end = start + length - 1;
-
- request_resource(&iomem_resource, &video_rom_resource);
- break;
- }
-
- start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
- if (start < upper)
- start = upper;
-
- /* system rom */
- request_resource(&iomem_resource, &system_rom_resource);
- upper = system_rom_resource.start;
-
- /* check for extension rom (ignore length byte!) */
- rom = isa_bus_to_virt(extension_rom_resource.start);
- if (romsignature(rom)) {
- length = extension_rom_resource.end - extension_rom_resource.start + 1;
- if (romchecksum(rom, length)) {
- request_resource(&iomem_resource, &extension_rom_resource);
- upper = extension_rom_resource.start;
- }
- }
-
- /* check for adapter roms on 2k boundaries */
- for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
- rom = isa_bus_to_virt(start);
- if (!romsignature(rom))
- continue;
-
- /* 0 < length <= 0x7f * 512, historically */
- length = rom[2] * 512;
-
- /* but accept any length that fits if checksum okay */
- if (!length || start + length > upper || !romchecksum(rom, length))
- continue;
-
- adapter_rom_resources[i].start = start;
- adapter_rom_resources[i].end = start + length - 1;
- request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
- start = adapter_rom_resources[i++].end & ~2047UL;
- }
-}
-
-static void __init limit_regions(unsigned long long size)
-{
- unsigned long long current_addr = 0;
- int i;
-
- if (efi_enabled) {
- efi_memory_desc_t *md;
- void *p;
-
- for (p = memmap.map, i = 0; p < memmap.map_end;
- p += memmap.desc_size, i++) {
- md = p;
- current_addr = md->phys_addr + (md->num_pages << 12);
- if (md->type == EFI_CONVENTIONAL_MEMORY) {
- if (current_addr >= size) {
- md->num_pages -=
- (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
- memmap.nr_map = i + 1;
- return;
- }
- }
- }
- }
- for (i = 0; i < e820.nr_map; i++) {
- current_addr = e820.map[i].addr + e820.map[i].size;
- if (current_addr < size)
- continue;
-
- if (e820.map[i].type != E820_RAM)
- continue;
-
- if (e820.map[i].addr >= size) {
- /*
- * This region starts past the end of the
- * requested size, skip it completely.
- */
- e820.nr_map = i;
- } else {
- e820.nr_map = i + 1;
- e820.map[i].size -= current_addr - size;
- }
- return;
- }
-}
-
-void __init add_memory_region(unsigned long long start,
- unsigned long long size, int type)
-{
- int x;
-
- if (!efi_enabled) {
- x = e820.nr_map;
-
- if (x == E820MAX) {
- printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
- return;
- }
-
- e820.map[x].addr = start;
- e820.map[x].size = size;
- e820.map[x].type = type;
- e820.nr_map++;
- }
-} /* add_memory_region */
-
-#define E820_DEBUG 1
-
-static void __init print_memory_map(char *who)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- printk(" %s: %016Lx - %016Lx ", who,
- e820.map[i].addr,
- e820.map[i].addr + e820.map[i].size);
- switch (e820.map[i].type) {
- case E820_RAM: printk("(usable)\n");
- break;
- case E820_RESERVED:
- printk("(reserved)\n");
- break;
- case E820_ACPI:
- printk("(ACPI data)\n");
- break;
- case E820_NVS:
- printk("(ACPI NVS)\n");
- break;
- default: printk("type %lu\n", e820.map[i].type);
- break;
- }
- }
-}
-
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries. The following
- * replaces the original e820 map with a new one, removing overlaps.
- *
- */
-struct change_member {
- struct e820entry *pbios; /* pointer to original bios entry */
- unsigned long long addr; /* address for this change point */
-};
-static struct change_member change_point_list[2*E820MAX] __initdata;
-static struct change_member *change_point[2*E820MAX] __initdata;
-static struct e820entry *overlap_list[E820MAX] __initdata;
-static struct e820entry new_bios[E820MAX] __initdata;
-
-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-{
- struct change_member *change_tmp;
- unsigned long current_type, last_type;
- unsigned long long last_addr;
- int chgidx, still_changing;
- int overlap_entries;
- int new_bios_entry;
- int old_nr, new_nr, chg_nr;
- int i;
-
- /*
- Visually we're performing the following (1,2,3,4 = memory types)...
-
- Sample memory map (w/overlaps):
- ____22__________________
- ______________________4_
- ____1111________________
- _44_____________________
- 11111111________________
- ____________________33__
- ___________44___________
- __________33333_________
- ______________22________
- ___________________2222_
- _________111111111______
- _____________________11_
- _________________4______
-
- Sanitized equivalent (no overlap):
- 1_______________________
- _44_____________________
- ___1____________________
- ____22__________________
- ______11________________
- _________1______________
- __________3_____________
- ___________44___________
- _____________33_________
- _______________2________
- ________________1_______
- _________________4______
- ___________________2____
- ____________________33__
- ______________________4_
- */
-
- /* if there's only one memory region, don't bother */
- if (*pnr_map < 2)
- return -1;
-
- old_nr = *pnr_map;
-
- /* bail out if we find any unreasonable addresses in bios map */
- for (i=0; i<old_nr; i++)
- if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
- return -1;
-
- /* create pointers for initial change-point information (for sorting) */
- for (i=0; i < 2*old_nr; i++)
- change_point[i] = &change_point_list[i];
-
- /* record all known change-points (starting and ending addresses),
- omitting those that are for empty memory regions */
- chgidx = 0;
- for (i=0; i < old_nr; i++) {
- if (biosmap[i].size != 0) {
- change_point[chgidx]->addr = biosmap[i].addr;
- change_point[chgidx++]->pbios = &biosmap[i];
- change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
- change_point[chgidx++]->pbios = &biosmap[i];
- }
- }
- chg_nr = chgidx; /* true number of change-points */
-
- /* sort change-point list by memory addresses (low -> high) */
- still_changing = 1;
- while (still_changing) {
- still_changing = 0;
- for (i=1; i < chg_nr; i++) {
- /* if <current_addr> > <last_addr>, swap */
- /* or, if current=<start_addr> & last=<end_addr>, swap */
- if ((change_point[i]->addr < change_point[i-1]->addr) ||
- ((change_point[i]->addr == change_point[i-1]->addr) &&
- (change_point[i]->addr == change_point[i]->pbios->addr) &&
- (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
- )
- {
- change_tmp = change_point[i];
- change_point[i] = change_point[i-1];
- change_point[i-1] = change_tmp;
- still_changing=1;
- }
- }
- }
-
- /* create a new bios memory map, removing overlaps */
- overlap_entries=0; /* number of entries in the overlap table */
- new_bios_entry=0; /* index for creating new bios map entries */
- last_type = 0; /* start with undefined memory type */
- last_addr = 0; /* start with 0 as last starting address */
- /* loop through change-points, determining affect on the new bios map */
- for (chgidx=0; chgidx < chg_nr; chgidx++)
- {
- /* keep track of all overlapping bios entries */
- if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
- {
- /* add map entry to overlap list (> 1 entry implies an overlap) */
- overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
- }
- else
- {
- /* remove entry from list (order independent, so swap with last) */
- for (i=0; i<overlap_entries; i++)
- {
- if (overlap_list[i] == change_point[chgidx]->pbios)
- overlap_list[i] = overlap_list[overlap_entries-1];
- }
- overlap_entries--;
- }
- /* if there are overlapping entries, decide which "type" to use */
- /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
- current_type = 0;
- for (i=0; i<overlap_entries; i++)
- if (overlap_list[i]->type > current_type)
- current_type = overlap_list[i]->type;
- /* continue building up new bios map based on this information */
- if (current_type != last_type) {
- if (last_type != 0) {
- new_bios[new_bios_entry].size =
- change_point[chgidx]->addr - last_addr;
- /* move forward only if the new size was non-zero */
- if (new_bios[new_bios_entry].size != 0)
- if (++new_bios_entry >= E820MAX)
- break; /* no more space left for new bios entries */
- }
- if (current_type != 0) {
- new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
- new_bios[new_bios_entry].type = current_type;
- last_addr=change_point[chgidx]->addr;
- }
- last_type = current_type;
- }
- }
- new_nr = new_bios_entry; /* retain count for new bios entries */
-
- /* copy new bios mapping into original location */
- memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
- *pnr_map = new_nr;
-
- return 0;
-}
-
-/*
- * Copy the BIOS e820 map into a safe place.
- *
- * Sanity-check it while we're at it..
- *
- * If we're lucky and live on a modern system, the setup code
- * will have given us a memory map that we can use to properly
- * set up memory. If we aren't, we'll fake a memory map.
- *
- * We check to see that the memory map contains at least 2 elements
- * before we'll use it, because the detection code in setup.S may
- * not be perfect and most every PC known to man has two memory
- * regions: one from 0 to 640k, and one from 1mb up. (The IBM
- * thinkpad 560x, for example, does not cooperate with the memory
- * detection code.)
- */
-int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-{
- /* Only one memory region (or negative)? Ignore it */
- if (nr_map < 2)
- return -1;
-
- do {
- unsigned long long start = biosmap->addr;
- unsigned long long size = biosmap->size;
- unsigned long long end = start + size;
- unsigned long type = biosmap->type;
-
- /* Overflow in 64 bits? Ignore the memory map. */
- if (start > end)
- return -1;
-
- /*
- * Some BIOSes claim RAM in the 640k - 1M region.
- * Not right. Fix it up.
- */
- if (type == E820_RAM) {
- if (start < 0x100000ULL && end > 0xA0000ULL) {
- if (start < 0xA0000ULL)
- add_memory_region(start, 0xA0000ULL-start, type);
- if (end <= 0x100000ULL)
- continue;
- start = 0x100000ULL;
- size = end - start;
- }
- }
- add_memory_region(start, size, type);
- } while (biosmap++,--nr_map);
- return 0;
-}
-
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
@@ -682,7 +159,7 @@ static inline void copy_edd(void)
}
#endif
-static int __initdata user_defined_memmap = 0;
+int __initdata user_defined_memmap = 0;
/*
* "mem=nopentium" disables the 4MB page tables.
@@ -719,51 +196,6 @@ static int __init parse_mem(char *arg)
}
early_param("mem", parse_mem);
-static int __init parse_memmap(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp(arg, "exactmap") == 0) {
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we
- * still need to know the real mem
- * size before original memory map is
- * reset.
- */
- find_max_pfn();
- saved_max_pfn = max_pfn;
-#endif
- e820.nr_map = 0;
- user_defined_memmap = 1;
- } else {
- /* If the user specifies memory size, we
- * limit the BIOS-provided memory map to
- * that size. exactmap can be used to specify
- * the exact map. mem=number can be used to
- * trim the existing memory map.
- */
- unsigned long long start_at, mem_size;
-
- mem_size = memparse(arg, &arg);
- if (*arg == '@') {
- start_at = memparse(arg+1, &arg);
- add_memory_region(start_at, mem_size, E820_RAM);
- } else if (*arg == '#') {
- start_at = memparse(arg+1, &arg);
- add_memory_region(start_at, mem_size, E820_ACPI);
- } else if (*arg == '$') {
- start_at = memparse(arg+1, &arg);
- add_memory_region(start_at, mem_size, E820_RESERVED);
- } else {
- limit_regions(mem_size);
- user_defined_memmap = 1;
- }
- }
- return 0;
-}
-early_param("memmap", parse_memmap);
-
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
* stored by the crashed kernel.
@@ -828,90 +260,6 @@ static int __init parse_reservetop(char *arg)
early_param("reservetop", parse_reservetop);
/*
- * Callback for efi_memory_walk.
- */
-static int __init
-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfn = arg, pfn;
-
- if (start < end) {
- pfn = PFN_UP(end -1);
- if (pfn > *max_pfn)
- *max_pfn = pfn;
- }
- return 0;
-}
-
-static int __init
-efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-{
- memory_present(0, PFN_UP(start), PFN_DOWN(end));
- return 0;
-}
-
- /*
- * This function checks if the entire range <start,end> is mapped with type.
- *
- * Note: this function only works correct if the e820 table is sorted and
- * not-overlapping, which is the case
- */
-int __init
-e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
-{
- u64 start = s;
- u64 end = e;
- int i;
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- if (type && ei->type != type)
- continue;
- /* is the region (part) in overlap with the current region ?*/
- if (ei->addr >= end || ei->addr + ei->size <= start)
- continue;
- /* if the region is at the beginning of <start,end> we move
- * start to the end of the region since it's ok until there
- */
- if (ei->addr <= start)
- start = ei->addr + ei->size;
- /* if start is now at or beyond end, we're done, full
- * coverage */
- if (start >= end)
- return 1; /* we're done */
- }
- return 0;
-}
-
-/*
- * Find the highest page frame number we have available
- */
-void __init find_max_pfn(void)
-{
- int i;
-
- max_pfn = 0;
- if (efi_enabled) {
- efi_memmap_walk(efi_find_max_pfn, &max_pfn);
- efi_memmap_walk(efi_memory_present_wrapper, NULL);
- return;
- }
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
- /* RAM? */
- if (e820.map[i].type != E820_RAM)
- continue;
- start = PFN_UP(e820.map[i].addr);
- end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
- if (start >= end)
- continue;
- if (end > max_pfn)
- max_pfn = end;
- memory_present(0, start, end);
- }
-}
-
-/*
* Determine low and high memory ranges:
*/
unsigned long __init find_max_low_pfn(void)
@@ -971,68 +319,6 @@ unsigned long __init find_max_low_pfn(void)
}
/*
- * Free all available memory for boot time allocation. Used
- * as a callback function by efi_memory_walk()
- */
-
-static int __init
-free_available_memory(unsigned long start, unsigned long end, void *arg)
-{
- /* check max_low_pfn */
- if (start >= (max_low_pfn << PAGE_SHIFT))
- return 0;
- if (end >= (max_low_pfn << PAGE_SHIFT))
- end = max_low_pfn << PAGE_SHIFT;
- if (start < end)
- free_bootmem(start, end - start);
-
- return 0;
-}
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-{
- int i;
-
- if (efi_enabled) {
- efi_memmap_walk(free_available_memory, NULL);
- return;
- }
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long curr_pfn, last_pfn, size;
- /*
- * Reserve usable low memory
- */
- if (e820.map[i].type != E820_RAM)
- continue;
- /*
- * We are rounding up the start address of usable memory:
- */
- curr_pfn = PFN_UP(e820.map[i].addr);
- if (curr_pfn >= max_low_pfn)
- continue;
- /*
- * ... and at the end of the usable range downwards:
- */
- last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
-
- /*
- * .. finally, did all the rounding and playing
- * around just make the area go away?
- */
- if (last_pfn <= curr_pfn)
- continue;
-
- size = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
- }
-}
-
-/*
* workaround for Dell systems that neglect to reserve EBDA
*/
static void __init reserve_ebda_region(void)
@@ -1118,8 +404,8 @@ void __init setup_bootmem_allocator(void)
* the (very unlikely) case of us accidentally initializing the
* bootmem allocator with an invalid RAM area.
*/
- reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
- bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
+ reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
+ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
@@ -1162,8 +448,7 @@ void __init setup_bootmem_allocator(void)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START, INITRD_SIZE);
- initrd_start =
- INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+ initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start+INITRD_SIZE;
}
else {
@@ -1200,126 +485,6 @@ void __init remapped_pgdat_init(void)
}
}
-/*
- * Request address space for all standard RAM and ROM resources
- * and also for regions reported as reserved by the e820.
- */
-static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-{
- int i;
-
- probe_roms();
- for (i = 0; i < e820.nr_map; i++) {
- struct resource *res;
-#ifndef CONFIG_RESOURCES_64BIT
- if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
- continue;
-#endif
- res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
- switch (e820.map[i].type) {
- case E820_RAM: res->name = "System RAM"; break;
- case E820_ACPI: res->name = "ACPI Tables"; break;
- case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
- default: res->name = "reserved";
- }
- res->start = e820.map[i].addr;
- res->end = res->start + e820.map[i].size - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&iomem_resource, res)) {
- kfree(res);
- continue;
- }
- if (e820.map[i].type == E820_RAM) {
- /*
- * We don't know which RAM region contains kernel data,
- * so we try it repeatedly and let the resource manager
- * test it.
- */
- request_resource(res, code_resource);
- request_resource(res, data_resource);
-#ifdef CONFIG_KEXEC
- request_resource(res, &crashk_res);
-#endif
- }
- }
-}
-
-/*
- * Request address space for all standard resources
- *
- * This is called just before pcibios_init(), which is also a
- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
- */
-static int __init request_standard_resources(void)
-{
- int i;
-
- printk("Setting up standard PCI resources\n");
- if (efi_enabled)
- efi_initialize_iomem_resources(&code_resource, &data_resource);
- else
- legacy_init_iomem_resources(&code_resource, &data_resource);
-
- /* EFI systems may still have VGA */
- request_resource(&iomem_resource, &video_ram_resource);
-
- /* request I/O space for devices used on all i[345]86 PCs */
- for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
- request_resource(&ioport_resource, &standard_io_resources[i]);
- return 0;
-}
-
-subsys_initcall(request_standard_resources);
-
-static void __init register_memory(void)
-{
- unsigned long gapstart, gapsize, round;
- unsigned long long last;
- int i;
-
- /*
- * Search for the bigest gap in the low 32 bits of the e820
- * memory space.
- */
- last = 0x100000000ull;
- gapstart = 0x10000000;
- gapsize = 0x400000;
- i = e820.nr_map;
- while (--i >= 0) {
- unsigned long long start = e820.map[i].addr;
- unsigned long long end = start + e820.map[i].size;
-
- /*
- * Since "last" is at most 4GB, we know we'll
- * fit in 32 bits if this condition is true
- */
- if (last > end) {
- unsigned long gap = last - end;
-
- if (gap > gapsize) {
- gapsize = gap;
- gapstart = end;
- }
- }
- if (start < last)
- last = start;
- }
-
- /*
- * See how much we want to round up: start off with
- * rounding to the next 1MB area.
- */
- round = 0x100000;
- while ((gapsize >> 4) > round)
- round += round;
- /* Fun with two's complement */
- pci_mem_start = (gapstart + round) & -round;
-
- printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
- pci_mem_start, gapstart, gapsize);
-}
-
#ifdef CONFIG_MCA
static void set_mca_bus(int x)
{
@@ -1329,6 +494,12 @@ static void set_mca_bus(int x)
static void set_mca_bus(int x) { }
#endif
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+char * __attribute__((weak)) memory_setup(void)
+{
+ return machine_specific_memory_setup();
+}
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -1381,7 +552,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
else {
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
- print_memory_map(machine_specific_memory_setup());
+ print_memory_map(memory_setup());
}
copy_edd();
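[Annotation, not part of the patch] Note the weak memory_setup() introduced in setup.c above: by default it simply calls machine_specific_memory_setup(), but a strong definition elsewhere (paravirt.c earlier in this patch provides one that dispatches through paravirt_ops) replaces it at link time. The GCC weak-symbol pattern in miniature, with an illustrative name.

/* default used when no strong definition is linked in */
char * __attribute__((weak)) demo_setup(void)
{
        return machine_specific_memory_setup();
}

/*
 * Elsewhere, a strong definition wins automatically:
 *
 *	char *demo_setup(void)
 *	{
 *		return paravirt_ops.memory_setup();
 *	}
 */
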
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 43002cfb40c..65d7620eaa0 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -128,7 +128,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
- GET_SEG(gs);
+ COPY_SEG(gs);
GET_SEG(fs);
COPY_SEG(es);
COPY_SEG(ds);
@@ -244,9 +244,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
{
int tmp, err = 0;
- tmp = 0;
- savesegment(gs, tmp);
- err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+ err |= __put_user(regs->xgs, (unsigned int __user *)&sc->gs);
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 31e5c6573aa..5285aff8367 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -321,7 +321,6 @@ static inline void leave_mm (unsigned long cpu)
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long cpu;
cpu = get_cpu();
@@ -352,7 +351,6 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
smp_mb__after_clear_bit();
out:
put_cpu_no_resched();
- set_irq_regs(old_regs);
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@@ -607,14 +605,11 @@ void smp_send_stop(void)
*/
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
- set_irq_regs(old_regs);
}
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
int wait = call_data->wait;
@@ -637,7 +632,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
mb();
atomic_inc(&call_data->finished);
}
- set_irq_regs(old_regs);
}
/*
@@ -699,6 +693,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
put_cpu();
return -EBUSY;
}
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait);
spin_unlock_bh(&call_lock);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65..4bf0e3c83b8 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,6 +33,11 @@
* Dave Jones : Report invalid combinations of Athlon CPUs.
* Rusty Russell : Hacked into shape for new "hotplug" boot process. */
+
+/* SMP boot always wants to use real time delay to allow sufficient time for
+ * the APs to come online */
+#define USE_REAL_TIME_DELAY
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -52,6 +57,8 @@
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>
+#include <asm/pda.h>
+#include <asm/genapic.h>
#include <mach_apic.h>
#include <mach_wakecpu.h>
@@ -536,11 +543,11 @@ set_cpu_sibling_map(int cpu)
static void __devinit start_secondary(void *unused)
{
/*
- * Dont put anything before smp_callin(), SMP
+ * Don't put *anything* before secondary_cpu_init(), SMP
* booting is too fragile that we want to limit the
* things done here to the most necessary things.
*/
- cpu_init();
+ secondary_cpu_init();
preempt_disable();
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
@@ -599,13 +606,16 @@ void __devinit initialize_secondary(void)
"movl %0,%%esp\n\t"
"jmp *%1"
:
- :"r" (current->thread.esp),"r" (current->thread.eip));
+ :"m" (current->thread.esp),"m" (current->thread.eip));
}
+/* Static state in head.S used to set up a CPU */
extern struct {
void * esp;
unsigned short ss;
} stack_start;
+extern struct i386_pda *start_pda;
+extern struct Xgt_desc_struct cpu_gdt_descr;
#ifdef CONFIG_NUMA
@@ -936,9 +946,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
unsigned long start_eip;
unsigned short nmi_high = 0, nmi_low = 0;
- ++cpucount;
- alternatives_smp_switch(1);
-
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
@@ -946,15 +953,30 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
idle = alloc_idle_task(cpu);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
+
+ /* Pre-allocate and initialize the CPU's GDT and PDA so it
+ doesn't have to do any memory allocation during the
+ delicate CPU-bringup phase. */
+ if (!init_gdt(cpu, idle)) {
+ printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+ return -1; /* ? */
+ }
+
idle->thread.eip = (unsigned long) start_secondary;
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
+ ++cpucount;
+ alternatives_smp_switch(1);
+
/* So we see what's up */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
/* Stack for startup_32 can be just as for start_secondary onwards */
stack_start.esp = (void *) idle->thread.esp;
+ start_pda = cpu_pda(cpu);
+ cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);
+
irq_ctx_init(cpu);
x86_cpu_to_apicid[cpu] = apicid;
@@ -1049,13 +1071,15 @@ void cpu_exit_clear(void)
struct warm_boot_cpu_info {
struct completion *complete;
+ struct work_struct task;
int apicid;
int cpu;
};
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
- struct warm_boot_cpu_info *info = p;
+ struct warm_boot_cpu_info *info =
+ container_of(work, struct warm_boot_cpu_info, task);
do_boot_cpu(info->apicid, info->cpu);
complete(info->complete);
}
@@ -1064,7 +1088,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
{
DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
- struct work_struct task;
int apicid, ret;
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
@@ -1089,7 +1112,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
- INIT_WORK(&task, do_warm_boot_cpu, &info);
+ INIT_WORK(&info.task, do_warm_boot_cpu);
tsc_sync_disabled = 1;
@@ -1097,7 +1120,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
KERNEL_PGD_PTRS);
flush_tlb_all();
- schedule_work(&task);
+ schedule_work(&info.task);
wait_for_completion(&done);
tsc_sync_disabled = 0;
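
The warm-boot conversion above follows the then-new workqueue API: the work_struct is embedded in the caller's context structure, INIT_WORK() takes only the handler, and the handler recovers its context with container_of(). A self-contained sketch of the same pattern (my_ctx/my_handler are illustrative names):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_ctx {
	struct work_struct task;	/* embedded, not passed as a void * */
	int payload;
};

static void my_handler(struct work_struct *work)
{
	/* recover the enclosing structure from the work pointer */
	struct my_ctx *ctx = container_of(work, struct my_ctx, task);

	printk(KERN_INFO "payload=%d\n", ctx->payload);
}

static void kick(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->task, my_handler);	/* two-argument form */
	schedule_work(&ctx->task);
}
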
@@ -1108,34 +1131,15 @@ exit:
}
#endif
-static void smp_tune_scheduling (void)
+static void smp_tune_scheduling(void)
{
unsigned long cachesize; /* kB */
- unsigned long bandwidth = 350; /* MB/s */
- /*
- * Rough estimation for SMP scheduling, this is the number of
- * cycles it takes for a fully memory-limited process to flush
- * the SMP-local cache.
- *
- * (For a P5 this pretty much means we will choose another idle
- * CPU almost always at wakeup time (this is due to the small
- * L1 cache), on PIIs it's around 50-100 usecs, depending on
- * the cache size)
- */
- if (!cpu_khz) {
- /*
- * this basically disables processor-affinity
- * scheduling on SMP without a TSC.
- */
- return;
- } else {
+ if (cpu_khz) {
cachesize = boot_cpu_data.x86_cache_size;
- if (cachesize == -1) {
- cachesize = 16; /* Pentiums, 2x8kB cache */
- bandwidth = 100;
- }
- max_cache_size = cachesize * 1024;
+
+ if (cachesize > 0)
+ max_cache_size = cachesize * 1024;
}
}
@@ -1461,6 +1465,12 @@ int __devinit __cpu_up(unsigned int cpu)
cpu_set(cpu, smp_commenced_mask);
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
+
+#ifdef CONFIG_X86_GENERICARCH
+ if (num_online_cpus() > 8 && genapic == &apic_default)
+ panic("Default flat APIC routing can't be used with > 8 cpus\n");
+#endif
+
return 0;
}
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 713ba39d32c..7de9117b5a3 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -27,7 +27,11 @@
* Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()?
*/
+#ifdef CONFIG_PARAVIRT
+unsigned int __read_mostly vdso_enabled = 0;
+#else
unsigned int __read_mostly vdso_enabled = 1;
+#endif
EXPORT_SYMBOL_GPL(vdso_enabled);
@@ -132,7 +136,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
goto up_fail;
}
- vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
ret = -ENOMEM;
goto up_fail;
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 78af572fd17..c505b16c099 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -56,6 +56,7 @@
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/timer.h>
+#include <asm/time.h>
#include "mach_time.h"
@@ -116,10 +117,7 @@ static int set_rtc_mmss(unsigned long nowtime)
/* gets recalled with irq locally disabled */
/* XXX - does irqsave resolve this? -johnstul */
spin_lock_irqsave(&rtc_lock, flags);
- if (efi_enabled)
- retval = efi_set_rtc_mmss(nowtime);
- else
- retval = mach_set_rtc_mmss(nowtime);
+ retval = set_wallclock(nowtime);
spin_unlock_irqrestore(&rtc_lock, flags);
return retval;
@@ -223,10 +221,7 @@ unsigned long get_cmos_time(void)
spin_lock_irqsave(&rtc_lock, flags);
- if (efi_enabled)
- retval = efi_get_time();
- else
- retval = mach_get_cmos_time();
+ retval = get_wallclock();
spin_unlock_irqrestore(&rtc_lock, flags);
@@ -370,7 +365,7 @@ static void __init hpet_time_init(void)
printk("Using HPET for base-timer\n");
}
- time_init_hook();
+ do_time_init();
}
#endif
@@ -392,5 +387,5 @@ void __init time_init(void)
do_settimeofday(&ts);
- time_init_hook();
+ do_time_init();
}
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 1a2a979cf6a..1e4702dfcd0 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -132,14 +132,20 @@ int __init hpet_enable(void)
* the single HPET timer for system time.
*/
#ifdef CONFIG_HPET_EMULATE_RTC
- if (!(id & HPET_ID_NUMBER))
+ if (!(id & HPET_ID_NUMBER)) {
+ iounmap(hpet_virt_address);
+ hpet_virt_address = NULL;
return -1;
+ }
#endif
hpet_period = hpet_readl(HPET_PERIOD);
- if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD))
+ if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) {
+ iounmap(hpet_virt_address);
+ hpet_virt_address = NULL;
return -1;
+ }
/*
* 64 bit math
@@ -156,8 +162,11 @@ int __init hpet_enable(void)
hpet_use_timer = id & HPET_ID_LEGSUP;
- if (hpet_timer_stop_set_go(hpet_tick))
+ if (hpet_timer_stop_set_go(hpet_tick)) {
+ iounmap(hpet_virt_address);
+ hpet_virt_address = NULL;
return -1;
+ }
use_hpet = 1;
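
Each failure path above now unmaps the HPET registers before bailing out. The same cleanup can be kept in one place with a goto-style error path; a hedged sketch only, where hpet_sanity_check() and hpet_timer_start() are stand-ins for the real checks, not kernel functions:

#include <linux/init.h>
#include <asm/io.h>

static void __iomem *hpet_virt_address;

static int hpet_sanity_check(void)	{ return 1; }	/* assumed */
static int hpet_timer_start(void)	{ return 0; }	/* assumed */

static int __init hpet_enable_sketch(void)
{
	if (!hpet_sanity_check())
		goto fail;
	if (hpet_timer_start())
		goto fail;
	return 0;
fail:
	/* single cleanup point instead of unmapping on every branch */
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
	return -1;
}
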
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 07d6da36a82..79cf608e14c 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -40,14 +40,18 @@ int arch_register_cpu(int num)
* restrictions and assumptions in kernel. This basically
* doesnt add a control file, one cannot attempt to offline
* BSP.
+ *
+ * Also, certain PCI quirks require that hotplug control not be
+ * enabled for all CPUs.
*/
- if (!num)
- cpu_devices[num].cpu.no_control = 1;
+ if (num && enable_cpu_hotplug)
+ cpu_devices[num].cpu.hotpluggable = 1;
return register_cpu(&cpu_devices[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
+int enable_cpu_hotplug = 1;
void arch_unregister_cpu(int num) {
return unregister_cpu(&cpu_devices[num].cpu);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index fe9c5e8e7e6..68de48e498c 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
+#include <linux/nmi.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
@@ -61,9 +62,6 @@ int panic_on_unrecovered_nmi;
asmlinkage int system_call(void);
-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
- { 0, 0 }, { 0, 0 } };
-
/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
@@ -94,7 +92,7 @@ asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
-static int kstack_depth_to_print = 24;
+int kstack_depth_to_print = 24;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
@@ -163,16 +161,25 @@ dump_trace_unwind(struct unwind_frame_info *info, void *data)
{
struct ops_and_data *oad = (struct ops_and_data *)data;
int n = 0;
+ unsigned long sp = UNW_SP(info);
+ if (arch_unw_user_mode(info))
+ return -1;
while (unwind(info) == 0 && UNW_PC(info)) {
n++;
oad->ops->address(oad->data, UNW_PC(info));
if (arch_unw_user_mode(info))
break;
+ if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+ && sp > UNW_SP(info))
+ break;
+ sp = UNW_SP(info);
}
return n;
}
+#define MSG(msg) ops->warning(data, msg)
+
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack,
struct stacktrace_ops *ops, void *data)
@@ -191,29 +198,31 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (unwind_init_frame_info(&info, task, regs) == 0)
unw_ret = dump_trace_unwind(&info, &oad);
} else if (task == current)
- unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+ unw_ret = unwind_init_running(&info, dump_trace_unwind,
+ &oad);
else {
if (unwind_init_blocked(&info, task) == 0)
unw_ret = dump_trace_unwind(&info, &oad);
}
if (unw_ret > 0) {
if (call_trace == 1 && !arch_unw_user_mode(&info)) {
- ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+ ops->warning_symbol(data,
+ "DWARF2 unwinder stuck at %s",
UNW_PC(&info));
if (UNW_SP(&info) >= PAGE_OFFSET) {
- ops->warning(data, "Leftover inexact backtrace:\n");
+ MSG("Leftover inexact backtrace:");
stack = (void *)UNW_SP(&info);
if (!stack)
return;
ebp = UNW_FP(&info);
} else
- ops->warning(data, "Full inexact backtrace again:\n");
+ MSG("Full inexact backtrace again:");
} else if (call_trace >= 1)
return;
else
- ops->warning(data, "Full inexact backtrace again:\n");
+ MSG("Full inexact backtrace again:");
} else
- ops->warning(data, "Inexact backtrace:\n");
+ MSG("Inexact backtrace:");
}
if (!stack) {
unsigned long dummy;
@@ -247,6 +256,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
stack = (unsigned long*)context->previous_esp;
if (!stack)
break;
+ touch_nmi_watchdog();
}
}
EXPORT_SYMBOL(dump_trace);
@@ -379,7 +389,7 @@ void show_registers(struct pt_regs *regs)
* time of the fault..
*/
if (in_kernel) {
- u8 __user *eip;
+ u8 *eip;
int code_bytes = 64;
unsigned char c;
@@ -388,18 +398,20 @@ void show_registers(struct pt_regs *regs)
printk(KERN_EMERG "Code: ");
- eip = (u8 __user *)regs->eip - 43;
- if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+ eip = (u8 *)regs->eip - 43;
+ if (eip < (u8 *)PAGE_OFFSET ||
+ probe_kernel_address(eip, c)) {
/* try starting at EIP */
- eip = (u8 __user *)regs->eip;
+ eip = (u8 *)regs->eip;
code_bytes = 32;
}
for (i = 0; i < code_bytes; i++, eip++) {
- if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+ if (eip < (u8 *)PAGE_OFFSET ||
+ probe_kernel_address(eip, c)) {
printk(" Bad EIP value.");
break;
}
- if (eip == (u8 __user *)regs->eip)
+ if (eip == (u8 *)regs->eip)
printk("<%02x> ", c);
else
printk("%02x ", c);
@@ -415,7 +427,7 @@ static void handle_BUG(struct pt_regs *regs)
if (eip < PAGE_OFFSET)
return;
- if (probe_kernel_address((unsigned short __user *)eip, ud2))
+ if (probe_kernel_address((unsigned short *)eip, ud2))
return;
if (ud2 != 0x0b0f)
return;
@@ -428,11 +440,11 @@ static void handle_BUG(struct pt_regs *regs)
char *file;
char c;
- if (probe_kernel_address((unsigned short __user *)(eip + 2),
- line))
+ if (probe_kernel_address((unsigned short *)(eip + 2), line))
break;
- if (__get_user(file, (char * __user *)(eip + 4)) ||
- (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+ if (probe_kernel_address((char **)(eip + 4), file) ||
+ (unsigned long)file < PAGE_OFFSET ||
+ probe_kernel_address(file, c))
file = "<bad filename>";
printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
@@ -452,7 +464,7 @@ void die(const char * str, struct pt_regs * regs, long err)
u32 lock_owner;
int lock_owner_depth;
} die = {
- .lock = SPIN_LOCK_UNLOCKED,
+ .lock = __SPIN_LOCK_UNLOCKED(die.lock),
.lock_owner = -1,
.lock_owner_depth = 0
};
@@ -707,8 +719,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
"CPU %d.\n", reason, smp_processor_id());
- printk(KERN_EMERG "You probably have a hardware problem with your RAM "
- "chips\n");
+ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
@@ -773,7 +784,6 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
printk(" on CPU%d, eip %08lx, registers:\n",
smp_processor_id(), regs->eip);
show_registers(regs);
- printk(KERN_EMERG "console shuts up ...\n");
console_silent();
spin_unlock(&nmi_print_lock);
bust_spinlocks(0);
@@ -1088,49 +1098,24 @@ fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
#endif
}
-fastcall void setup_x86_bogus_stack(unsigned char * stk)
-{
- unsigned long *switch16_ptr, *switch32_ptr;
- struct pt_regs *regs;
- unsigned long stack_top, stack_bot;
- unsigned short iret_frame16_off;
- int cpu = smp_processor_id();
- /* reserve the space on 32bit stack for the magic switch16 pointer */
- memmove(stk, stk + 8, sizeof(struct pt_regs));
- switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
- regs = (struct pt_regs *)stk;
- /* now the switch32 on 16bit stack */
- stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
- stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
- switch32_ptr = (unsigned long *)(stack_top - 8);
- iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
- /* copy iret frame on 16bit stack */
- memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
- /* fill in the switch pointers */
- switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
- switch16_ptr[1] = __ESPFIX_SS;
- switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
- 8 - CPU_16BIT_STACK_SIZE;
- switch32_ptr[1] = __KERNEL_DS;
-}
-
-fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
+fastcall unsigned long patch_espfix_desc(unsigned long uesp,
+ unsigned long kesp)
{
- unsigned long *switch32_ptr;
- unsigned char *stack16, *stack32;
- unsigned long stack_top, stack_bot;
- int len;
int cpu = smp_processor_id();
- stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
- stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
- switch32_ptr = (unsigned long *)(stack_top - 8);
- /* copy the data from 16bit stack to 32bit stack */
- len = CPU_16BIT_STACK_SIZE - 8 - sp;
- stack16 = (unsigned char *)(stack_bot + sp);
- stack32 = (unsigned char *)
- (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
- memcpy(stack32, stack16, len);
- return stack32;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+ struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
+ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+ unsigned long new_kesp = kesp - base;
+ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+ __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
+ /* Set up base for espfix segment */
+ desc &= 0x00f0ff0000000000ULL;
+ desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+ ((((__u64)base) << 32) & 0xff00000000000000ULL) |
+ ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+ (lim_pages & 0xffff);
+ *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
+ return new_kesp;
}
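
patch_espfix_desc() rebuilds the ESPFIX segment descriptor by scattering the base and the page-granular limit into the i386 descriptor bit layout: limit 15:0 in bits 0-15, base 23:0 in bits 16-39, limit 19:16 in bits 48-51, base 31:24 in bits 56-63, with the type/flag bits preserved. A standalone userspace illustration of the same packing, using the masks from the function above:

#include <stdio.h>
#include <stdint.h>

static uint64_t pack_desc(uint64_t desc, uint32_t base, uint32_t lim_pages)
{
	desc &= 0x00f0ff0000000000ULL;				/* keep type/flags */
	desc |= ((uint64_t)base << 16) & 0x000000ffffff0000ULL;	/* base 0-23   */
	desc |= ((uint64_t)base << 32) & 0xff00000000000000ULL;	/* base 24-31  */
	desc |= ((uint64_t)lim_pages << 32) & 0x000f000000000000ULL; /* limit 16-19 */
	desc |= lim_pages & 0xffff;				/* limit 0-15  */
	return desc;
}

int main(void)
{
	/* sample descriptor, base and limit values for demonstration only */
	uint64_t d = pack_desc(0x00cf93000000ffffULL, 0x12345678, 0x1ffff);

	printf("%016llx\n", (unsigned long long)d);
	return 0;
}
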
/*
@@ -1143,7 +1128,7 @@ fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
* Must be called with kernel preemption disabled (in this case,
* local interrupts are disabled at the call-site in entry.S).
*/
-asmlinkage void math_state_restore(struct pt_regs regs)
+asmlinkage void math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
@@ -1153,6 +1138,7 @@ asmlinkage void math_state_restore(struct pt_regs regs)
init_fpu(tsk);
restore_fpu(tsk);
thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
+ tsk->fpu_counter++;
}
#ifndef CONFIG_MATH_EMULATION
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd7..1bbe45dca7a 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -13,7 +13,6 @@
#include <asm/delay.h>
#include <asm/tsc.h>
-#include <asm/delay.h>
#include <asm/io.h>
#include "mach_timer.h"
@@ -217,7 +216,7 @@ static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
{
unsigned int cpu;
@@ -306,7 +305,7 @@ static int __init cpufreq_tsc(void)
{
int ret;
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (!ret)
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index cbcd61d6120..be2f96e67f7 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -43,6 +43,7 @@
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
+#include <linux/stddef.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -72,10 +73,10 @@
/*
* 8- and 16-bit register defines..
*/
-#define AL(regs) (((unsigned char *)&((regs)->eax))[0])
-#define AH(regs) (((unsigned char *)&((regs)->eax))[1])
-#define IP(regs) (*(unsigned short *)&((regs)->eip))
-#define SP(regs) (*(unsigned short *)&((regs)->esp))
+#define AL(regs) (((unsigned char *)&((regs)->pt.eax))[0])
+#define AH(regs) (((unsigned char *)&((regs)->pt.eax))[1])
+#define IP(regs) (*(unsigned short *)&((regs)->pt.eip))
+#define SP(regs) (*(unsigned short *)&((regs)->pt.esp))
/*
* virtual flags (16 and 32-bit versions)
@@ -89,10 +90,37 @@
#define SAFE_MASK (0xDD5)
#define RETURN_MASK (0xDFF)
-#define VM86_REGS_PART2 orig_eax
-#define VM86_REGS_SIZE1 \
- ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
-#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
+/* convert kernel_vm86_regs to vm86_regs */
+static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
+ const struct kernel_vm86_regs *regs)
+{
+ int ret = 0;
+
+ /* kernel_vm86_regs is missing xfs, so copy everything up to
+ (but not including) xgs, and then the rest after xgs. */

+ ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
+ ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
+ sizeof(struct kernel_vm86_regs) -
+ offsetof(struct kernel_vm86_regs, pt.xgs));
+
+ return ret;
+}
+
+/* convert vm86_regs to kernel_vm86_regs */
+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
+ const struct vm86_regs __user *user,
+ unsigned extra)
+{
+ int ret = 0;
+
+ ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
+ ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
+ sizeof(struct kernel_vm86_regs) -
+ offsetof(struct kernel_vm86_regs, pt.xgs) +
+ extra);
+
+ return ret;
+}
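
The two helpers above replace the old VM86_REGS_SIZE* macros with offsetof()-based partial copies around the field that exists on only one side. A tiny userspace illustration of the technique, with made-up struct layouts:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct src { int a, b, c, d; };
struct dst { int a, b, extra, c, d; };	/* 'extra' has no counterpart */

int main(void)
{
	struct src s = { 1, 2, 3, 4 };
	struct dst d = { 0 };

	/* everything up to (but not including) 'c' ... */
	memcpy(&d, &s, offsetof(struct src, c));
	/* ... then the rest, landing after the extra field */
	memcpy(&d.c, &s.c, sizeof(s) - offsetof(struct src, c));

	printf("%d %d %d %d\n", d.a, d.b, d.c, d.d);	/* 1 2 3 4 */
	return 0;
}
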
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
@@ -112,10 +140,8 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
printk("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
- set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
- tmp = copy_to_user(&current->thread.vm86_info->regs,regs, VM86_REGS_SIZE1);
- tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
- &regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
+ set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+ tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
if (tmp) {
printk("vm86: could not access userspace vm86_info\n");
@@ -129,9 +155,11 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
current->thread.saved_esp0 = 0;
put_cpu();
- loadsegment(fs, current->thread.saved_fs);
- loadsegment(gs, current->thread.saved_gs);
ret = KVM86->regs32;
+
+ loadsegment(fs, current->thread.saved_fs);
+ ret->xgs = current->thread.saved_gs;
+
return ret;
}
@@ -183,9 +211,9 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
tsk = current;
if (tsk->thread.saved_esp0)
goto out;
- tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
- tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
- (long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
+ tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+ offsetof(struct kernel_vm86_struct, vm86plus) -
+ sizeof(info.regs));
ret = -EFAULT;
if (tmp)
goto out;
@@ -233,9 +261,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
if (tsk->thread.saved_esp0)
goto out;
v86 = (struct vm86plus_struct __user *)regs.ecx;
- tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
- tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
- (long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
+ tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+ offsetof(struct kernel_vm86_struct, regs32) -
+ sizeof(info.regs));
ret = -EFAULT;
if (tmp)
goto out;
@@ -252,15 +280,15 @@ out:
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
struct tss_struct *tss;
- long eax;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
- info->regs.__null_ds = 0;
- info->regs.__null_es = 0;
+ info->regs.pt.xds = 0;
+ info->regs.pt.xes = 0;
+ info->regs.pt.xgs = 0;
-/* we are clearing fs,gs later just before "jmp resume_userspace",
- * because starting with Linux 2.1.x they aren't no longer saved/restored
+/* we are clearing fs later just before "jmp resume_userspace",
+ * because it is not saved/restored.
*/
/*
@@ -268,10 +296,10 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
- VEFLAGS = info->regs.eflags;
- info->regs.eflags &= SAFE_MASK;
- info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
- info->regs.eflags |= VM_MASK;
+ VEFLAGS = info->regs.pt.eflags;
+ info->regs.pt.eflags &= SAFE_MASK;
+ info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
+ info->regs.pt.eflags |= VM_MASK;
switch (info->cpu_type) {
case CPU_286:
@@ -294,7 +322,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
info->regs32->eax = 0;
tsk->thread.saved_esp0 = tsk->thread.esp0;
savesegment(fs, tsk->thread.saved_fs);
- savesegment(gs, tsk->thread.saved_gs);
+ tsk->thread.saved_gs = info->regs32->xgs;
tss = &per_cpu(init_tss, get_cpu());
tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -306,19 +334,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
tsk->thread.screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);
- __asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
- __asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
/*call audit_syscall_exit since we do not exit via the normal paths */
if (unlikely(current->audit_context))
- audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+ audit_syscall_exit(AUDITSC_RESULT(0), 0);
__asm__ __volatile__(
"movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
+ "mov %2, %%fs\n\t"
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "r" (task_thread_info(tsk)));
+ :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
/* we never return here */
}
@@ -348,12 +375,12 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
static inline void clear_TF(struct kernel_vm86_regs * regs)
{
- regs->eflags &= ~TF_MASK;
+ regs->pt.eflags &= ~TF_MASK;
}
static inline void clear_AC(struct kernel_vm86_regs * regs)
{
- regs->eflags &= ~AC_MASK;
+ regs->pt.eflags &= ~AC_MASK;
}
/* It is correct to call set_IF(regs) from the set_vflags_*
@@ -370,7 +397,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
set_flags(VEFLAGS, eflags, current->thread.v86mask);
- set_flags(regs->eflags, eflags, SAFE_MASK);
+ set_flags(regs->pt.eflags, eflags, SAFE_MASK);
if (eflags & IF_MASK)
set_IF(regs);
else
@@ -380,7 +407,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
set_flags(VFLAGS, flags, current->thread.v86mask);
- set_flags(regs->eflags, flags, SAFE_MASK);
+ set_flags(regs->pt.eflags, flags, SAFE_MASK);
if (flags & IF_MASK)
set_IF(regs);
else
@@ -389,7 +416,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
- unsigned long flags = regs->eflags & RETURN_MASK;
+ unsigned long flags = regs->pt.eflags & RETURN_MASK;
if (VEFLAGS & VIF_MASK)
flags |= IF_MASK;
@@ -493,7 +520,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned long __user *intr_ptr;
unsigned long segoffs;
- if (regs->cs == BIOSSEG)
+ if (regs->pt.xcs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle;
@@ -505,9 +532,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
- pushw(ssp, sp, regs->cs, cannot_handle);
+ pushw(ssp, sp, regs->pt.xcs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
- regs->cs = segoffs >> 16;
+ regs->pt.xcs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
@@ -524,7 +551,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
if (VMPI.is_vm86pus) {
if ( (trapno==3) || (trapno==1) )
return_to_32bit(regs, VM86_TRAP + (trapno << 8));
- do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
+ do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
return 0;
}
if (trapno !=1)
@@ -560,10 +587,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
handle_vm86_trap(regs, 0, 1); \
return; } while (0)
- orig_flags = *(unsigned short *)&regs->eflags;
+ orig_flags = *(unsigned short *)&regs->pt.eflags;
- csp = (unsigned char __user *) (regs->cs << 4);
- ssp = (unsigned char __user *) (regs->ss << 4);
+ csp = (unsigned char __user *) (regs->pt.xcs << 4);
+ ssp = (unsigned char __user *) (regs->pt.xss << 4);
sp = SP(regs);
ip = IP(regs);
@@ -650,7 +677,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
SP(regs) += 6;
}
IP(regs) = newip;
- regs->cs = newcs;
+ regs->pt.xcs = newcs;
CHECK_IF_IN_TRAP;
if (data32) {
set_vflags_long(newflags, regs);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index c6f84a0322b..56e6ad5cb04 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -1,13 +1,26 @@
/* ld script to make i386 Linux kernel
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ *
+ * Don't define absolute symbols until and unless you know that the symbol
+ * value should remain constant even if the kernel image is relocated
+ * at run time. Absolute symbols are not relocated. If the symbol value should
+ * change when the kernel is relocated, make the symbol section-relative and
+ * put it inside the section definition.
*/
+/* Don't define absolute symbols until and unless you know that the symbol
+ * value should remain constant even if the kernel image is relocated
+ * at run time. Absolute symbols are not relocated. If the symbol value should
+ * change when the kernel is relocated, make the symbol section-relative and
+ * put it inside the section definition.
+ */
#define LOAD_OFFSET __PAGE_OFFSET
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/cache.h>
+#include <asm/boot.h>
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
@@ -21,34 +34,35 @@ PHDRS {
}
SECTIONS
{
- . = __KERNEL_START;
+ . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
phys_startup_32 = startup_32 - LOAD_OFFSET;
/* read-only */
- _text = .; /* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
+ _text = .; /* Text and read-only data */
*(.text)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
- } :text = 0x9090
-
- _etext = .; /* End of text section */
+ _etext = .; /* End of text section */
+ } :text = 0x9090
. = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
- __stop___ex_table = .;
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
RODATA
. = ALIGN(4);
- __tracedata_start = .;
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
+ __tracedata_start = .;
*(.tracedata)
+ __tracedata_end = .;
}
- __tracedata_end = .;
/* writeable */
. = ALIGN(4096);
@@ -57,11 +71,19 @@ SECTIONS
CONSTRUCTORS
} :data
+ .paravirtprobe : AT(ADDR(.paravirtprobe) - LOAD_OFFSET) {
+ __start_paravirtprobe = .;
+ *(.paravirtprobe)
+ __stop_paravirtprobe = .;
+ }
+
. = ALIGN(4096);
- __nosave_begin = .;
- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
- . = ALIGN(4096);
- __nosave_end = .;
+ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+ __nosave_begin = .;
+ *(.data.nosave)
+ . = ALIGN(4096);
+ __nosave_end = .;
+ }
. = ALIGN(4096);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
@@ -75,17 +97,10 @@ SECTIONS
/* rarely changed data like cpu maps */
. = ALIGN(32);
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
- _edata = .; /* End of data section */
-
-#ifdef CONFIG_STACK_UNWIND
- . = ALIGN(4);
- .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
- __start_unwind = .;
- *(.eh_frame)
- __end_unwind = .;
+ .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+ *(.data.read_mostly)
+ _edata = .; /* End of data section */
}
-#endif
. = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
@@ -94,88 +109,102 @@ SECTIONS
/* might get freed after init */
. = ALIGN(4096);
- __smp_alt_begin = .;
- __smp_alt_instructions = .;
.smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+ __smp_alt_begin = .;
+ __smp_alt_instructions = .;
*(.smp_altinstructions)
+ __smp_alt_instructions_end = .;
}
- __smp_alt_instructions_end = .;
. = ALIGN(4);
- __smp_locks = .;
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+ __smp_locks = .;
*(.smp_locks)
+ __smp_locks_end = .;
}
- __smp_locks_end = .;
.smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
*(.smp_altinstr_replacement)
+ __smp_alt_end = .;
}
+ /* will be freed after init
+ * The following ALIGN() is required to make sure no other data falls on
+ * the same page to which __smp_alt_end points, as that page might be
+ * freed after boot. Always make sure an ALIGN() directive is present
+ * after the section which contains __smp_alt_end.
+ */
. = ALIGN(4096);
- __smp_alt_end = .;
/* will be freed after init */
. = ALIGN(4096); /* Init code and data */
- __init_begin = .;
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ __init_begin = .;
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
. = ALIGN(16);
- __setup_start = .;
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+ __initcall_start = .;
INITCALLS
+ __initcall_end = .;
}
- __initcall_end = .;
- __con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ __con_initcall_start = .;
*(.con_initcall.init)
+ __con_initcall_end = .;
}
- __con_initcall_end = .;
SECURITY_INIT
. = ALIGN(4);
- __alt_instructions = .;
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ __alt_instructions = .;
*(.altinstructions)
+ __alt_instructions_end = .;
}
- __alt_instructions_end = .;
.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
*(.altinstr_replacement)
}
+ . = ALIGN(4);
+ .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
+ __start_parainstructions = .;
+ *(.parainstructions)
+ __stop_parainstructions = .;
+ }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
. = ALIGN(4096);
- __initramfs_start = .;
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
- __initramfs_end = .;
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
. = ALIGN(L1_CACHE_BYTES);
- __per_cpu_start = .;
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
- __per_cpu_end = .;
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
+ __per_cpu_start = .;
+ *(.data.percpu)
+ __per_cpu_end = .;
+ }
. = ALIGN(4096);
- __init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss.page_aligned : AT(ADDR(.bss.page_aligned) - LOAD_OFFSET) {
- *(.bss.page_aligned)
- }
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+ __init_end = .;
+ __bss_start = .; /* BSS */
+ *(.bss.page_aligned)
*(.bss)
+ . = ALIGN(4);
+ __bss_stop = .;
+ _end = . ;
+ /* This is where the kernel creates the early boot page tables */
+ . = ALIGN(4096);
+ pg0 = . ;
}
- . = ALIGN(4);
- __bss_stop = .;
-
- _end = . ;
-
- /* This is where the kernel creates the early boot page tables */
- . = ALIGN(4096);
- pg0 = .;
/* Sections to be discarded */
/DISCARD/ : {
diff --git a/arch/i386/mach-generic/probe.c b/arch/i386/mach-generic/probe.c
index 94b1fd9cbe3..a7b3999bb37 100644
--- a/arch/i386/mach-generic/probe.c
+++ b/arch/i386/mach-generic/probe.c
@@ -45,7 +45,9 @@ static int __init parse_apic(char *arg)
return 0;
}
}
- return -ENOENT;
+
+ /* Parsed again by __setup for debug/verbose */
+ return 0;
}
early_param("apic", parse_apic);
diff --git a/arch/i386/mach-voyager/voyager_cat.c b/arch/i386/mach-voyager/voyager_cat.c
index f50c6c6ad68..943a9473b13 100644
--- a/arch/i386/mach-voyager/voyager_cat.c
+++ b/arch/i386/mach-voyager/voyager_cat.c
@@ -776,7 +776,7 @@ voyager_cat_init(void)
for(asic=0; asic < (*modpp)->num_asics; asic++) {
int j;
voyager_asic_t *asicp = *asicpp
- = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
+ = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
voyager_sp_table_t *sp_table;
voyager_at_t *asic_table;
voyager_jtt_t *jtag_table;
@@ -785,7 +785,6 @@ voyager_cat_init(void)
printk("**WARNING** kmalloc failure in cat_init\n");
continue;
}
- memset(asicp, 0, sizeof(voyager_asic_t));
asicpp = &(asicp->next);
asicp->asic_location = asic;
sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset);
@@ -851,8 +850,7 @@ voyager_cat_init(void)
#endif
{
- struct resource *res = kmalloc(sizeof(struct resource),GFP_KERNEL);
- memset(res, 0, sizeof(struct resource));
+ struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL);
res->name = kmalloc(128, GFP_KERNEL);
sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i));
res->start = qic_addr;
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index f3fea2ad50f..55428e656a3 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -28,6 +28,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
+#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,6 +423,7 @@ find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
+ write_pda(cpu_number, boot_cpu_id);
}
/*
@@ -458,7 +460,7 @@ start_secondary(void *unused)
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- cpu_init();
+ secondary_cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
@@ -578,6 +580,15 @@ do_boot_cpu(__u8 cpu)
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
+ /* Pre-allocate and initialize the CPU's GDT and PDA so it
+ doesn't have to do any memory allocation during the
+ delicate CPU-bringup phase. */
+ if (!init_gdt(cpu, idle)) {
+ printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+ cpucount--;
+ return;
+ }
+
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
@@ -1963,4 +1974,5 @@ void __init
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
+ write_pda(cpu_number, hard_smp_processor_id());
}
diff --git a/arch/i386/math-emu/fpu_emu.h b/arch/i386/math-emu/fpu_emu.h
index d62b20a3e66..65120f52385 100644
--- a/arch/i386/math-emu/fpu_emu.h
+++ b/arch/i386/math-emu/fpu_emu.h
@@ -57,6 +57,7 @@
#define TAG_Special Const(2) /* De-normal, + or - infinity,
or Not a Number */
#define TAG_Empty Const(3) /* empty */
+#define TAG_Error Const(0x80) /* probably need to abort */
#define LOADED_DATA Const(10101) /* Special st() number to identify
loaded data (not on stack). */
diff --git a/arch/i386/math-emu/fpu_entry.c b/arch/i386/math-emu/fpu_entry.c
index d93f16ef828..ddf8fa3bbd0 100644
--- a/arch/i386/math-emu/fpu_entry.c
+++ b/arch/i386/math-emu/fpu_entry.c
@@ -742,7 +742,8 @@ int save_i387_soft(void *s387, struct _fpstate __user * buf)
S387->fcs &= ~0xf8000000;
S387->fos |= 0xffff0000;
#endif /* PECULIAR_486 */
- __copy_to_user(d, &S387->cwd, 7*4);
+ if (__copy_to_user(d, &S387->cwd, 7*4))
+ return -1;
RE_ENTRANT_CHECK_ON;
d += 7*4;
diff --git a/arch/i386/math-emu/fpu_system.h b/arch/i386/math-emu/fpu_system.h
index bf26341c8bd..a3ae28c49dd 100644
--- a/arch/i386/math-emu/fpu_system.h
+++ b/arch/i386/math-emu/fpu_system.h
@@ -68,6 +68,7 @@
#define FPU_access_ok(x,y,z) if ( !access_ok(x,y,z) ) \
math_abort(FPU_info,SIGSEGV)
+#define FPU_abort math_abort(FPU_info, SIGSEGV)
#undef FPU_IGNORE_CODE_SEGV
#ifdef FPU_IGNORE_CODE_SEGV
diff --git a/arch/i386/math-emu/load_store.c b/arch/i386/math-emu/load_store.c
index 85314be2fef..eebd6fb1c8a 100644
--- a/arch/i386/math-emu/load_store.c
+++ b/arch/i386/math-emu/load_store.c
@@ -227,6 +227,8 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
case 027: /* fild m64int */
clear_C1();
loaded_tag = FPU_load_int64((long long __user *)data_address);
+ if (loaded_tag == TAG_Error)
+ return 0;
FPU_settag0(loaded_tag);
break;
case 030: /* fstenv m14/28byte */
diff --git a/arch/i386/math-emu/reg_ld_str.c b/arch/i386/math-emu/reg_ld_str.c
index f06ed41d191..e976caef649 100644
--- a/arch/i386/math-emu/reg_ld_str.c
+++ b/arch/i386/math-emu/reg_ld_str.c
@@ -244,7 +244,8 @@ int FPU_load_int64(long long __user *_s)
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_READ, _s, 8);
- copy_from_user(&s,_s,8);
+ if (copy_from_user(&s,_s,8))
+ FPU_abort;
RE_ENTRANT_CHECK_ON;
if (s == 0)
@@ -907,7 +908,8 @@ int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
RE_ENTRANT_CHECK_OFF;
FPU_access_ok(VERIFY_WRITE,d,8);
- copy_to_user(d, &tll, 8);
+ if (copy_to_user(d, &tll, 8))
+ FPU_abort;
RE_ENTRANT_CHECK_ON;
return 1;
@@ -1336,7 +1338,8 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
I387.soft.fcs &= ~0xf8000000;
I387.soft.fos |= 0xffff0000;
#endif /* PECULIAR_486 */
- __copy_to_user(d, &control_word, 7*4);
+ if (__copy_to_user(d, &control_word, 7*4))
+ FPU_abort;
RE_ENTRANT_CHECK_ON;
d += 0x1c;
}
@@ -1359,9 +1362,11 @@ void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
FPU_access_ok(VERIFY_WRITE,d,80);
/* Copy all registers in stack order. */
- __copy_to_user(d, register_base+offset, other);
+ if (__copy_to_user(d, register_base+offset, other))
+ FPU_abort;
if ( offset )
- __copy_to_user(d+other, register_base, offset);
+ if (__copy_to_user(d+other, register_base, offset))
+ FPU_abort;
RE_ENTRANT_CHECK_ON;
finit();
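
These math-emu changes make every user copy check its return value. As a reminder of the contract: copy_to_user()/copy_from_user() return the number of bytes left uncopied, so any non-zero result must be treated as a fault. A minimal sketch (put_sample() is a hypothetical helper, not part of the patch):

#include <linux/uaccess.h>
#include <linux/errno.h>

static int put_sample(void __user *dst, const void *src, unsigned long len)
{
	/* non-zero return means a partial copy: report the fault */
	if (copy_to_user(dst, src, len))
		return -EFAULT;
	return 0;
}
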
diff --git a/arch/i386/mm/boot_ioremap.c b/arch/i386/mm/boot_ioremap.c
index 4de11f508c3..4de95a17a7d 100644
--- a/arch/i386/mm/boot_ioremap.c
+++ b/arch/i386/mm/boot_ioremap.c
@@ -16,6 +16,7 @@
*/
#undef CONFIG_X86_PAE
+#undef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index ddbdb0336f2..103b76e56a9 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -168,7 +168,7 @@ static void __init allocate_pgdat(int nid)
if (nid && node_has_online_mem(nid))
NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
else {
- NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
+ NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
min_low_pfn += PFN_UP(sizeof(pg_data_t));
}
}
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 2581575786c..aaaa4d225f7 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -22,9 +22,9 @@
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/kdebug.h>
#include <asm/segment.h>
@@ -167,7 +167,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
unsigned long limit;
- unsigned long instr = get_segment_eip (regs, &limit);
+ unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
int scan_more = 1;
int prefetch = 0;
int i;
@@ -177,9 +177,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
unsigned char instr_hi;
unsigned char instr_lo;
- if (instr > limit)
+ if (instr > (unsigned char *)limit)
break;
- if (__get_user(opcode, (unsigned char __user *) instr))
+ if (probe_kernel_address(instr, opcode))
break;
instr_hi = opcode & 0xf0;
@@ -204,9 +204,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
scan_more = 0;
- if (instr > limit)
+ if (instr > (unsigned char *)limit)
break;
- if (__get_user(opcode, (unsigned char __user *) instr))
+ if (probe_kernel_address(instr, opcode))
break;
prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index f9f647cdbc7..e0fa6cb655a 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- inc_preempt_count();
+ pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -50,26 +50,22 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-#ifdef CONFIG_DEBUG_HIGHMEM
- if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
- dec_preempt_count();
- preempt_check_resched();
- return;
- }
-
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- BUG();
-#endif
/*
* Force other mappings to Oops if they'll try to access this pte
* without first remap it. Keeping stale mappings around is a bad idea
* also, in case the page changes cacheability attributes or becomes
* a protected page in a hypervisor.
*/
- kpte_clear_flush(kmap_pte-idx, vaddr);
+ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr < PAGE_OFFSET);
+ BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+ }
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
/* This is the same as kmap_atomic() but can map memory that doesn't
@@ -80,7 +76,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx;
unsigned long vaddr;
- inc_preempt_count();
+ pagefault_disable();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
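
pagefault_disable()/pagefault_enable() replace the open-coded preempt-count manipulation, but the usage contract for atomic kmaps is unchanged: the mapping is only valid between kmap_atomic() and kunmap_atomic(), with no sleeping in between. A hedged usage sketch (zero_highpage_sketch() is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_highpage_sketch(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);	/* pagefaults now off */

	memset(addr, 0, PAGE_SIZE);			/* no sleeping here   */
	kunmap_atomic(addr, KM_USER0);			/* pagefaults back on */
}
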
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 1719a8141f8..34728e4afe4 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -17,6 +17,113 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+ struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t idx)
+{
+ unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+ svma->vm_start;
+ unsigned long sbase = saddr & PUD_MASK;
+ unsigned long s_end = sbase + PUD_SIZE;
+
+ /*
+ * match the virtual addresses, permission and the alignment of the
+ * page table page.
+ */
+ if (pmd_index(addr) != pmd_index(saddr) ||
+ vma->vm_flags != svma->vm_flags ||
+ sbase < svma->vm_start || svma->vm_end < s_end)
+ return 0;
+
+ return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long base = addr & PUD_MASK;
+ unsigned long end = base + PUD_SIZE;
+
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+ if (vma->vm_flags & VM_MAYSHARE &&
+ vma->vm_start <= base && end <= vma->vm_end)
+ return 1;
+ return 0;
+}
+
+/*
+ * search for a shareable pmd page for hugetlb.
+ */
+static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ struct prio_tree_iter iter;
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
+
+ if (!vma_shareable(vma, addr))
+ return;
+
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+ if (svma == vma)
+ continue;
+
+ saddr = page_table_shareable(svma, vma, addr, idx);
+ if (saddr) {
+ spte = huge_pte_offset(svma->vm_mm, saddr);
+ if (spte) {
+ get_page(virt_to_page(spte));
+ break;
+ }
+ }
+ }
+
+ if (!spte)
+ goto out;
+
+ spin_lock(&mm->page_table_lock);
+ if (pud_none(*pud))
+ pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
+ else
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+out:
+ spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * The hugetlb pte page is refcounted at the time of mapping. If the pte is
+ * shared (page_count > 1), unmapping clears the pud and drops the ref
+ * count. If count == 1, the pte page is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ * 0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ pgd_t *pgd = pgd_offset(mm, *addr);
+ pud_t *pud = pud_offset(pgd, *addr);
+
+ BUG_ON(page_count(virt_to_page(ptep)) == 0);
+ if (page_count(virt_to_page(ptep)) == 1)
+ return 0;
+
+ pud_clear(pud);
+ put_page(virt_to_page(ptep));
+ *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+ return 1;
+}
+
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -25,8 +132,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
- if (pud)
+ if (pud) {
+ if (pud_none(*pud))
+ huge_pmd_share(mm, addr, pud);
pte = (pte_t *) pmd_alloc(mm, pud, addr);
+ }
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
return pte;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 167416155ee..84697dfc734 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -192,8 +192,6 @@ static inline int page_kills_ppro(unsigned long pagenr)
return 0;
}
-extern int is_available_memory(efi_memory_desc_t *);
-
int page_is_ram(unsigned long pagenr)
{
int i;
@@ -699,8 +697,8 @@ int remove_memory(u64 start, u64 size)
#endif
#endif
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
+struct kmem_cache *pgd_cache;
+struct kmem_cache *pmd_cache;
void __init pgtable_cache_init(void)
{
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 8564b6ae17e..ad91528bdc1 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -67,11 +67,17 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
return base;
}
-static void flush_kernel_map(void *dummy)
+static void flush_kernel_map(void *arg)
{
- /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
- if (boot_cpu_data.x86_model >= 4)
+ unsigned long adr = (unsigned long)arg;
+
+ if (adr && cpu_has_clflush) {
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (adr + i));
+ } else if (boot_cpu_data.x86_model >= 4)
wbinvd();
+
/* Flush all to work around Errata in early athlons regarding
* large page flushing.
*/
@@ -173,9 +179,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
return 0;
}
-static inline void flush_map(void)
+static inline void flush_map(void *adr)
{
- on_each_cpu(flush_kernel_map, NULL, 1, 1);
+ on_each_cpu(flush_kernel_map, adr, 1, 1);
}
/*
@@ -217,9 +223,13 @@ void global_flush_tlb(void)
spin_lock_irq(&cpa_lock);
list_replace_init(&df_list, &l);
spin_unlock_irq(&cpa_lock);
- flush_map();
- list_for_each_entry_safe(pg, next, &l, lru)
+ if (!cpu_has_clflush)
+ flush_map(0);
+ list_for_each_entry_safe(pg, next, &l, lru) {
+ if (cpu_has_clflush)
+ flush_map(page_address(pg));
__free_page(pg);
+ }
}
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 10126e3f817..f349eaf450b 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -95,8 +95,11 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
return;
}
pte = pte_offset_kernel(pmd, vaddr);
- /* <pfn,flags> stored as-is, to permit clearing entries */
- set_pte(pte, pfn_pte(pfn, flags));
+ if (pgprot_val(flags))
+ /* <pfn,flags> stored as-is, to permit clearing entries */
+ set_pte(pte, pfn_pte(pfn, flags));
+ else
+ pte_clear(&init_mm, vaddr, pte);
/*
* It's enough to flush this one mapping.
@@ -193,7 +196,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
@@ -233,7 +236,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long)pprev);
}
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@@ -253,7 +256,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index cdfcf971098..53ca6e89798 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -20,7 +20,7 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
-int pci_bf_sort;
+static int pci_bf_sort;
int pci_routeirq;
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
diff --git a/arch/i386/pci/early.c b/arch/i386/pci/early.c
index 713d6c866ca..42df4b6606d 100644
--- a/arch/i386/pci/early.c
+++ b/arch/i386/pci/early.c
@@ -45,6 +45,13 @@ void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
outl(val, 0xcfc);
}
+void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
+{
+ PDprintk("%x writing to %x: %x\n", slot, offset, val);
+ outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
+ outb(val, 0xcfc);
+}
+
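
write_pci_config_byte() above uses PCI configuration mechanism #1: the address is programmed through port 0xcf8 and the data moves through 0xcfc. For symmetry, a hedged sketch of a byte read done the same way (read_pci_config_byte_sketch() is illustrative, not the kernel's helper; the byte lane is selected by the low bits of the data port):

#include <linux/types.h>
#include <asm/io.h>

static u8 read_pci_config_byte_sketch(u8 bus, u8 slot, u8 func, u8 offset)
{
	/* dword-aligned register address into CONFIG_ADDRESS */
	outl(0x80000000 | (bus << 16) | (slot << 11) | (func << 8) |
	     (offset & ~3), 0xcf8);
	/* pick the byte lane within CONFIG_DATA */
	return inb(0xcfc + (offset & 3));
}
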
int early_pci_allowed(void)
{
return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index c1949ff38d6..cde1170b01a 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -74,52 +74,6 @@ static void __devinit pci_fixup_ncr53c810(struct pci_dev *d)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810);
-static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
-{
- int i;
-
- /*
- * PCI IDE controllers use non-standard I/O port decoding, respect it.
- */
- if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
- return;
- DBG("PCI: IDE base address fixup for %s\n", pci_name(d));
- for(i=0; i<4; i++) {
- struct resource *r = &d->resource[i];
- if ((r->start & ~0x80) == 0x374) {
- r->start |= 2;
- r->end = r->start;
- }
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
-
-static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
-{
- int i;
-
- /*
- * Runs the fixup only for the first IDE controller
- * (Shai Fultheim - shai@ftcon.com)
- */
- static int called = 0;
- if (called)
- return;
- called = 1;
-
- /*
- * There exist PCI IDE controllers which have utter garbage
- * in first four base registers. Ignore that.
- */
- DBG("PCI: IDE base address trash cleared for %s\n", pci_name(d));
- for(i=0; i<4; i++)
- d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_11, pci_fixup_ide_trash);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_9, pci_fixup_ide_trash);
-
static void __devinit pci_fixup_latency(struct pci_dev *d)
{
/*
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 98580292f0d..43005f04442 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -104,16 +104,24 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
/* Depth-First Search on bus tree */
list_for_each_entry(bus, bus_list, node) {
if ((dev = bus->self)) {
- for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ for (idx = PCI_BRIDGE_RESOURCES;
+ idx < PCI_NUM_RESOURCES; idx++) {
r = &dev->resource[idx];
if (!r->flags)
continue;
pr = pci_find_parent_resource(dev, r);
- if (!r->start || !pr || request_resource(pr, r) < 0) {
- printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
- /* Something is wrong with the region.
- Invalidate the resource to prevent child
- resource allocations in this range. */
+ if (!r->start || !pr ||
+ request_resource(pr, r) < 0) {
+ printk(KERN_ERR "PCI: Cannot allocate "
+ "resource region %d "
+ "of bridge %s\n",
+ idx, pci_name(dev));
+ /*
+ * Something is wrong with the region.
+ * Invalidate the resource to prevent
+ * child resource allocations in this
+ * range.
+ */
r->flags = 0;
}
}
@@ -131,7 +139,7 @@ static void __init pcibios_allocate_resources(int pass)
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
- for(idx = 0; idx < 6; idx++) {
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
@@ -142,11 +150,15 @@ static void __init pcibios_allocate_resources(int pass)
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled) {
- DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
+ DBG("PCI: Resource %08lx-%08lx "
+ "(f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
- printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
+ printk(KERN_ERR "PCI: Cannot allocate "
+ "resource region %d "
+ "of device %s\n",
+ idx, pci_name(dev));
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
@@ -156,12 +168,16 @@ static void __init pcibios_allocate_resources(int pass)
if (!pass) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (r->flags & IORESOURCE_ROM_ENABLE) {
- /* Turn the ROM off, leave the resource region, but keep it unregistered. */
+ /* Turn the ROM off, leave the resource region,
+ * but keep it unregistered. */
u32 reg;
- DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+ DBG("PCI: Switching off ROM of %s\n",
+ pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
- pci_read_config_dword(dev, dev->rom_base_reg, &reg);
- pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
+ pci_read_config_dword(dev,
+ dev->rom_base_reg, &reg);
+ pci_write_config_dword(dev, dev->rom_base_reg,
+ reg & ~PCI_ROM_ADDRESS_ENABLE);
}
}
}
@@ -173,9 +189,11 @@ static int __init pcibios_assign_resources(void)
struct resource *r, *pr;
if (!(pci_probe & PCI_ASSIGN_ROMS)) {
- /* Try to use BIOS settings for ROMs, otherwise let
- pci_assign_unassigned_resources() allocate the new
- addresses. */
+ /*
+ * Try to use BIOS settings for ROMs, otherwise let
+ * pci_assign_unassigned_resources() allocate the new
+ * addresses.
+ */
for_each_pci_dev(dev) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
@@ -215,9 +233,9 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
- for(idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
+ for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
/* Only set up the requested stuff */
- if (!(mask & (1<<idx)))
+ if (!(mask & (1 << idx)))
continue;
r = &dev->resource[idx];
@@ -227,7 +245,9 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
+ printk(KERN_ERR "PCI: Device %s not available "
+ "because of resource collisions\n",
+ pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -236,7 +256,8 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
@@ -258,7 +279,8 @@ void pcibios_set_master(struct pci_dev *dev)
lat = pcibios_max_latency;
else
return;
- printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
+ printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
+ pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
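The allocation above runs in two passes: pass 0 claims resources whose decode bit is already set in PCI_COMMAND, pass 1 picks up the disabled ones, and swapping the literal 6 for PCI_ROM_RESOURCE ties the loop bound to the number of standard BARs instead of a magic number. A minimal user-space sketch of the pass-selection test (the fake_res type, the CMD_*/RES_* stand-ins and main() are invented for illustration, not kernel code):

#include <stdio.h>

#define CMD_IO     0x1   /* stand-ins for PCI_COMMAND_IO / PCI_COMMAND_MEMORY */
#define CMD_MEMORY 0x2
#define RES_IO     0x1   /* stand-in for IORESOURCE_IO */

struct fake_res { unsigned long flags; };

/* Return 1 when the resource should be claimed in this pass. */
static int claim_in_pass(int pass, unsigned cmd, const struct fake_res *r)
{
    int disabled = (r->flags & RES_IO) ? !(cmd & CMD_IO)
                                       : !(cmd & CMD_MEMORY);
    return pass == disabled;   /* pass 0: enabled, pass 1: disabled */
}

int main(void)
{
    struct fake_res io_bar  = { RES_IO };
    struct fake_res mem_bar = { 0 };
    unsigned cmd = CMD_MEMORY;          /* memory decode on, I/O decode off */

    printf("pass 0: io=%d mem=%d\n",
           claim_in_pass(0, cmd, &io_bar), claim_in_pass(0, cmd, &mem_bar));
    printf("pass 1: io=%d mem=%d\n",
           claim_in_pass(1, cmd, &io_bar), claim_in_pass(1, cmd, &mem_bar));
    return 0;
}

Running the sketch shows the memory BAR claimed in pass 0 and the I/O BAR deferred to pass 1, which is the behaviour the real loop preserves.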
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 69163998ade..f2cb942f828 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -543,6 +543,12 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH8_2:
case PCI_DEVICE_ID_INTEL_ICH8_3:
case PCI_DEVICE_ID_INTEL_ICH8_4:
+ case PCI_DEVICE_ID_INTEL_ICH9_0:
+ case PCI_DEVICE_ID_INTEL_ICH9_1:
+ case PCI_DEVICE_ID_INTEL_ICH9_2:
+ case PCI_DEVICE_ID_INTEL_ICH9_3:
+ case PCI_DEVICE_ID_INTEL_ICH9_4:
+ case PCI_DEVICE_ID_INTEL_ICH9_5:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
@@ -758,7 +764,7 @@ static void __init pirq_find_router(struct irq_router *r)
DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
rt->rtr_vendor, rt->rtr_device);
- pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
+ pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
if (!pirq_router_dev) {
DBG(KERN_DEBUG "PCI: Interrupt router not found at "
"%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
@@ -778,6 +784,8 @@ static void __init pirq_find_router(struct irq_router *r)
pirq_router_dev->vendor,
pirq_router_dev->device,
pci_name(pirq_router_dev));
+
+ /* The device remains referenced for the kernel lifetime */
}
static struct irq_info *pirq_get_info(struct pci_dev *dev)
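The pci_find_slot() to pci_get_bus_and_slot() switch is not just a rename: the new call takes a reference on the returned struct pci_dev, which is why the hunk adds a comment noting that the router device is intentionally kept for the kernel's lifetime. A hedged sketch of the usual pattern for callers that do not keep the device (inspect_device() is an invented name; pci_get_bus_and_slot() and pci_dev_put() are the APIs used here):

#include <linux/pci.h>

static void inspect_device(unsigned int bus, unsigned int devfn)
{
    struct pci_dev *dev = pci_get_bus_and_slot(bus, devfn);

    if (!dev)
        return;
    /* ... use dev->vendor, dev->device, pci_name(dev) ... */
    pci_dev_put(dev);   /* drop the reference when done */
}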
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index ed1512a175a..5f5193401be 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -5,6 +5,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include "pci.h"
#include "pci-functions.h"
@@ -314,6 +315,10 @@ static struct pci_raw_ops * __devinit pci_find_bios(void)
for (check = (union bios32 *) __va(0xe0000);
check <= (union bios32 *) __va(0xffff0);
++check) {
+ long sig;
+ if (probe_kernel_address(&check->fields.signature, sig))
+ continue;
+
if (check->fields.signature != BIOS32_SIGNATURE)
continue;
length = check->fields.length * 16;
@@ -331,11 +336,13 @@ static struct pci_raw_ops * __devinit pci_find_bios(void)
}
DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
if (check->fields.entry >= 0x100000) {
- printk("PCI: BIOS32 entry (0x%p) in high memory, cannot use.\n", check);
+ printk("PCI: BIOS32 entry (0x%p) in high memory, "
+ "cannot use.\n", check);
return NULL;
} else {
unsigned long bios32_entry = check->fields.entry;
- DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+ DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
+ bios32_entry);
bios32_indirect.address = bios32_entry + PAGE_OFFSET;
if (check_pcibios())
return &pci_bios_access;
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
index 8cfa4e8a719..2de7bbf03cd 100644
--- a/arch/i386/power/Makefile
+++ b/arch/i386/power/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_PM) += cpu.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
+obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 5a1abeff033..2c15500f871 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -26,8 +26,8 @@ void __save_processor_state(struct saved_context *ctxt)
/*
* descriptor tables
*/
- store_gdt(&ctxt->gdt_limit);
- store_idt(&ctxt->idt_limit);
+ store_gdt(&ctxt->gdt);
+ store_idt(&ctxt->idt);
store_tr(ctxt->tr);
/*
@@ -99,8 +99,8 @@ void __restore_processor_state(struct saved_context *ctxt)
* now restore the descriptor tables to their proper values
* ltr is done i fix_processor_context().
*/
- load_gdt(&ctxt->gdt_limit);
- load_idt(&ctxt->idt_limit);
+ load_gdt(&ctxt->gdt);
+ load_idt(&ctxt->idt);
/*
* segment registers
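store_gdt()/store_idt() and their load counterparts operate on the 6-byte pseudo-descriptor that the i386 SGDT/SIDT and LGDT/LIDT instructions use, so switching saved_context from separate *_limit fields to whole gdt/idt fields keeps the limit and base packaged under one properly typed member. An illustrative definition of that image (the struct name is invented; the kernel's own type differs):

/*
 * Illustrative only: the 6-byte pseudo-descriptor that SGDT/SIDT store
 * and LGDT/LIDT load on i386.  Limit and base must stay packed together.
 */
struct desc_table_image {
    unsigned short limit;   /* size of the table in bytes, minus one */
    unsigned int   base;    /* 32-bit linear base address */
} __attribute__((packed));

_Static_assert(sizeof(struct desc_table_image) == 6,
               "pseudo-descriptor is 6 bytes");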
diff --git a/arch/i386/power/suspend.c b/arch/i386/power/suspend.c
new file mode 100644
index 00000000000..db5e98d2eb7
--- /dev/null
+++ b/arch/i386/power/suspend.c
@@ -0,0 +1,158 @@
+/*
+ * Suspend support specific for i386 - temporary page tables
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* Defined in arch/i386/power/swsusp.S */
+extern int restore_image(void);
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+/* The following three functions are based on the analogous code in
+ * arch/i386/mm/init.c
+ */
+
+/*
+ * Create a middle page table on a resume-safe page and put a pointer to it in
+ * the given global directory entry. This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+ pud_t *pud;
+ pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+ pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pmd_table)
+ return NULL;
+
+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+ pud = pud_offset(pgd, 0);
+
+ BUG_ON(pmd_table != pmd_offset(pud, 0));
+#else
+ pud = pud_offset(pgd, 0);
+ pmd_table = pmd_offset(pud, 0);
+#endif
+
+ return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!page_table)
+ return NULL;
+
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+ return page_table;
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+ unsigned long pfn;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int pgd_idx, pmd_idx;
+
+ pgd_idx = pgd_index(PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+ pfn = 0;
+
+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+ pmd = resume_one_md_table_init(pgd);
+ if (!pmd)
+ return -ENOMEM;
+
+ if (pfn >= max_low_pfn)
+ continue;
+
+ for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+ if (pfn >= max_low_pfn)
+ break;
+
+ /* Map with big pages if possible, otherwise create
+ * normal page tables.
+ * NOTE: We can mark everything as executable here
+ */
+ if (cpu_has_pse) {
+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+ pfn += PTRS_PER_PTE;
+ } else {
+ pte_t *max_pte;
+
+ pte = resume_one_page_table_init(pmd);
+ if (!pte)
+ return -ENOMEM;
+
+ max_pte = pte + PTRS_PER_PTE;
+ for (; pte < max_pte; pte++, pfn++) {
+ if (pfn >= max_low_pfn)
+ break;
+
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+#ifdef CONFIG_X86_PAE
+ int i;
+
+ /* Init entries of the first-level page table to the zero page */
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ set_pgd(pg_dir + i,
+ __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+}
+
+int swsusp_arch_resume(void)
+{
+ int error;
+
+ resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!resume_pg_dir)
+ return -ENOMEM;
+
+ resume_init_first_level_page_table(resume_pg_dir);
+ error = resume_physical_mapping_init(resume_pg_dir);
+ if (error)
+ return error;
+
+ /* We have got enough memory and from now on we cannot recover */
+ restore_image();
+ return 0;
+}
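A rough sizing of the resume-safe pages the code above consumes on a non-PAE box: one page for the temporary pgd, and, when PSE is unavailable, one page-table page per 4 MB of lowmem (with PSE each pmd entry maps a 4 MB page directly, so no pte pages are needed). A small user-space sketch of that arithmetic, assuming non-PAE i386 constants (4 KB pages, 1024 entries per table); the helper and the sample pfn count are invented for illustration:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PTRS_PER_PTE  1024UL                        /* non-PAE i386 */
#define PMD_SPAN      (PTRS_PER_PTE * PAGE_SIZE)    /* 4 MB per pmd entry */

static unsigned long safe_pages_needed(unsigned long max_low_pfn, int has_pse)
{
    unsigned long lowmem = max_low_pfn * PAGE_SIZE;
    unsigned long pte_pages = has_pse ? 0 : (lowmem + PMD_SPAN - 1) / PMD_SPAN;

    return 1 /* pgd page */ + pte_pages;
}

int main(void)
{
    unsigned long pfns = 224UL * 1024;   /* ~896 MB of lowmem */

    printf("with PSE: %lu pages, without: %lu pages\n",
           safe_pages_needed(pfns, 1), safe_pages_needed(pfns, 0));
    return 0;
}

For roughly 896 MB of lowmem that works out to 225 safe pages without PSE and a single page with it.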
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
index 8a2b50a0aaa..53662e05b39 100644
--- a/arch/i386/power/swsusp.S
+++ b/arch/i386/power/swsusp.S
@@ -28,8 +28,9 @@ ENTRY(swsusp_arch_suspend)
call swsusp_save
ret
-ENTRY(swsusp_arch_resume)
- movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx
+ENTRY(restore_image)
+ movl resume_pg_dir, %ecx
+ subl $__PAGE_OFFSET, %ecx
movl %ecx, %cr3
movl restore_pblist, %edx
@@ -51,6 +52,10 @@ copy_loop:
.p2align 4,,7
done:
+ /* go back to the original page tables */
+ movl $swapper_pg_dir, %ecx
+ subl $__PAGE_OFFSET, %ecx
+ movl %ecx, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movl mmu_cr4_features, %eax
movl %eax, %edx
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 683b12c6f76..75d839715b2 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -434,6 +434,29 @@ config IA64_ESI
source "drivers/sn/Kconfig"
+config KEXEC
+ bool "kexec system call (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ help
+ kexec is a system call that implements the ability to shut down your
+ current kernel and to start another kernel. It is like a reboot,
+ but it is independent of the system firmware. And like a reboot,
+ you can start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+ It is an ongoing process to be certain the hardware in a machine
+ is properly shut down, so do not be surprised if this code does not
+ initially work for you. It may help to enable device hotplugging
+ support. As of this writing the exact hardware interface is
+ strongly in flux, so no good recommendation can be made.
+
+config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ help
+ Generate crash dump after being started by kexec.
+
source "drivers/firmware/Kconfig"
source "fs/Kconfig.binfmt"
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index db8e1fcfa04..ce49fe3a3b5 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -75,7 +75,7 @@
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
-** particularly agressive, this option will keep the entire pdir valid such
+** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
@@ -258,10 +258,10 @@ static u64 prefetch_spill_page;
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
-** (or rather not merge) DMA's into managable chunks.
+** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
-** rather than the HW. I/O MMU allocation alogorithms can be
-** faster with smaller size is (to some degree).
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
@@ -1672,15 +1672,13 @@ ioc_sac_init(struct ioc *ioc)
* SAC (single address cycle) addressable, so allocate a
* pseudo-device to enforce that.
*/
- sac = kmalloc(sizeof(*sac), GFP_KERNEL);
+ sac = kzalloc(sizeof(*sac), GFP_KERNEL);
if (!sac)
panic(PFX "Couldn't allocate struct pci_dev");
- memset(sac, 0, sizeof(*sac));
- controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
panic(PFX "Couldn't allocate struct pci_controller");
- memset(controller, 0, sizeof(*controller));
controller->iommu = ioc;
sac->sysdata = controller;
@@ -1737,12 +1735,10 @@ ioc_init(u64 hpa, void *handle)
struct ioc *ioc;
struct ioc_iommu *info;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc)
return NULL;
- memset(ioc, 0, sizeof(*ioc));
-
ioc->next = ioc_list;
ioc_list = ioc;
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70..1f16ebb9a80 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
}
#endif
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
@@ -684,12 +684,11 @@ static int get_async_struct(int line, struct async_struct **ret_info)
*ret_info = sstate->info;
return 0;
}
- info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
+ info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
if (!info) {
sstate->count--;
return -ENOMEM;
}
- memset(info, 0, sizeof(struct async_struct));
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
init_waitqueue_head(&info->delta_msr_wait);
@@ -698,7 +697,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
- INIT_WORK(&info->work, do_softint, info);
+ INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91bc92..578737ec762 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
* it with privilege level 3 because the IVE uses non-privileged accesses to these
* tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
*/
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
* code is locked in specific gate page, which is pointed by pretcode
* when setup_frame_ia32
*/
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
* Install LDT as anonymous memory. This gives us all-zero segment descriptors
* until a task modifies them via modify_ldt().
*/
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
bprm->loader += stack_base;
bprm->exec += stack_base;
- mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743965a..6af400a12ca 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@ ia32_init (void)
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
- extern kmem_cache_t *partial_page_cachep;
+ extern struct kmem_cache *partial_page_cachep;
partial_page_cachep = kmem_cache_create("partial_page_cache",
sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c934f..cfa0bc0026b 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
void ia64_elf32_init(struct pt_regs *regs);
#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
-#define elf_addr_t u32
-
/* This macro yields a bitmask that programs can use to figure out
what instruction set this CPU supports. */
#define ELF_HWCAP 0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f21014..a4a6e1463af 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
}
/* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
/*
* init partial_page_list.
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index cfa099b04cd..8ae384eb535 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_PCI_MSI) += msi_ia64.o
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 86faf221a07..088f130197a 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ processor_get_pstate (
dprintk("processor_get_pstate\n");
- retval = ia64_pal_get_pstate(&pstate_index);
+ retval = ia64_pal_get_pstate(&pstate_index,
+ PAL_GET_PSTATE_TYPE_INSTANT);
*value = (u32) pstate_index;
if (retval)
@@ -91,7 +92,7 @@ extract_clock (
dprintk("extract_clock\n");
for (i = 0; i < data->acpi_data.state_count; i++) {
- if (value >= data->acpi_data.states[i].control)
+ if (value == data->acpi_data.states[i].status)
return data->acpi_data.states[i].core_frequency;
}
return data->acpi_data.states[i-1].core_frequency;
@@ -117,11 +118,7 @@ processor_get_freq (
goto migrate_end;
}
- /*
- * processor_get_pstate gets the average frequency since the
- * last get. So, do two PAL_get_freq()...
- */
- ret = processor_get_pstate(&value);
+ /* processor_get_pstate gets the instantaneous frequency */
ret = processor_get_pstate(&value);
if (ret) {
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
new file mode 100644
index 00000000000..0aabedf95da
--- /dev/null
+++ b/arch/ia64/kernel/crash.c
@@ -0,0 +1,245 @@
+/*
+ * arch/ia64/kernel/crash.c
+ *
+ * Architecture specific (ia64) functions for kexec based crash dumps.
+ *
+ * Created by: Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
+ *
+ */
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/bootmem.h>
+#include <linux/kexec.h>
+#include <linux/elfcore.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+#include <asm/kdebug.h>
+#include <asm/mca.h>
+#include <asm/uaccess.h>
+
+int kdump_status[NR_CPUS];
+atomic_t kdump_cpu_freezed;
+atomic_t kdump_in_progress;
+int kdump_on_init = 1;
+ssize_t
+copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+ vaddr = __va(pfn<<PAGE_SHIFT);
+ if (userbuf) {
+ if (copy_to_user(buf, (vaddr + offset), csize)) {
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf, (vaddr + offset), csize);
+ return csize;
+}
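copy_oldmem_page() deliberately handles only one page at a time, so a consumer such as the /proc/vmcore read path has to split a physical range into (pfn, offset, chunk) pieces itself. A hedged sketch of that split (read_oldmem_range() is an invented helper name, not part of this patch):

#include <linux/kernel.h>
#include <linux/crash_dump.h>

/* Illustrative helper: read 'count' bytes of old-kernel memory at 'paddr'. */
static ssize_t read_oldmem_range(char *buf, size_t count,
                                 unsigned long long paddr, int userbuf)
{
    ssize_t total = 0;

    while (count) {
        unsigned long pfn    = paddr >> PAGE_SHIFT;
        unsigned long offset = paddr & (PAGE_SIZE - 1);
        size_t chunk = min(count, (size_t)(PAGE_SIZE - offset));
        ssize_t ret  = copy_oldmem_page(pfn, buf, chunk, offset, userbuf);

        if (ret < 0)
            return ret;
        buf   += chunk;
        paddr += chunk;
        count -= chunk;
        total += chunk;
    }
    return total;
}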
+
+static inline Elf64_Word
+*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
+ size_t data_len)
+{
+ struct elf_note *note = (struct elf_note *)buf;
+ note->n_namesz = strlen(name) + 1;
+ note->n_descsz = data_len;
+ note->n_type = type;
+ buf += (sizeof(*note) + 3)/4;
+ memcpy(buf, name, note->n_namesz);
+ buf += (note->n_namesz + 3)/4;
+ memcpy(buf, data, data_len);
+ buf += (data_len + 3)/4;
+ return buf;
+}
+
+static void
+final_note(void *buf)
+{
+ memset(buf, 0, sizeof(struct elf_note));
+}
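append_elf_note() lays each note out as a 4-byte-aligned header/name/payload triple, which is the same Elf64_Nhdr format a crash-dump reader later parses out of the per-cpu crash_notes buffer. The same arithmetic can be exercised in user space with <elf.h> (the buffer size, note name and payload below are made up for the example):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Append one note to 'buf' and return the next free 4-byte word. */
static Elf64_Word *append_note(Elf64_Word *buf, const char *name,
                               unsigned type, const void *data, size_t len)
{
    Elf64_Nhdr *note = (Elf64_Nhdr *)buf;

    note->n_namesz = strlen(name) + 1;
    note->n_descsz = len;
    note->n_type   = type;
    buf += (sizeof(*note) + 3) / 4;
    memcpy(buf, name, note->n_namesz);
    buf += (note->n_namesz + 3) / 4;
    memcpy(buf, data, len);
    buf += (len + 3) / 4;
    return buf;
}

int main(void)
{
    Elf64_Word buf[64] = { 0 };
    char payload[] = "fake prstatus";
    Elf64_Word *end = append_note(buf, "CORE", 1 /* NT_PRSTATUS */,
                                  payload, sizeof(payload));

    printf("note occupies %d bytes\n", (int)((char *)end - (char *)buf));
    return 0;
}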
+
+extern void ia64_dump_cpu_regs(void *);
+
+static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
+
+void
+crash_save_this_cpu()
+{
+ void *buf;
+ unsigned long cfm, sof, sol;
+
+ int cpu = smp_processor_id();
+ struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
+
+ elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
+ memset(prstatus, 0, sizeof(*prstatus));
+ prstatus->pr_pid = current->pid;
+
+ ia64_dump_cpu_regs(dst);
+ cfm = dst[43];
+ sol = (cfm >> 7) & 0x7f;
+ sof = cfm & 0x7f;
+ dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
+ sof - sol);
+
+ buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
+ if (!buf)
+ return;
+ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
+ sizeof(*prstatus));
+ final_note(buf);
+}
+
+static int
+kdump_wait_cpu_freeze(void)
+{
+ int cpu_num = num_online_cpus() - 1;
+ int timeout = 1000;
+ while(timeout-- > 0) {
+ if (atomic_read(&kdump_cpu_freezed) == cpu_num)
+ return 0;
+ udelay(1000);
+ }
+ return 1;
+}
+
+void
+machine_crash_shutdown(struct pt_regs *pt)
+{
+ /* This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+ * The minimum amount of code to allow a kexec'd kernel
+ * to run successfully needs to happen here.
+ *
+ * In practice this means shooting down the other cpus in
+ * an SMP system.
+ */
+ kexec_disable_iosapic();
+#ifdef CONFIG_SMP
+ kdump_smp_send_stop();
+ if (kdump_wait_cpu_freeze() && kdump_on_init) {
+ // not all CPUs responded to the IPI; send INIT to freeze them
+ kdump_smp_send_init();
+ }
+#endif
+}
+
+static void
+machine_kdump_on_init(void)
+{
+ local_irq_disable();
+ kexec_disable_iosapic();
+ machine_kexec(ia64_kimage);
+}
+
+void
+kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
+{
+ int cpuid;
+ local_irq_disable();
+ cpuid = smp_processor_id();
+ crash_save_this_cpu();
+ current->thread.ksp = (__u64)info->sw - 16;
+ atomic_inc(&kdump_cpu_freezed);
+ kdump_status[cpuid] = 1;
+ mb();
+ if (cpuid == 0) {
+ for (;;)
+ cpu_relax();
+ } else
+ ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+}
+
+static int
+kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct ia64_mca_notify_die *nd;
+ struct die_args *args = data;
+
+ if (!kdump_on_init)
+ return NOTIFY_DONE;
+
+ if (val != DIE_INIT_MONARCH_ENTER &&
+ val != DIE_INIT_SLAVE_ENTER &&
+ val != DIE_MCA_RENDZVOUS_LEAVE &&
+ val != DIE_MCA_MONARCH_LEAVE)
+ return NOTIFY_DONE;
+
+ nd = (struct ia64_mca_notify_die *)args->err;
+ /* Reason code 1 means machine check rendezvous */
+ if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
+ nd->sos->rv_rc == 1)
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_INIT_MONARCH_ENTER:
+ machine_kdump_on_init();
+ break;
+ case DIE_INIT_SLAVE_ENTER:
+ unw_init_running(kdump_cpu_freeze, NULL);
+ break;
+ case DIE_MCA_RENDZVOUS_LEAVE:
+ if (atomic_read(&kdump_in_progress))
+ unw_init_running(kdump_cpu_freeze, NULL);
+ break;
+ case DIE_MCA_MONARCH_LEAVE:
+ /* die_register->signr indicate if MCA is recoverable */
+ if (!args->signr)
+ machine_kdump_on_init();
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_SYSCTL
+static ctl_table kdump_on_init_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "kdump_on_init",
+ .data = &kdump_on_init,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table sys_table[] = {
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .mode = 0555,
+ .child = kdump_on_init_table,
+ },
+ { .ctl_name = 0 }
+};
+#endif
+
+static int
+machine_crash_setup(void)
+{
+ char *from = strstr(saved_command_line, "elfcorehdr=");
+ static struct notifier_block kdump_init_notifier_nb = {
+ .notifier_call = kdump_init_notifier,
+ };
+ int ret;
+ if (from)
+ elfcorehdr_addr = memparse(from+11, &from);
+ saved_max_pfn = (unsigned long)-1;
+ if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
+ return ret;
+#ifdef CONFIG_SYSCTL
+ register_sysctl_table(sys_table, 0);
+#endif
+ return 0;
+}
+
+__initcall(machine_crash_setup);
+
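machine_crash_setup() extracts the elfcorehdr= value from the saved command line with memparse(), which accepts an optional K/M/G suffix. A simplified user-space stand-in for that parsing (parse_size() only mimics memparse(); it is not the kernel helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the kernel's memparse(): number plus K/M/G suffix. */
static unsigned long long parse_size(const char *s, char **end)
{
    unsigned long long val = strtoull(s, end, 0);

    switch (**end) {
    case 'G': case 'g': val <<= 10; /* fall through */
    case 'M': case 'm': val <<= 10; /* fall through */
    case 'K': case 'k': val <<= 10; (*end)++;
    }
    return val;
}

int main(void)
{
    const char *cmdline = "ro root=/dev/sda2 elfcorehdr=16M";
    char *from = strstr(cmdline, "elfcorehdr=");

    if (from) {
        char *end;
        unsigned long long addr = parse_size(from + 11, &end);

        printf("elfcorehdr at 0x%llx\n", addr);
    }
    return 0;
}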
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index bb8770a177b..0b25a7d4e1e 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>
+#include <linux/kexec.h>
#include <asm/io.h>
#include <asm/kregs.h>
@@ -41,7 +42,7 @@ extern efi_status_t efi_call_phys (void *, ...);
struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
+static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
#define efi_call_virt(f, args...) (*(f))(args)
@@ -224,7 +225,7 @@ efi_gettimeofday (struct timespec *ts)
}
static int
-is_available_memory (efi_memory_desc_t *md)
+is_memory_available (efi_memory_desc_t *md)
{
if (!(md->attribute & EFI_MEMORY_WB))
return 0;
@@ -421,6 +422,8 @@ efi_init (void)
mem_limit = memparse(cp + 4, &cp);
} else if (memcmp(cp, "max_addr=", 9) == 0) {
max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
+ } else if (memcmp(cp, "min_addr=", 9) == 0) {
+ min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
} else {
while (*cp != ' ' && *cp)
++cp;
@@ -428,6 +431,8 @@ efi_init (void)
++cp;
}
}
+ if (min_addr != 0UL)
+ printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
if (max_addr != ~0UL)
printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
@@ -887,14 +892,15 @@ find_memmap_space (void)
}
contig_high = GRANULEROUNDDOWN(contig_high);
}
- if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+ if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
continue;
/* Round ends inward to granule boundaries */
as = max(contig_low, md->phys_addr);
ae = min(contig_high, efi_md_end(md));
- /* keep within max_addr= command line arg */
+ /* keep within max_addr= and min_addr= command line arg */
+ as = max(as, min_addr);
ae = min(ae, max_addr);
if (ae <= as)
continue;
@@ -962,7 +968,7 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
}
contig_high = GRANULEROUNDDOWN(contig_high);
}
- if (!is_available_memory(md))
+ if (!is_memory_available(md))
continue;
/*
@@ -1004,7 +1010,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
} else
ae = efi_md_end(md);
- /* keep within max_addr= command line arg */
+ /* keep within max_addr= and min_addr= command line arg */
+ as = max(as, min_addr);
ae = min(ae, max_addr);
if (ae <= as)
continue;
@@ -1116,6 +1123,58 @@ efi_initialize_iomem_resources(struct resource *code_resource,
*/
insert_resource(res, code_resource);
insert_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+ insert_resource(res, &efi_memmap_res);
+ insert_resource(res, &boot_param_res);
+ if (crashk_res.end > crashk_res.start)
+ insert_resource(res, &crashk_res);
+#endif
}
}
}
+
+#ifdef CONFIG_KEXEC
+/* Find a block of memory aligned to 64M, excluding the reserved regions;
+ rsvd_regions are sorted.
+ */
+unsigned long
+kdump_find_rsvd_region (unsigned long size,
+ struct rsvd_region *r, int n)
+{
+ int i;
+ u64 start, end;
+ u64 alignment = 1UL << _PAGE_SIZE_64M;
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (!efi_wb(md))
+ continue;
+ start = ALIGN(md->phys_addr, alignment);
+ end = efi_md_end(md);
+ for (i = 0; i < n; i++) {
+ if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
+ if (__pa(r[i].start) > start + size)
+ return start;
+ start = ALIGN(__pa(r[i].end), alignment);
+ if (i < n-1 && __pa(r[i+1].start) < start + size)
+ continue;
+ else
+ break;
+ }
+ }
+ if (end > start + size)
+ return start;
+ }
+
+ printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
+ size);
+ return ~0UL;
+}
+#endif
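Stripped of the EFI descriptor walk, kdump_find_rsvd_region() is a search for a 64 MB-aligned hole of 'size' bytes that clears every entry of the sorted reserved-region list. A user-space sketch of that core loop (the region values in main() are invented; ALIGN_UP stands in for the kernel's alignment helper):

#include <stdio.h>

struct region { unsigned long start, end; };   /* half-open [start, end) */

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* Find an aligned hole of 'size' bytes in [base, limit) avoiding sorted 'rsvd'. */
static unsigned long find_hole(unsigned long base, unsigned long limit,
                               unsigned long size, unsigned long align,
                               const struct region *rsvd, int n)
{
    unsigned long start = ALIGN_UP(base, align);
    int i;

    for (i = 0; i < n; i++) {
        if (rsvd[i].end <= start)
            continue;                           /* reserved region is below us */
        if (rsvd[i].start >= start + size)
            break;                              /* hole fits before this region */
        start = ALIGN_UP(rsvd[i].end, align);   /* skip past it and retry */
    }
    return (start + size <= limit) ? start : ~0UL;
}

int main(void)
{
    const unsigned long M = 1UL << 20;
    struct region rsvd[] = { { 0, 2 * M }, { 70 * M, 80 * M } };

    printf("hole at %lu MB\n",
           find_hole(0, 512 * M, 128 * M, 64 * M, rsvd, 2) / M);
    return 0;
}

With a 128 MB request the sketch skips the reservations at 0-2 MB and 70-80 MB and settles on the 64 MB-aligned hole at 128 MB.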
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3390b7c5a63..15234ed3a34 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1575,7 +1575,7 @@ sys_call_table:
data8 sys_mq_timedreceive // 1265
data8 sys_mq_notify
data8 sys_mq_getsetattr
- data8 sys_ni_syscall // reserved for kexec_load
+ data8 sys_kexec_load
data8 sys_ni_syscall // reserved for vserver
data8 sys_waitid // 1270
data8 sys_add_key
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 879c1817bd1..bd17190bebb 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -14,6 +14,7 @@ EXPORT_SYMBOL(strlen);
#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
+EXPORT_SYMBOL(csum_ipv6_magic);
#include <asm/semaphore.h>
EXPORT_SYMBOL(__down);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 60d64950e3c..0fc5fb7865c 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -288,6 +288,27 @@ nop (unsigned int irq)
/* do nothing... */
}
+
+#ifdef CONFIG_KEXEC
+void
+kexec_disable_iosapic(void)
+{
+ struct iosapic_intr_info *info;
+ struct iosapic_rte_info *rte;
+ u8 vec = 0;
+ for (info = iosapic_intr_info; info <
+ iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
+ list_for_each_entry(rte, &info->rtes,
+ rte_list) {
+ iosapic_write(rte->addr,
+ IOSAPIC_RTE_LOW(rte->rte_index),
+ IOSAPIC_MASK|vec);
+ iosapic_eoi(rte->addr, vec);
+ }
+ }
+}
+#endif
+
static void
mask_irq (unsigned int irq)
{
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d63285..76e778951e2 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, 0);
mutex_unlock(&kprobe_mutex);
}
/*
@@ -851,7 +851,7 @@ static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
return;
}
} while (unw_unwind(info) >= 0);
- lp->bsp = 0;
+ lp->bsp = NULL;
lp->cfm = 0;
return;
}
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
new file mode 100644
index 00000000000..468233fa2ce
--- /dev/null
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -0,0 +1,133 @@
+/*
+ * arch/ia64/kernel/machine_kexec.c
+ *
+ * Handle transition of Linux booting another kernel
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+#include <asm/delay.h>
+#include <asm/meminit.h>
+
+typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
+ struct ia64_boot_param *, unsigned long);
+
+struct kimage *ia64_kimage;
+
+struct resource efi_memmap_res = {
+ .name = "EFI Memory Map",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource boot_param_res = {
+ .name = "Boot parameter",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+
+/*
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+ void *control_code_buffer;
+ const unsigned long *func;
+
+ func = (unsigned long *)&relocate_new_kernel;
+ /* Pre-load control code buffer to minimize work in kexec path */
+ control_code_buffer = page_address(image->control_code_page);
+ memcpy((void *)control_code_buffer, (const void *)func[0],
+ relocate_new_kernel_size);
+ flush_icache_range((unsigned long)control_code_buffer,
+ (unsigned long)control_code_buffer + relocate_new_kernel_size);
+ ia64_kimage = image;
+
+ return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+void machine_shutdown(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ cpu_down(cpu);
+ }
+ kexec_disable_iosapic();
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+extern void *efi_get_pal_addr(void);
+static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
+{
+ struct kimage *image = arg;
+ relocate_new_kernel_t rnk;
+ void *pal_addr = efi_get_pal_addr();
+ unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
+ unsigned long vector;
+ int ii;
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ crash_save_this_cpu();
+ current->thread.ksp = (__u64)info->sw - 16;
+ }
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+ /* Mask CMC and Performance Monitor interrupts */
+ ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+ ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
+
+ /* Mask ITV and Local Redirect Registers */
+ ia64_set_itv(1 << 16);
+ ia64_set_lrr0(1 << 16);
+ ia64_set_lrr1(1 << 16);
+
+ /* terminate possible nested in-service interrupts */
+ for (ii = 0; ii < 16; ii++)
+ ia64_eoi();
+
+ /* unmask TPR and clear any pending interrupts */
+ ia64_setreg(_IA64_REG_CR_TPR, 0);
+ ia64_srlz_d();
+ vector = ia64_get_ivr();
+ while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ ia64_eoi();
+ vector = ia64_get_ivr();
+ }
+ platform_kernel_launch_event();
+ rnk = (relocate_new_kernel_t)&code_addr;
+ (*rnk)(image->head, image->start, ia64_boot_param,
+ GRANULEROUNDDOWN((unsigned long) pal_addr));
+ BUG();
+}
+
+void machine_kexec(struct kimage *image)
+{
+ unw_init_running(ia64_machine_kexec, image);
+ for(;;);
+}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb..87c1c4f4287 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -82,6 +82,7 @@
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
+#include <asm/kexec.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
@@ -678,7 +679,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
* disable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}
@@ -690,7 +691,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
* enable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
@@ -1238,6 +1239,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
} else {
/* Dump buffered message to console */
ia64_mlogbuf_finish(1);
+#ifdef CONFIG_CRASH_DUMP
+ atomic_set(&kdump_in_progress, 1);
+ monarch_cpu = -1;
+#endif
}
if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
== NOTIFY_STOP)
@@ -1247,8 +1252,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
monarch_cpu = -1;
}
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
/*
* ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2b36a..a71df9ae039 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -16,6 +16,7 @@
* 02/05/2001 S.Eranian fixed module support
* 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
* 03/24/2004 Ashok Raj updated to work with CPU Hotplug
+ * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
*/
#include <linux/types.h>
#include <linux/errno.h>
@@ -314,13 +315,20 @@ vm_info(char *page)
"Protection Key Registers(PKR) : %d\n"
"Implemented bits in PKR.key : %d\n"
"Hash Tag ID : 0x%x\n"
- "Size of RR.rid : %d\n",
+ "Size of RR.rid : %d\n"
+ "Max Purges : ",
vm_info_1.pal_vm_info_1_s.phys_add_size,
vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
vm_info_1.pal_vm_info_1_s.max_pkr+1,
vm_info_1.pal_vm_info_1_s.key_size,
vm_info_1.pal_vm_info_1_s.hash_tag_id,
vm_info_2.pal_vm_info_2_s.rid_size);
+ if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
+ p += sprintf(p, "unlimited\n");
+ else
+ p += sprintf(p, "%d\n",
+ vm_info_2.pal_vm_info_2_s.max_purges ?
+ vm_info_2.pal_vm_info_2_s.max_purges : 1);
}
if (ia64_pal_mem_attrib(&attrib) == 0) {
@@ -467,7 +475,11 @@ static const char *proc_features[]={
NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
- NULL,NULL,NULL,NULL,NULL,
+ "Unimplemented instruction address fault",
+ "INIT, PMI, and LINT pins",
+ "Simple unimplemented instr addresses",
+ "Variable P-state performance",
+ "Virtual machine features implemented",
"XIP,XPSR,XFS implemented",
"XR1-XR3 implemented",
"Disable dynamic predicate prediction",
@@ -475,7 +487,11 @@ static const char *proc_features[]={
"Disable dynamic data cache prefetch",
"Disable dynamic inst cache prefetch",
"Disable dynamic branch prediction",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "Disable P-states",
+ "Enable MCA on Data Poisoning",
+ "Enable vmsw instruction",
+ "Enable extern environmental notification",
"Disable BINIT on processor time-out",
"Disable dynamic power management (DPM)",
"Disable coherency",
@@ -952,7 +968,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-#ifdef CONFIG_HOTPLUG_CPU
static int palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -974,7 +989,6 @@ static struct notifier_block palinfo_cpu_notifier =
.notifier_call = palinfo_cpu_callback,
.priority = 0,
};
-#endif
static int __init
palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3aaede0d698..dbb28164b19 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -853,9 +853,8 @@ pfm_context_alloc(void)
* allocate context descriptor
* must be able to free with interrupts disabled
*/
- ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
+ ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
if (ctx) {
- memset(ctx, 0, sizeof(pfm_context_t));
DPRINT(("alloc ctx @%p\n", ctx));
}
return ctx;
@@ -2302,7 +2301,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
index cd06ac6a686..7f8da4c7ca6 100644
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -45,16 +45,16 @@ static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
/* pmc29 */ { PFM_REG_NOTIMPL, },
/* pmc30 */ { PFM_REG_NOTIMPL, },
/* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
{ PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
};
@@ -185,7 +185,7 @@ pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cn
DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
if (cnum == 41 && is_loaded
- && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
+ && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
new file mode 100644
index 00000000000..ae473e3f2a0
--- /dev/null
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -0,0 +1,334 @@
+/*
+ * arch/ia64/kernel/relocate_kernel.S
+ *
+ * Relocate kexec'able kernel and start it
+ *
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <asm/asmmacro.h>
+#include <asm/kregs.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mca_asm.h>
+
+ /* Must be relocatable PIC code callable as a C function
+ */
+GLOBAL_ENTRY(relocate_new_kernel)
+ .prologue
+ alloc r31=ar.pfs,4,0,0,0
+ .body
+.reloc_entry:
+{
+ rsm psr.i| psr.ic
+ mov r2=ip
+}
+ ;;
+{
+ flushrs // must be first insn in group
+ srlz.i
+}
+ ;;
+ dep r2=0,r2,61,3 //to physical address
+ ;;
+ //first switch to physical mode
+ add r3=1f-.reloc_entry, r2
+ movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
+ mov ar.rsc=0 // put RSE in enforced lazy mode
+ ;;
+ add sp=(memory_stack_end - 16 - .reloc_entry),r2
+ add r8=(register_stack - .reloc_entry),r2
+ ;;
+ mov r18=ar.rnat
+ mov ar.bspstore=r8
+ ;;
+ mov cr.ipsr=r16
+ mov cr.iip=r3
+ mov cr.ifs=r0
+ srlz.i
+ ;;
+ mov ar.rnat=r18
+ rfi
+ ;;
+1:
+ //physical mode code begin
+ mov b6=in1
+ dep r28=0,in2,61,3 //to physical address
+
+ // purge all TC entries
+#define O(member) IA64_CPUINFO_##member##_OFFSET
+ GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ ;;
+ addl r17=O(PTCE_STRIDE),r2
+ addl r2=O(PTCE_BASE),r2
+ ;;
+ ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
+ ld4 r19=[r2],4 // r19=ptce_count[0]
+ ld4 r21=[r17],4 // r21=ptce_stride[0]
+ ;;
+ ld4 r20=[r2] // r20=ptce_count[1]
+ ld4 r22=[r17] // r22=ptce_stride[1]
+ mov r24=r0
+ ;;
+ adds r20=-1,r20
+ ;;
+#undef O
+2:
+ cmp.ltu p6,p7=r24,r19
+(p7) br.cond.dpnt.few 4f
+ mov ar.lc=r20
+3:
+ ptc.e r18
+ ;;
+ add r18=r22,r18
+ br.cloop.sptk.few 3b
+ ;;
+ add r18=r21,r18
+ add r24=1,r24
+ ;;
+ br.sptk.few 2b
+4:
+ srlz.i
+ ;;
+ //purge TR entry for kernel text and data
+ movl r16=KERNEL_START
+ mov r18=KERNEL_TR_PAGE_SHIFT<<2
+ ;;
+ ptr.i r16, r18
+ ptr.d r16, r18
+ ;;
+ srlz.i
+ ;;
+
+ // purge TR entry for percpu data
+ movl r16=PERCPU_ADDR
+ mov r18=PERCPU_PAGE_SHIFT<<2
+ ;;
+ ptr.d r16,r18
+ ;;
+ srlz.d
+ ;;
+
+ // purge TR entry for pal code
+ mov r16=in3
+ mov r18=IA64_GRANULE_SHIFT<<2
+ ;;
+ ptr.i r16,r18
+ ;;
+ srlz.i
+ ;;
+
+ // purge TR entry for stack
+ mov r16=IA64_KR(CURRENT_STACK)
+ ;;
+ shl r16=r16,IA64_GRANULE_SHIFT
+ movl r19=PAGE_OFFSET
+ ;;
+ add r16=r19,r16
+ mov r18=IA64_GRANULE_SHIFT<<2
+ ;;
+ ptr.d r16,r18
+ ;;
+ srlz.i
+ ;;
+
+ //copy segments
+ movl r16=PAGE_MASK
+ mov r30=in0 // in0 is page_list
+ br.sptk.few .dest_page
+ ;;
+.loop:
+ ld8 r30=[in0], 8;;
+.dest_page:
+ tbit.z p0, p6=r30, 0;; // 0x1 dest page
+(p6) and r17=r30, r16
+(p6) br.cond.sptk.few .loop;;
+
+ tbit.z p0, p6=r30, 1;; // 0x2 indirect page
+(p6) and in0=r30, r16
+(p6) br.cond.sptk.few .loop;;
+
+ tbit.z p0, p6=r30, 2;; // 0x4 end flag
+(p6) br.cond.sptk.few .end_loop;;
+
+ tbit.z p6, p0=r30, 3;; // 0x8 source page
+(p6) br.cond.sptk.few .loop
+
+ and r18=r30, r16
+
+ // simple copy page, may optimize later
+ movl r14=PAGE_SIZE/8 - 1;;
+ mov ar.lc=r14;;
+1:
+ ld8 r14=[r18], 8;;
+ st8 [r17]=r14;;
+ fc.i r17
+ add r17=8, r17
+ br.ctop.sptk.few 1b
+ br.sptk.few .loop
+ ;;
+
+.end_loop:
+ sync.i // for fc.i
+ ;;
+ srlz.i
+ ;;
+ srlz.d
+ ;;
+ br.call.sptk.many b0=b6;;
+
+.align 32
+memory_stack:
+ .fill 8192, 1, 0
+memory_stack_end:
+register_stack:
+ .fill 8192, 1, 0
+register_stack_end:
+relocate_new_kernel_end:
+END(relocate_new_kernel)
+
+.global relocate_new_kernel_size
+relocate_new_kernel_size:
+ data8 relocate_new_kernel_end - relocate_new_kernel
+
+GLOBAL_ENTRY(ia64_dump_cpu_regs)
+ .prologue
+ alloc loc0=ar.pfs,1,2,0,0
+ .body
+ mov ar.rsc=0 // put RSE in enforced lazy mode
+ add loc1=4*8, in0 // save r4 and r5 first
+ ;;
+{
+ flushrs // flush dirty regs to backing store
+ srlz.i
+}
+ st8 [loc1]=r4, 8
+ ;;
+ st8 [loc1]=r5, 8
+ ;;
+ add loc1=32*8, in0
+ mov r4=ar.rnat
+ ;;
+ st8 [in0]=r0, 8 // r0
+ st8 [loc1]=r4, 8 // rnat
+ mov r5=pr
+ ;;
+ st8 [in0]=r1, 8 // r1
+ st8 [loc1]=r5, 8 // pr
+ mov r4=b0
+ ;;
+ st8 [in0]=r2, 8 // r2
+ st8 [loc1]=r4, 8 // b0
+ mov r5=b1;
+ ;;
+ st8 [in0]=r3, 24 // r3
+ st8 [loc1]=r5, 8 // b1
+ mov r4=b2
+ ;;
+ st8 [in0]=r6, 8 // r6
+ st8 [loc1]=r4, 8 // b2
+ mov r5=b3
+ ;;
+ st8 [in0]=r7, 8 // r7
+ st8 [loc1]=r5, 8 // b3
+ mov r4=b4
+ ;;
+ st8 [in0]=r8, 8 // r8
+ st8 [loc1]=r4, 8 // b4
+ mov r5=b5
+ ;;
+ st8 [in0]=r9, 8 // r9
+ st8 [loc1]=r5, 8 // b5
+ mov r4=b6
+ ;;
+ st8 [in0]=r10, 8 // r10
+ st8 [loc1]=r4, 8 // b6
+ mov r5=b7
+ ;;
+ st8 [in0]=r11, 8 // r11
+ st8 [loc1]=r5, 8 // b7
+ mov r4=b0
+ ;;
+ st8 [in0]=r12, 8 // r12
+ st8 [loc1]=r4, 8 // ip
+ mov r5=loc0
+ ;;
+ st8 [in0]=r13, 8 // r13
+ extr.u r5=r5, 0, 38 // ar.pfs.pfm
+ mov r4=r0 // user mask
+ ;;
+ st8 [in0]=r14, 8 // r14
+ st8 [loc1]=r5, 8 // cfm
+ ;;
+ st8 [in0]=r15, 8 // r15
+ st8 [loc1]=r4, 8 // user mask
+ mov r5=ar.rsc
+ ;;
+ st8 [in0]=r16, 8 // r16
+ st8 [loc1]=r5, 8 // ar.rsc
+ mov r4=ar.bsp
+ ;;
+ st8 [in0]=r17, 8 // r17
+ st8 [loc1]=r4, 8 // ar.bsp
+ mov r5=ar.bspstore
+ ;;
+ st8 [in0]=r18, 8 // r18
+ st8 [loc1]=r5, 8 // ar.bspstore
+ mov r4=ar.rnat
+ ;;
+ st8 [in0]=r19, 8 // r19
+ st8 [loc1]=r4, 8 // ar.rnat
+ mov r5=ar.ccv
+ ;;
+ st8 [in0]=r20, 8 // r20
+ st8 [loc1]=r5, 8 // ar.ccv
+ mov r4=ar.unat
+ ;;
+ st8 [in0]=r21, 8 // r21
+ st8 [loc1]=r4, 8 // ar.unat
+ mov r5 = ar.fpsr
+ ;;
+ st8 [in0]=r22, 8 // r22
+ st8 [loc1]=r5, 8 // ar.fpsr
+ mov r4 = ar.unat
+ ;;
+ st8 [in0]=r23, 8 // r23
+ st8 [loc1]=r4, 8 // unat
+ mov r5 = ar.fpsr
+ ;;
+ st8 [in0]=r24, 8 // r24
+ st8 [loc1]=r5, 8 // fpsr
+ mov r4 = ar.pfs
+ ;;
+ st8 [in0]=r25, 8 // r25
+ st8 [loc1]=r4, 8 // ar.pfs
+ mov r5 = ar.lc
+ ;;
+ st8 [in0]=r26, 8 // r26
+ st8 [loc1]=r5, 8 // ar.lc
+ mov r4 = ar.ec
+ ;;
+ st8 [in0]=r27, 8 // r27
+ st8 [loc1]=r4, 8 // ar.ec
+ mov r5 = ar.csd
+ ;;
+ st8 [in0]=r28, 8 // r28
+ st8 [loc1]=r5, 8 // ar.csd
+ mov r4 = ar.ssd
+ ;;
+ st8 [in0]=r29, 8 // r29
+ st8 [loc1]=r4, 8 // ar.ssd
+ ;;
+ st8 [in0]=r30, 8 // r30
+ ;;
+ st8 [in0]=r31, 8 // r31
+ mov ar.pfs=loc0
+ ;;
+ br.ret.sptk.many rp
+END(ia64_dump_cpu_regs)
+
+
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344..fd607ca51a8 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
.write = salinfo_log_write,
};
-#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
.notifier_call = salinfo_cpu_callback,
.priority = 0,
};
-#endif /* CONFIG_HOTPLUG_CPU */
static int __init
salinfo_init(void)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d10404a4175..14e1200376a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -43,6 +43,8 @@
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
@@ -252,6 +254,41 @@ reserve_memory (void)
efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
n++;
+#ifdef CONFIG_KEXEC
+ /* crashkernel=size@offset specifies the size to reserve for a crash
+ * kernel. (The offset is ignored, for compatibility with other archs.)
+ * By reserving this memory we guarantee that Linux never sets it
+ * up as a DMA target. Useful for holding code to do something
+ * appropriate after a kernel panic.
+ */
+ {
+ char *from = strstr(saved_command_line, "crashkernel=");
+ unsigned long base, size;
+ if (from) {
+ size = memparse(from + 12, &from);
+ if (size) {
+ sort_regions(rsvd_region, n);
+ base = kdump_find_rsvd_region(size,
+ rsvd_region, n);
+ if (base != ~0UL) {
+ rsvd_region[n].start =
+ (unsigned long)__va(base);
+ rsvd_region[n].end =
+ (unsigned long)__va(base + size);
+ n++;
+ crashk_res.start = base;
+ crashk_res.end = base + size - 1;
+ }
+ }
+ }
+ efi_memmap_res.start = ia64_boot_param->efi_memmap;
+ efi_memmap_res.end = efi_memmap_res.start +
+ ia64_boot_param->efi_memmap_size;
+ boot_param_res.start = __pa(ia64_boot_param);
+ boot_param_res.end = boot_param_res.start +
+ sizeof(*ia64_boot_param);
+ }
+#endif
/* end of memory marker */
rsvd_region[n].start = ~0UL;
rsvd_region[n].end = ~0UL;
@@ -263,6 +300,7 @@ reserve_memory (void)
sort_regions(rsvd_region, num_rsvd_regions);
}
+
/**
* find_initrd - get initrd parameters from the boot parameter structure
*
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 6ab95ceaf9d..b1b9aa4364b 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/atomic.h>
#include <asm/current.h>
@@ -66,6 +67,7 @@ static volatile struct call_data_struct *call_data;
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
+#define IPI_KDUMP_CPU_STOP 3
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -155,7 +157,11 @@ handle_IPI (int irq, void *dev_id)
case IPI_CPU_STOP:
stop_this_cpu();
break;
-
+#ifdef CONFIG_CRASH_DUMP
+ case IPI_KDUMP_CPU_STOP:
+ unw_init_running(kdump_cpu_freeze, NULL);
+ break;
+#endif
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
@@ -213,6 +219,26 @@ send_IPI_self (int op)
send_IPI_single(smp_processor_id(), op);
}
+#ifdef CONFIG_CRASH_DUMP
+void
+kdump_smp_send_stop()
+{
+ send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init()
+{
+ unsigned int cpu, self_cpu;
+ self_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ if (cpu != self_cpu) {
+ if(kdump_status[cpu] == 0)
+ platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+ }
+ }
+}
+#endif
/*
 * Called with preemption disabled.
*/
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f566814..b21ddecea94 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
}
struct create_idle {
+ struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
{
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
{
int timeout;
struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
- DECLARE_WORK(work, do_fork_idle, &c_idle);
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
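The simserial, mca and smpboot hunks are all fallout from the same 2.6.20 workqueue API change: work handlers now receive a struct work_struct * rather than a void * cookie, so per-work data is reached by embedding the work item in a larger structure and recovering it with container_of(), exactly as struct create_idle does above. A hedged, generic illustration of the pattern (my_job, my_job_fn and run_my_job are invented names):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct my_job {
    struct work_struct work;     /* must be embedded, not pointed to */
    int cpu;
    struct completion done;
};

static void my_job_fn(struct work_struct *work)
{
    struct my_job *job = container_of(work, struct my_job, work);

    /* ... do something with job->cpu ... */
    complete(&job->done);
}

static void run_my_job(int cpu)
{
    struct my_job job = {
        .work = __WORK_INITIALIZER(job.work, my_job_fn),
        .cpu  = cpu,
        .done = COMPLETION_INITIALIZER(job.done),
    };

    schedule_work(&job.work);
    wait_for_completion(&job.done);
}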
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c..687500ddb4b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
{
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
/*
- * If CPEI cannot be re-targetted, and this is
- * CPEI target, then dont create the control file
+ * If CPEI can be re-targeted or if this is not
+ * CPEI target, then it is hotpluggable
*/
- if (!can_cpei_retarget() && is_cpu_cpei_target(num))
- sysfs_cpus[num].cpu.no_control = 1;
+ if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+ sysfs_cpus[num].cpu.hotpluggable = 1;
map_cpu_to_node(num, node_cpuid[num].nid);
#endif
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index beb11721d9f..4411d9baeb2 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -33,32 +33,32 @@ from64to16 (unsigned long x)
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented.
*/
-unsigned short int
-csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, unsigned short len,
- unsigned short proto, unsigned int sum)
+__sum16
+csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
{
- return ~from64to16(saddr + daddr + sum + ((unsigned long) ntohs(len) << 16) +
- ((unsigned long) proto << 8));
+ return (__force __sum16)~from64to16(
+ (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8));
}
EXPORT_SYMBOL(csum_tcpudp_magic);
-unsigned int
-csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len,
- unsigned short proto, unsigned int sum)
+__wsum
+csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
{
unsigned long result;
- result = (saddr + daddr + sum +
- ((unsigned long) ntohs(len) << 16) +
- ((unsigned long) proto << 8));
+ result = (__force u64)saddr + (__force u64)daddr +
+ (__force u64)sum + ((len + proto) << 8);
/* Fold down to 32-bits so we don't lose in the typedef-less network stack. */
/* 64 to 33 */
result = (result & 0xffffffff) + (result >> 32);
/* 33 to 32 */
result = (result & 0xffffffff) + (result >> 32);
- return result;
+ return (__force __wsum)result;
}
extern unsigned long do_csum (const unsigned char *, long);
@@ -75,16 +75,15 @@ extern unsigned long do_csum (const unsigned char *, long);
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int
-csum_partial (const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
- unsigned long result = do_csum(buff, len);
+ u64 result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
+ result += (__force u32)sum;
/* 32+c bits -> 32 bits */
result = (result & 0xffffffff) + (result >> 32);
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -93,10 +92,9 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short
-ip_compute_csum (unsigned char * buff, int len)
+__sum16 ip_compute_csum (const void *buff, int len)
{
- return ~do_csum(buff,len);
+ return (__force __sum16)~do_csum(buff,len);
}
EXPORT_SYMBOL(ip_compute_csum);
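The conversions above are mainly about the checksum type annotations (__wsum, __sum16); the folding scheme itself is unchanged: the 64-bit accumulation is reduced to 32 bits in two add-with-carry steps, and the 16-bit routines fold further before taking the one's complement. A minimal stand-alone sketch of that fold, assuming plain integer types rather than the kernel's annotated ones:

#include <stdint.h>

/* Fold a 64-bit one's-complement accumulator down to 32 bits. */
static uint32_t csum_fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64+carry -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 33       -> 32 bits */
	return (uint32_t)sum;
}

/* Fold to 16 bits and complement, as the *_magic/ip_compute_csum paths do. */
static uint16_t csum_fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);		/* 32 -> 17 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* 17 -> 16 bits */
	return (uint16_t)~sum;				/* one's complement */
}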
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 36866e8a5d2..503dfe6d145 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -104,9 +104,9 @@ out:
*/
extern unsigned long do_csum(const unsigned char *, long);
-static unsigned int
-do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
- int len, unsigned int psum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum psum, int *errp)
{
unsigned long result;
@@ -122,30 +122,17 @@ do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *
result = do_csum(dst, len);
/* add in old sum, and carry.. */
- result += psum;
+ result += (__force u32)psum;
/* 32+c bits -> 32 bits */
result = (result & 0xffffffff) + (result >> 32);
- return result;
-}
-
-unsigned int
-csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
- int len, unsigned int sum, int *errp)
-{
- if (!access_ok(VERIFY_READ, src, len)) {
- *errp = -EFAULT;
- memset(dst, 0, len);
- return sum;
- }
-
- return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
+ return (__force __wsum)result;
}
-unsigned int
-csum_partial_copy_nocheck(const unsigned char __user *src, unsigned char *dst,
- int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
- return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+ return csum_partial_copy_from_user((__force const void __user *)src,
+ dst, len, sum, NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 19674ca2acf..1f86aeb2c94 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -8,8 +8,8 @@
* in0: address of buffer to checksum (char *)
* in1: length of the buffer (int)
*
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
+ * Copyright (C) 2002, 2006 Intel Corp.
+ * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com>
*/
#include <asm/asmmacro.h>
@@ -25,6 +25,9 @@
#define in0 r32
#define in1 r33
+#define in2 r34
+#define in3 r35
+#define in4 r36
#define ret0 r8
GLOBAL_ENTRY(ip_fast_csum)
@@ -65,8 +68,9 @@ GLOBAL_ENTRY(ip_fast_csum)
zxt2 r20=r20
;;
add r20=ret0,r20
+ mov r9=0xffff
;;
- andcm ret0=-1,r20
+ andcm ret0=r9,r20
.restore sp // reset frame state
br.ret.sptk.many b0
;;
@@ -88,3 +92,51 @@ GLOBAL_ENTRY(ip_fast_csum)
mov b0=r34
br.ret.sptk.many b0
END(ip_fast_csum)
+
+GLOBAL_ENTRY(csum_ipv6_magic)
+ ld4 r20=[in0],4
+ ld4 r21=[in1],4
+ dep r15=in3,in2,32,16
+ ;;
+ ld4 r22=[in0],4
+ ld4 r23=[in1],4
+ mux1 r15=r15,@rev
+ ;;
+ ld4 r24=[in0],4
+ ld4 r25=[in1],4
+ shr.u r15=r15,16
+ add r16=r20,r21
+ add r17=r22,r23
+ ;;
+ ld4 r26=[in0],4
+ ld4 r27=[in1],4
+ add r18=r24,r25
+ add r8=r16,r17
+ ;;
+ add r19=r26,r27
+ add r8=r8,r18
+ ;;
+ add r8=r8,r19
+ add r15=r15,in4
+ ;;
+ add r8=r8,r15
+ ;;
+ shr.u r10=r8,32 // now fold sum into short
+ zxt4 r11=r8
+ ;;
+ add r8=r10,r11
+ ;;
+ shr.u r10=r8,16 // yeah, keep it rolling
+ zxt2 r11=r8
+ ;;
+ add r8=r10,r11
+ ;;
+ shr.u r10=r8,16 // three times lucky
+ zxt2 r11=r8
+ ;;
+ add r8=r10,r11
+ mov r9=0xffff
+ ;;
+ andcm r8=r9,r8
+ br.ret.sptk.many b0
+END(csum_ipv6_magic)
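The new csum_ipv6_magic entry point above adds the four 32-bit words of each IPv6 address, the length/protocol word and the incoming partial sum, then folds the result to 16 bits and masks the complement with 0xffff (the final andcm). A rough C illustration of that arithmetic, with the byte-order handling (the dep/mux1 sequence) simplified away and purely illustrative names and types:

#include <stdint.h>

static uint16_t ipv6_csum_sketch(const uint32_t saddr[4], const uint32_t daddr[4],
				 uint32_t len, uint32_t proto, uint32_t csum)
{
	uint64_t sum = (uint64_t)csum + len + proto;
	int i;

	for (i = 0; i < 4; i++)
		sum += (uint64_t)saddr[i] + daddr[i];

	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> 33 bits */
	sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 33 -> 32 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* 32 -> 17 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* 17 -> 16 bits */
	return (uint16_t)(~sum & 0xffff);		/* complement, as andcm does */
}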
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585e98a..0c7e94edc20 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
return pte;
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba39..56dc2024220 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
memset(vma, 0, sizeof(*vma));
vma->vm_mm = current->mm;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index b30be7c48ba..474d179966d 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -125,11 +125,10 @@ alloc_pci_controller (int seg)
{
struct pci_controller *controller;
- controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
return NULL;
- memset(controller, 0, sizeof(*controller));
controller->segment = seg;
controller->node = -1;
return controller;
@@ -469,10 +468,11 @@ pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
}
}
-static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
+void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
+EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
@@ -493,6 +493,7 @@ pcibios_fixup_bus (struct pci_bus *b)
}
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
+ platform_pci_fixup_bus(b);
return;
}
@@ -562,8 +563,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
void
pcibios_disable_device (struct pci_dev *dev)
{
- if (dev->is_enabled)
- acpi_pci_irq_disable(dev);
+ BUG_ON(atomic_read(&dev->enable_cnt));
+ acpi_pci_irq_disable(dev);
}
void
@@ -738,75 +739,44 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
return ret;
}
+/* It's defined in drivers/pci/pci.c */
+extern u8 pci_cache_line_size;
+
/**
- * pci_cacheline_size - determine cacheline size for PCI devices
- * @dev: void
+ * set_pci_cacheline_size - determine cacheline size for PCI devices
*
* We want to use the line-size of the outer-most cache. We assume
* that this line-size is the same for all CPUs.
*
* Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
- *
- * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
*/
-static unsigned long
-pci_cacheline_size (void)
+static void __init set_pci_cacheline_size(void)
{
u64 levels, unique_caches;
s64 status;
pal_cache_config_info_t cci;
- static u8 cacheline_size;
-
- if (cacheline_size)
- return cacheline_size;
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
- __FUNCTION__, status);
- return SMP_CACHE_BYTES;
+ printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
+ "(status=%ld)\n", __FUNCTION__, status);
+ return;
}
- status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2,
- &cci);
+ status = ia64_pal_cache_config_info(levels - 1,
+ /* cache_type (data_or_unified)= */ 2, &cci);
if (status != 0) {
- printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
- __FUNCTION__, status);
- return SMP_CACHE_BYTES;
+ printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
+ "(status=%ld)\n", __FUNCTION__, status);
+ return;
}
- cacheline_size = 1 << cci.pcci_line_size;
- return cacheline_size;
+ pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
-/**
- * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * For ia64, we can get the cacheline sizes from PAL.
- *
- * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
- */
-int
-pcibios_prep_mwi (struct pci_dev *dev)
-{
- unsigned long desired_linesize, current_linesize;
- int rc = 0;
- u8 pci_linesize;
-
- desired_linesize = pci_cacheline_size();
-
- pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
- current_linesize = 4 * pci_linesize;
- if (desired_linesize != current_linesize) {
- printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
- pci_name(dev), current_linesize);
- if (current_linesize > desired_linesize) {
- printk(" expected %lu bytes instead\n", desired_linesize);
- rc = -EINVAL;
- } else {
- printk(" correcting to %lu\n", desired_linesize);
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
- }
- }
- return rc;
+static int __init pcibios_init(void)
+{
+ set_pci_cacheline_size();
+ return 0;
}
+
+subsys_initcall(pcibios_init);
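Note that the replacement above stores the line size in 32-bit units, which is what the generic PCI core programs into the PCI_CACHE_LINE_SIZE register: assuming PAL reports a 128-byte outermost cache line (pcci_line_size == 7), the new code sets pci_cache_line_size = (1 << 7) / 4 = 32 dwords.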
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 2d78f34dd76..0a59371d347 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,13 +4,14 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved.
#
CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
- huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+ huberror.o io_acpi_init.o io_common.o \
+ io_init.o iomv.o klconflib.o pio_phys.o \
sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
new file mode 100644
index 00000000000..99d7f278612
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/sn_sal.h>
+#include "xtalk/hubdev.h"
+#include <linux/acpi.h>
+
+
+/*
+ * The code in this file will only be executed when running with
+ * a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
+ */
+
+
+/*
+ * This value must match the UUID the PROM uses
+ * (io/acpi/defblk.c) when building a vendor descriptor.
+ */
+struct acpi_vendor_uuid sn_uuid = {
+ .subtype = 0,
+ .data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
+ 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
+};
+
+/*
+ * Perform the early IO init in PROM.
+ */
+static s64
+sal_ioif_init(u64 *result)
+{
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ SAL_CALL_NOLOCK(isrv,
+ SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
+ *result = isrv.v0;
+ return isrv.status;
+}
+
+/*
+ * sn_hubdev_add - The 'add' function of the acpi_sn_hubdev_driver.
+ * Called for every "SGIHUB" or "SGITIO" device defined
+ * in the ACPI namespace.
+ */
+static int __init
+sn_hubdev_add(struct acpi_device *device)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ u64 addr;
+ struct hubdev_info *hubdev;
+ struct hubdev_info *hubdev_ptr;
+ int i;
+ u64 nasid;
+ struct acpi_resource *resource;
+ int ret = 0;
+ acpi_status status;
+ struct acpi_resource_vendor_typed *vendor;
+ extern void sn_common_hubdev_init(struct hubdev_info *);
+
+ status = acpi_get_vendor_resource(device->handle, METHOD_NAME__CRS,
+ &sn_uuid, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR
+ "sn_hubdev_add: acpi_get_vendor_resource() failed: %d\n",
+ status);
+ return 1;
+ }
+
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
+ if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+ sizeof(struct hubdev_info *)) {
+ printk(KERN_ERR
+ "sn_hubdev_add: Invalid vendor data length: %d\n",
+ vendor->byte_length);
+ ret = 1;
+ goto exit;
+ }
+
+ memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
+ hubdev_ptr = __va((struct hubdev_info *) addr);
+
+ nasid = hubdev_ptr->hdi_nasid;
+ i = nasid_to_cnodeid(nasid);
+ hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+ *hubdev = *hubdev_ptr;
+ sn_common_hubdev_init(hubdev);
+
+exit:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+/*
+ * sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
+ * the ACPI Vendor resource for this bus.
+ */
+static struct pcibus_bussoft *
+sn_get_bussoft_ptr(struct pci_bus *bus)
+{
+ u64 addr;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ struct acpi_resource *resource;
+ acpi_status status;
+ struct acpi_resource_vendor_typed *vendor;
+
+
+ handle = PCI_CONTROLLER(bus)->acpi_handle;
+ status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
+ &sn_uuid, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "get_acpi_pcibus_ptr: "
+ "get_acpi_bussoft_info() failed: %d\n",
+ status);
+ return NULL;
+ }
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
+
+ if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
+ sizeof(struct pcibus_bussoft *)) {
+ printk(KERN_ERR
+ "get_acpi_bussoft_ptr: Invalid vendor data "
+ "length %d\n", vendor->byte_length);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+ memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
+ prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
+ kfree(buffer.pointer);
+
+ return prom_bussoft_ptr;
+}
+
+/*
+ * sn_acpi_bus_fixup
+ */
+void
+sn_acpi_bus_fixup(struct pci_bus *bus)
+{
+ struct pci_dev *pci_dev = NULL;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ extern void sn_common_bus_fixup(struct pci_bus *,
+ struct pcibus_bussoft *);
+
+ if (!bus->parent) { /* If root bus */
+ prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
+ if (prom_bussoft_ptr == NULL) {
+ printk(KERN_ERR
+ "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to "
+ "obtain prom_bussoft_ptr\n",
+ pci_domain_nr(bus), bus->number);
+ return;
+ }
+ sn_common_bus_fixup(bus, prom_bussoft_ptr);
+ }
+ list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+ sn_pci_fixup_slot(pci_dev);
+ }
+}
+
+/*
+ * sn_acpi_slot_fixup - Perform any SN specific slot fixup.
+ * At present there does not appear to be
+ * any generic way to handle a ROM image
+ * that has been shadowed by the PROM, so
+ * we pass a pointer to it within the
+ * pcidev_info structure.
+ */
+
+void
+sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
+{
+ void __iomem *addr;
+ size_t size;
+
+ if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
+ /*
+ * A valid ROM image exists and has been shadowed by the
+ * PROM. Setup the pci_dev ROM resource to point to
+ * the shadowed copy.
+ */
+ size = dev->resource[PCI_ROM_RESOURCE].end -
+ dev->resource[PCI_ROM_RESOURCE].start;
+ addr =
+ ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
+ size);
+ dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
+ dev->resource[PCI_ROM_RESOURCE].end =
+ (unsigned long) addr + size;
+ dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
+ }
+}
+
+static struct acpi_driver acpi_sn_hubdev_driver = {
+ .name = "SGI HUBDEV Driver",
+ .ids = "SGIHUB,SGITIO",
+ .ops = {
+ .add = sn_hubdev_add,
+ },
+};
+
+
+/*
+ * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
+ * nodes and root buses in the DSDT. As a result, bus scanning
+ * will be initiated by the Linux ACPI code.
+ */
+
+void __init
+sn_io_acpi_init(void)
+{
+ u64 result;
+ s64 status;
+
+ acpi_bus_register_driver(&acpi_sn_hubdev_driver);
+ status = sal_ioif_init(&result);
+ if (status || result)
+ panic("sal_ioif_init failed: [%lx] %s\n",
+ status, ia64_sal_strerror(status));
+}
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
new file mode 100644
index 00000000000..d4dd8f4b6b8
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -0,0 +1,613 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+#include <linux/acpi.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+#include <asm/sn/acpi.h>
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+extern void sn_acpi_bus_fixup(struct pci_bus *);
+extern void sn_bus_fixup(struct pci_bus *);
+extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
+extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
+extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
+extern void sn_io_acpi_init(void);
+extern void sn_io_init(void);
+
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+ struct list_head entry;
+ void *sysdata;
+};
+
+int sn_ioif_inited; /* SN I/O infrastructure initialized? */
+
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
+{
+ return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+ return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
+{
+ return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+ .dma_map = sn_default_pci_map,
+ .dma_map_consistent = sn_default_pci_map,
+ .dma_unmap = sn_default_pci_unmap,
+ .bus_fixup = sn_default_pci_bus_fixup,
+};
+
+/*
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+ u64 address)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+ (u64) nasid, (u64) widget_num,
+ (u64) device_num, (u64) address, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline u64
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+ u64 sn_irq_info)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+ (u64) segment, (u64) bus_number, (u64) devfn,
+ (u64) pci_dev,
+ sn_irq_info, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
+ * device.
+ */
+inline struct pcidev_info *
+sn_pcidev_info_get(struct pci_dev *dev)
+{
+ struct pcidev_info *pcidev;
+
+ list_for_each_entry(pcidev,
+ &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
+ if (pcidev->pdi_linux_pcidev == dev)
+ return pcidev;
+ }
+ return NULL;
+}
+
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+ struct sn_flush_device_common *common)
+{
+ struct sn_flush_device_war *war_list;
+ struct sn_flush_device_war *dev_entry;
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ if (!war_implemented) {
+ printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+ "PROM flush WAR\n");
+ war_implemented = 1;
+ }
+
+ war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+ if (!war_list)
+ BUG();
+
+ SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+ nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+ if (isrv.status)
+ panic("sn_device_fixup_war failed: %s\n",
+ ia64_sal_strerror(isrv.status));
+
+ dev_entry = war_list + device;
+ memcpy(common,dev_entry, sizeof(*common));
+ kfree(war_list);
+
+ return isrv.status;
+}
+
+/*
+ * sn_common_hubdev_init() - This routine is called to initialize the HUB data
+ * structure for each node in the system.
+ */
+void __init
+sn_common_hubdev_init(struct hubdev_info *hubdev)
+{
+
+ struct sn_flush_device_kernel *sn_flush_device_kernel;
+ struct sn_flush_device_kernel *dev_entry;
+ s64 status;
+ int widget, device, size;
+
+ /* Attach the error interrupt handlers */
+ if (hubdev->hdi_nasid & 1) /* If TIO */
+ ice_error_init(hubdev);
+ else
+ hub_error_init(hubdev);
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+ hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ return;
+
+ size = (HUB_WIDGET_ID_MAX + 1) *
+ sizeof(struct sn_flush_device_kernel *);
+ hubdev->hdi_flush_nasid_list.widget_p =
+ kzalloc(size, GFP_KERNEL);
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ BUG();
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+ size = DEV_PER_WIDGET *
+ sizeof(struct sn_flush_device_kernel);
+ sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
+ if (!sn_flush_device_kernel)
+ BUG();
+
+ dev_entry = sn_flush_device_kernel;
+ for (device = 0; device < DEV_PER_WIDGET;
+ device++, dev_entry++) {
+ size = sizeof(struct sn_flush_device_common);
+ dev_entry->common = kzalloc(size, GFP_KERNEL);
+ if (!dev_entry->common)
+ BUG();
+ if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
+ status = sal_get_device_dmaflush_list(
+ hubdev->hdi_nasid, widget, device,
+ (u64)(dev_entry->common));
+ else
+ status = sn_device_fixup_war(hubdev->hdi_nasid,
+ widget, device,
+ dev_entry->common);
+ if (status != SALRET_OK)
+ panic("SAL call failed: %s\n",
+ ia64_sal_strerror(status));
+
+ spin_lock_init(&dev_entry->sfdl_flush_lock);
+ }
+
+ if (sn_flush_device_kernel)
+ hubdev->hdi_flush_nasid_list.widget_p[widget] =
+ sn_flush_device_kernel;
+ }
+}
+
+void sn_pci_unfixup_slot(struct pci_dev *dev)
+{
+ struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
+
+ sn_irq_unfixup(dev);
+ pci_dev_put(host_pci_dev);
+ pci_dev_put(dev);
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
+ * with the Linux PCI abstraction layer. Resources
+ * acquired from our PCI provider include PIO maps
+ * to BAR space and interrupt objects.
+ */
+void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+ int segment = pci_domain_nr(dev->bus);
+ int status = 0;
+ struct pcibus_bussoft *bs;
+ struct pci_bus *host_pci_bus;
+ struct pci_dev *host_pci_dev;
+ struct pcidev_info *pcidev_info;
+ struct sn_irq_info *sn_irq_info;
+ unsigned int bus_no, devfn;
+
+ pci_dev_get(dev); /* for the sysdata pointer */
+ pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+ if (!pcidev_info)
+ BUG(); /* Cannot afford to run out of memory */
+
+ sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (!sn_irq_info)
+ BUG(); /* Cannot afford to run out of memory */
+
+ /* Call to retrieve pci device information needed by kernel. */
+ status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
+ dev->devfn,
+ (u64) __pa(pcidev_info),
+ (u64) __pa(sn_irq_info));
+ if (status)
+ BUG(); /* Cannot get platform pci device information */
+
+ /* Add pcidev_info to list in pci_controller.platform_data */
+ list_add_tail(&pcidev_info->pdi_list,
+ &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
+
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_slot_fixup(dev, pcidev_info);
+ else
+ sn_more_slot_fixup(dev, pcidev_info);
+ /*
+ * Using the PROMs values for the PCI host bus, get the Linux
+ * PCI host_pci_dev struct and set up host bus linkages
+ */
+
+ bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
+ devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
+ host_pci_bus = pci_find_bus(segment, bus_no);
+ host_pci_dev = pci_get_slot(host_pci_bus, devfn);
+
+ pcidev_info->host_pci_dev = host_pci_dev;
+ pcidev_info->pdi_linux_pcidev = dev;
+ pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
+ bs = SN_PCIBUS_BUSSOFT(dev->bus);
+ pcidev_info->pdi_pcibus_info = bs;
+
+ if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+ SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+ } else {
+ SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+ }
+
+ /* Only set up IRQ stuff if this device has a host bus context */
+ if (bs && sn_irq_info->irq_irq) {
+ pcidev_info->pdi_sn_irq_info = sn_irq_info;
+ dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
+ sn_irq_fixup(dev, sn_irq_info);
+ } else {
+ pcidev_info->pdi_sn_irq_info = NULL;
+ kfree(sn_irq_info);
+ }
+}
+
+/*
+ * sn_common_bus_fixup - Perform platform specific bus fixup.
+ * Execute the ASIC specific fixup routine
+ * for this bus.
+ */
+void
+sn_common_bus_fixup(struct pci_bus *bus,
+ struct pcibus_bussoft *prom_bussoft_ptr)
+{
+ int cnode;
+ struct pci_controller *controller;
+ struct hubdev_info *hubdev_info;
+ int nasid;
+ void *provider_soft;
+ struct sn_pcibus_provider *provider;
+ struct sn_platform_data *sn_platform_data;
+
+ controller = PCI_CONTROLLER(bus);
+ /*
+ * Per-provider fixup. Copies the bus soft structure from prom
+ * to local area and links SN_PCIBUS_BUSSOFT().
+ */
+
+ if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
+ printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
+ prom_bussoft_ptr->bs_asic_type);
+ return;
+ }
+
+ if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
+ return; /* no further fixup necessary */
+
+ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+ if (provider == NULL)
+ panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
+ prom_bussoft_ptr->bs_asic_type);
+
+ if (provider->bus_fixup)
+ provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
+ controller);
+ else
+ provider_soft = NULL;
+
+ /*
+ * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
+ * after this point.
+ */
+ controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
+ GFP_KERNEL);
+ if (controller->platform_data == NULL)
+ BUG();
+ sn_platform_data =
+ (struct sn_platform_data *) controller->platform_data;
+ sn_platform_data->provider_soft = provider_soft;
+ INIT_LIST_HEAD(&((struct sn_platform_data *)
+ controller->platform_data)->pcidev_info);
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+ &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+
+ /*
+ * If the node information we obtained during the fixup phase is
+ * invalid then set controller->node to -1 (undetermined)
+ */
+ if (controller->node >= num_online_nodes()) {
+ struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
+
+ printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
+ "L_IO=%lx L_MEM=%lx BASE=%lx\n",
+ b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
+ b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
+ printk(KERN_WARNING "on node %d but only %d nodes online."
+ "Association set to undetermined.\n",
+ controller->node, num_online_nodes());
+ controller->node = -1;
+ }
+}
+
+void sn_bus_store_sysdata(struct pci_dev *dev)
+{
+ struct sysdata_el *element;
+
+ element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
+ if (!element) {
+ dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
+ return;
+ }
+ element->sysdata = SN_PCIDEV_INFO(dev);
+ list_add(&element->entry, &sn_sysdata_list);
+}
+
+void sn_bus_free_sysdata(void)
+{
+ struct sysdata_el *element;
+ struct list_head *list, *safe;
+
+ list_for_each_safe(list, safe, &sn_sysdata_list) {
+ element = list_entry(list, struct sysdata_el, entry);
+ list_del(&element->entry);
+ list_del(&(((struct pcidev_info *)
+ (element->sysdata))->pdi_list));
+ kfree(element->sysdata);
+ kfree(element);
+ }
+ return;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and links it to its
+ * own NODE-specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+ struct hubdev_info *hubdev_info;
+ int size;
+ pg_data_t *pg;
+
+ size = sizeof(struct hubdev_info);
+
+ if (node >= num_online_nodes()) /* Headless/memless IO nodes */
+ pg = NODE_DATA(0);
+ else
+ pg = NODE_DATA(node);
+
+ hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+ npda->pdinfo = (void *)hubdev_info;
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+ struct hubdev_info *hubdev;
+
+ hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ return hubdev->hdi_geoid;
+}
+
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+ nasid_t nasid;
+ cnodeid_t cnode;
+ geoid_t geoid;
+ moduleid_t moduleid;
+ u16 bricktype;
+
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ geoid = cnodeid_get_geoid(cnode);
+ moduleid = geo_module(geoid);
+
+ sprintf(address, "module_%c%c%c%c%.2d",
+ '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+ MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+ /* Tollhouse requires slot id to be displayed */
+ bricktype = MODULE_GET_BTYPE(moduleid);
+ if ((bricktype == L1_BRICKTYPE_191010) ||
+ (bricktype == L1_BRICKTYPE_1932))
+ sprintf(address, "%s^%d", address, geo_slot(geoid));
+}
+
+/*
+ * sn_pci_fixup_bus() - Perform SN specific setup of software structs
+ * (pcibus_bussoft, pcidev_info) and hardware
+ * registers, for the specified bus and devices under it.
+ */
+void __devinit
+sn_pci_fixup_bus(struct pci_bus *bus)
+{
+
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_bus_fixup(bus);
+ else
+ sn_bus_fixup(bus);
+}
+
+/*
+ * sn_io_early_init - Perform early IO (and some non-IO) initialization.
+ * In particular, setup the sn_pci_provider[] array.
+ * This needs to be done prior to any bus scanning
+ * (acpi_scan_init()) in the ACPI case, as the SN
+ * bus fixup code will reference the array.
+ */
+static int __init
+sn_io_early_init(void)
+{
+ int i;
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+ return 0;
+
+ /*
+ * prime sn_pci_provider[]. Individual provider init routines will
+ * override their respective default entries.
+ */
+
+ for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+ sn_pci_provider[i] = &sn_pci_default_provider;
+
+ pcibr_init_provider();
+ tioca_init_provider();
+ tioce_init_provider();
+
+ /*
+ * This is needed to avoid bounce limit checks in the blk layer
+ */
+ ia64_max_iommu_merge_mask = ~PAGE_MASK;
+
+ sn_irq_lh_init();
+ INIT_LIST_HEAD(&sn_sysdata_list);
+ sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+#endif
+
+ printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
+ acpi_gbl_DSDT->oem_revision);
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_io_acpi_init();
+ else
+ sn_io_init();
+ return 0;
+}
+
+arch_initcall(sn_io_early_init);
+
+/*
+ * sn_io_late_init() - Perform any final platform specific IO initialization.
+ */
+
+int __init
+sn_io_late_init(void)
+{
+ struct pci_bus *bus;
+ struct pcibus_bussoft *bussoft;
+ cnodeid_t cnode;
+ nasid_t nasid;
+ cnodeid_t near_cnode;
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+ return 0;
+
+ /*
+ * Setup closest node in pci_controller->node for
+ * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
+ * info from the PROM).
+ */
+ bus = NULL;
+ while ((bus = pci_find_next_bus(bus)) != NULL) {
+ bussoft = SN_PCIBUS_BUSSOFT(bus);
+ nasid = NASID_GET(bussoft->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
+ (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE)) {
+ /* TIO PCI Bridge: find nearest node with CPUs */
+ int e = sn_hwperf_get_nearest_node(cnode, NULL,
+ &near_cnode);
+ if (e < 0) {
+ near_cnode = (cnodeid_t)-1; /* use any node */
+ printk(KERN_WARNING "pcibr_bus_fixup: failed "
+ "to find near node with CPUs to TIO "
+ "node %d, err=%d\n", cnode, e);
+ }
+ PCI_CONTROLLER(bus)->node = near_cnode;
+ } else if (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC) {
+ PCI_CONTROLLER(bus)->node = cnode;
+ }
+ }
+
+ sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */
+
+ return 0;
+}
+
+fs_initcall(sn_io_late_init);
+
+EXPORT_SYMBOL(sn_pci_fixup_slot);
+EXPORT_SYMBOL(sn_pci_unfixup_slot);
+EXPORT_SYMBOL(sn_bus_store_sysdata);
+EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
+
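The two initcalls in the new file above rely on standard initcall ordering: arch_initcall() runs before subsys_initcall() and fs_initcall(), so sn_io_early_init() can prime sn_pci_provider[] before any bus scanning, while sn_io_late_init() only runs once PCI enumeration has populated the buses. A minimal sketch of the pattern with illustrative function names, not taken from this patch:

#include <linux/init.h>

static int __init example_early_setup(void)
{
	/* runs at arch_initcall time, before bus scanning */
	return 0;
}

static int __init example_late_setup(void)
{
	/* runs at fs_initcall time, after PCI/ACPI enumeration */
	return 0;
}

arch_initcall(example_early_setup);
fs_initcall(example_late_setup);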
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index dc09a6a28a3..9ad843e0383 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -3,103 +3,28 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/bootmem.h>
-#include <linux/nodemask.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
-#include <asm/sn/sn_feature_sets.h>
-#include <asm/sn/geo.h>
#include <asm/sn/io.h>
-#include <asm/sn/l1.h>
#include <asm/sn/module.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
-#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
-#include <asm/sn/tioca_provider.h>
-#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
-#include "xtalk/xwidgetdev.h"
-
-
-extern void sn_init_cpei_timer(void);
-extern void register_sn_procfs(void);
-
-static struct list_head sn_sysdata_list;
-
-/* sysdata list struct */
-struct sysdata_el {
- struct list_head entry;
- void *sysdata;
-};
-
-struct slab_info {
- struct hubdev_info hubdev;
-};
-
-struct brick {
- moduleid_t id; /* Module ID of this module */
- struct slab_info slab_info[MAX_SLABS + 1];
-};
-
-int sn_ioif_inited; /* SN I/O infrastructure initialized? */
-
-struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
-
-static int max_segment_number; /* Default highest segment number */
-static int max_pcibus_number = 255; /* Default highest pci bus number */
/*
- * Hooks and struct for unsupported pci providers
+ * The code in this file will only be executed when running with
+ * a PROM that does _not_ have base ACPI IO support.
+ * (i.e., SN_ACPI_BASE_SUPPORT() == 0)
*/
-static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
-{
- return 0;
-}
-
-static void
-sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
-{
- return;
-}
-
-static void *
-sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
-{
- return NULL;
-}
-
-static struct sn_pcibus_provider sn_pci_default_provider = {
- .dma_map = sn_default_pci_map,
- .dma_map_consistent = sn_default_pci_map,
- .dma_unmap = sn_default_pci_unmap,
- .bus_fixup = sn_default_pci_bus_fixup,
-};
-
-/*
- * Retrieve the DMA Flush List given nasid, widget, and device.
- * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
- */
-static inline u64
-sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
- u64 address)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
+static int max_segment_number; /* Default highest segment number */
+static int max_pcibus_number = 255; /* Default highest pci bus number */
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
- (u64) nasid, (u64) widget_num,
- (u64) device_num, (u64) address, 0, 0, 0);
- return ret_stuff.status;
-}
/*
* Retrieve the hub device info structure for the given nasid.
@@ -131,93 +56,20 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
return ret_stuff.v0;
}
-/*
- * Retrieve the pci device information given the bus and device|function number.
- */
-static inline u64
-sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
- u64 sn_irq_info)
-{
- struct ia64_sal_retval ret_stuff;
- ret_stuff.status = 0;
- ret_stuff.v0 = 0;
-
- SAL_CALL_NOLOCK(ret_stuff,
- (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
- (u64) segment, (u64) bus_number, (u64) devfn,
- (u64) pci_dev,
- sn_irq_info, 0, 0);
- return ret_stuff.v0;
-}
-
-/*
- * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
- * device.
- */
-inline struct pcidev_info *
-sn_pcidev_info_get(struct pci_dev *dev)
-{
- struct pcidev_info *pcidev;
-
- list_for_each_entry(pcidev,
- &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
- if (pcidev->pdi_linux_pcidev == dev) {
- return pcidev;
- }
- }
- return NULL;
-}
-
-/* Older PROM flush WAR
- *
- * 01/16/06 -- This war will be in place until a new official PROM is released.
- * Additionally note that the struct sn_flush_device_war also has to be
- * removed from arch/ia64/sn/include/xtalk/hubdev.h
- */
-static u8 war_implemented = 0;
-
-static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
- struct sn_flush_device_common *common)
-{
- struct sn_flush_device_war *war_list;
- struct sn_flush_device_war *dev_entry;
- struct ia64_sal_retval isrv = {0,0,0,0};
-
- if (!war_implemented) {
- printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
- "PROM flush WAR\n");
- war_implemented = 1;
- }
-
- war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
- if (!war_list)
- BUG();
-
- SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
- nasid, widget, __pa(war_list), 0, 0, 0 ,0);
- if (isrv.status)
- panic("sn_device_fixup_war failed: %s\n",
- ia64_sal_strerror(isrv.status));
-
- dev_entry = war_list + device;
- memcpy(common,dev_entry, sizeof(*common));
- kfree(war_list);
-
- return isrv.status;
-}
/*
- * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
- * each node in the system.
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ * each node in the system. This function is only
+ * executed when running with a non-ACPI capable PROM.
*/
static void __init sn_fixup_ionodes(void)
{
- struct sn_flush_device_kernel *sn_flush_device_kernel;
- struct sn_flush_device_kernel *dev_entry;
+
struct hubdev_info *hubdev;
u64 status;
u64 nasid;
- int i, widget, device, size;
+ int i;
+ extern void sn_common_hubdev_init(struct hubdev_info *);
/*
* Get SGI Specific HUB chipset information.
@@ -240,70 +92,47 @@ static void __init sn_fixup_ionodes(void)
max_segment_number = hubdev->max_segment_number;
max_pcibus_number = hubdev->max_pcibus_number;
}
+ sn_common_hubdev_init(hubdev);
+ }
+}
- /* Attach the error interrupt handlers */
- if (nasid & 1)
- ice_error_init(hubdev);
- else
- hub_error_init(hubdev);
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
- hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
-
- if (!hubdev->hdi_flush_nasid_list.widget_p)
- continue;
-
- size = (HUB_WIDGET_ID_MAX + 1) *
- sizeof(struct sn_flush_device_kernel *);
- hubdev->hdi_flush_nasid_list.widget_p =
- kzalloc(size, GFP_KERNEL);
- if (!hubdev->hdi_flush_nasid_list.widget_p)
+/*
+ * sn_legacy_pci_window_fixup - Create PCI controller windows for
+ * legacy IO and MEM space. This needs to
+ * be done here, as the PROM does not have
+ * ACPI support defining the root buses
+ * and their resources (_CRS).
+ */
+static void
+sn_legacy_pci_window_fixup(struct pci_controller *controller,
+ u64 legacy_io, u64 legacy_mem)
+{
+ controller->window = kcalloc(2, sizeof(struct pci_window),
+ GFP_KERNEL);
+ if (controller->window == NULL)
BUG();
-
- for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
- size = DEV_PER_WIDGET *
- sizeof(struct sn_flush_device_kernel);
- sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
- if (!sn_flush_device_kernel)
- BUG();
-
- dev_entry = sn_flush_device_kernel;
- for (device = 0; device < DEV_PER_WIDGET;
- device++,dev_entry++) {
- size = sizeof(struct sn_flush_device_common);
- dev_entry->common = kzalloc(size, GFP_KERNEL);
- if (!dev_entry->common)
- BUG();
-
- if (sn_prom_feature_available(
- PRF_DEVICE_FLUSH_LIST))
- status = sal_get_device_dmaflush_list(
- nasid, widget, device,
- (u64)(dev_entry->common));
- else
- status = sn_device_fixup_war(nasid,
- widget, device,
- dev_entry->common);
- if (status != SALRET_OK)
- panic("SAL call failed: %s\n",
- ia64_sal_strerror(status));
-
- spin_lock_init(&dev_entry->sfdl_flush_lock);
- }
-
- if (sn_flush_device_kernel)
- hubdev->hdi_flush_nasid_list.widget_p[widget] =
- sn_flush_device_kernel;
- }
- }
+ controller->window[0].offset = legacy_io;
+ controller->window[0].resource.name = "legacy_io";
+ controller->window[0].resource.flags = IORESOURCE_IO;
+ controller->window[0].resource.start = legacy_io;
+ controller->window[0].resource.end =
+ controller->window[0].resource.start + 0xffff;
+ controller->window[0].resource.parent = &ioport_resource;
+ controller->window[1].offset = legacy_mem;
+ controller->window[1].resource.name = "legacy_mem";
+ controller->window[1].resource.flags = IORESOURCE_MEM;
+ controller->window[1].resource.start = legacy_mem;
+ controller->window[1].resource.end =
+ controller->window[1].resource.start + (1024 * 1024) - 1;
+ controller->window[1].resource.parent = &iomem_resource;
+ controller->windows = 2;
}
/*
* sn_pci_window_fixup() - Create a pci_window for each device resource.
- * Until ACPI support is added, we need this code
- * to setup pci_windows for use by
- * pcibios_bus_to_resource(),
- * pcibios_resource_to_bus(), etc.
+ * It will setup pci_windows for use by
+ * pcibios_bus_to_resource(), pcibios_resource_to_bus(),
+ * etc.
*/
static void
sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
@@ -342,60 +171,22 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
controller->window = new_window;
}
-void sn_pci_unfixup_slot(struct pci_dev *dev)
-{
- struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
-
- sn_irq_unfixup(dev);
- pci_dev_put(host_pci_dev);
- pci_dev_put(dev);
-}
-
/*
- * sn_pci_fixup_slot() - This routine sets up a slot's resources
- * consistent with the Linux PCI abstraction layer. Resources acquired
- * from our PCI provider include PIO maps to BAR space and interrupt
- * objects.
+ * sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
+ * and need to convert the pci_dev->resource
+ * 'start' and 'end' addresses to mapped addresses,
+ * and setup the pci_controller->window array entries.
*/
-void sn_pci_fixup_slot(struct pci_dev *dev)
+void
+sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
{
unsigned int count = 0;
int idx;
- int segment = pci_domain_nr(dev->bus);
- int status = 0;
- struct pcibus_bussoft *bs;
- struct pci_bus *host_pci_bus;
- struct pci_dev *host_pci_dev;
- struct pcidev_info *pcidev_info;
s64 pci_addrs[PCI_ROM_RESOURCE + 1];
- struct sn_irq_info *sn_irq_info;
- unsigned long size;
- unsigned int bus_no, devfn;
-
- pci_dev_get(dev); /* for the sysdata pointer */
- pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
- if (!pcidev_info)
- BUG(); /* Cannot afford to run out of memory */
-
- sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
- if (!sn_irq_info)
- BUG(); /* Cannot afford to run out of memory */
-
- /* Call to retrieve pci device information needed by kernel. */
- status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
- dev->devfn,
- (u64) __pa(pcidev_info),
- (u64) __pa(sn_irq_info));
- if (status)
- BUG(); /* Cannot get platform pci device information */
-
- /* Add pcidev_info to list in sn_pci_controller struct */
- list_add_tail(&pcidev_info->pdi_list,
- &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
+ unsigned long addr, end, size, start;
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
- unsigned long start, end, addr;
if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
pci_addrs[idx] = -1;
@@ -419,60 +210,28 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
dev->resource[idx].parent = &ioport_resource;
else
dev->resource[idx].parent = &iomem_resource;
+ /* If ROM, mark as shadowed in PROM */
+ if (idx == PCI_ROM_RESOURCE)
+ dev->resource[idx].flags |= IORESOURCE_ROM_BIOS_COPY;
}
/* Create a pci_window in the pci_controller struct for
* each device resource.
*/
if (count > 0)
sn_pci_window_fixup(dev, count, pci_addrs);
-
- /*
- * Using the PROMs values for the PCI host bus, get the Linux
- * PCI host_pci_dev struct and set up host bus linkages
- */
-
- bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
- devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
- host_pci_bus = pci_find_bus(segment, bus_no);
- host_pci_dev = pci_get_slot(host_pci_bus, devfn);
-
- pcidev_info->host_pci_dev = host_pci_dev;
- pcidev_info->pdi_linux_pcidev = dev;
- pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
- bs = SN_PCIBUS_BUSSOFT(dev->bus);
- pcidev_info->pdi_pcibus_info = bs;
-
- if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
- SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
- } else {
- SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
- }
-
- /* Only set up IRQ stuff if this device has a host bus context */
- if (bs && sn_irq_info->irq_irq) {
- pcidev_info->pdi_sn_irq_info = sn_irq_info;
- dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
- sn_irq_fixup(dev, sn_irq_info);
- } else {
- pcidev_info->pdi_sn_irq_info = NULL;
- kfree(sn_irq_info);
- }
}
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
- * consistent with the Linux PCI abstraction layer.
+ * consistent with the Linux PCI abstraction layer.
*/
-void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
+static void
+sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
- int status;
- int nasid, cnode;
+ s64 status = 0;
struct pci_controller *controller;
- struct sn_pci_controller *sn_controller;
struct pcibus_bussoft *prom_bussoft_ptr;
- struct hubdev_info *hubdev_info;
- void *provider_soft;
- struct sn_pcibus_provider *provider;
+
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
@@ -480,261 +239,77 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
return; /*bus # does not exist */
prom_bussoft_ptr = __va(prom_bussoft_ptr);
- /* Allocate a sn_pci_controller, which has a pci_controller struct
- * as the first member.
- */
- sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
- if (!sn_controller)
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
BUG();
- INIT_LIST_HEAD(&sn_controller->pcidev_info);
- controller = &sn_controller->pci_controller;
controller->segment = segment;
- if (bus == NULL) {
- bus = pci_scan_bus(busnum, &pci_root_ops, controller);
- if (bus == NULL)
- goto error_return; /* error, or bus already scanned */
- bus->sysdata = NULL;
- }
-
- if (bus->sysdata)
- goto error_return; /* sysdata already alloc'd */
-
/*
- * Per-provider fixup. Copies the contents from prom to local
- * area and links SN_PCIBUS_BUSSOFT().
+ * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
+ * (platform_data will be overwritten later in sn_common_bus_fixup())
*/
+ controller->platform_data = prom_bussoft_ptr;
- if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
- goto error_return; /* unsupported asic type */
-
- if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
- goto error_return; /* no further fixup necessary */
-
- provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
- if (provider == NULL)
- goto error_return; /* no provider registerd for this asic */
+ bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+ if (bus == NULL)
+ goto error_return; /* error, or bus already scanned */
bus->sysdata = controller;
- if (provider->bus_fixup)
- provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
- else
- provider_soft = NULL;
-
- if (provider_soft == NULL) {
- /* fixup failed or not applicable */
- bus->sysdata = NULL;
- goto error_return;
- }
-
- /*
- * Setup pci_windows for legacy IO and MEM space.
- * (Temporary until ACPI support is in place.)
- */
- controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
- if (controller->window == NULL)
- BUG();
- controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.name = "legacy_io";
- controller->window[0].resource.flags = IORESOURCE_IO;
- controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
- controller->window[0].resource.end =
- controller->window[0].resource.start + 0xffff;
- controller->window[0].resource.parent = &ioport_resource;
- controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.name = "legacy_mem";
- controller->window[1].resource.flags = IORESOURCE_MEM;
- controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
- controller->window[1].resource.end =
- controller->window[1].resource.start + (1024 * 1024) - 1;
- controller->window[1].resource.parent = &iomem_resource;
- controller->windows = 2;
-
- /*
- * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
- * after this point.
- */
-
- PCI_CONTROLLER(bus)->platform_data = provider_soft;
- nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
- cnode = nasid_to_cnodeid(nasid);
- hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
- &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
- /*
- * If the node information we obtained during the fixup phase is invalid
- * then set controller->node to -1 (undetermined)
- */
- if (controller->node >= num_online_nodes()) {
- struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
-
- printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
- "L_IO=%lx L_MEM=%lx BASE=%lx\n",
- b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
- b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
- printk(KERN_WARNING "on node %d but only %d nodes online."
- "Association set to undetermined.\n",
- controller->node, num_online_nodes());
- controller->node = -1;
- }
return;
error_return:
- kfree(sn_controller);
+ kfree(controller);
return;
}
-void sn_bus_store_sysdata(struct pci_dev *dev)
+/*
+ * sn_bus_fixup
+ */
+void
+sn_bus_fixup(struct pci_bus *bus)
{
- struct sysdata_el *element;
-
- element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
- if (!element) {
- dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
- return;
- }
- element->sysdata = SN_PCIDEV_INFO(dev);
- list_add(&element->entry, &sn_sysdata_list);
-}
+ struct pci_dev *pci_dev = NULL;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ extern void sn_common_bus_fixup(struct pci_bus *,
+ struct pcibus_bussoft *);
+
+
+ if (!bus->parent) { /* If root bus */
+ prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
+ if (prom_bussoft_ptr == NULL) {
+ printk(KERN_ERR
+ "sn_bus_fixup: 0x%04x:0x%02x Unable to "
+ "obtain prom_bussoft_ptr\n",
+ pci_domain_nr(bus), bus->number);
+ return;
+ }
+ sn_common_bus_fixup(bus, prom_bussoft_ptr);
+ sn_legacy_pci_window_fixup(PCI_CONTROLLER(bus),
+ prom_bussoft_ptr->bs_legacy_io,
+ prom_bussoft_ptr->bs_legacy_mem);
+ }
+ list_for_each_entry(pci_dev, &bus->devices, bus_list) {
+ sn_pci_fixup_slot(pci_dev);
+ }
-void sn_bus_free_sysdata(void)
-{
- struct sysdata_el *element;
- struct list_head *list, *safe;
-
- list_for_each_safe(list, safe, &sn_sysdata_list) {
- element = list_entry(list, struct sysdata_el, entry);
- list_del(&element->entry);
- list_del(&(((struct pcidev_info *)
- (element->sysdata))->pdi_list));
- kfree(element->sysdata);
- kfree(element);
- }
- return;
}
/*
- * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ * sn_io_init - PROM does not have ACPI support to define nodes or root buses,
+ * so we need to do things the hard way, including initiating the
+ * bus scanning ourselves.
*/
-#define PCI_BUSES_TO_SCAN 256
-
-static int __init sn_pci_init(void)
+void __init sn_io_init(void)
{
int i, j;
- struct pci_dev *pci_dev = NULL;
-
- if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
- return 0;
-
- /*
- * prime sn_pci_provider[]. Individial provider init routines will
- * override their respective default entries.
- */
-
- for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
- sn_pci_provider[i] = &sn_pci_default_provider;
- pcibr_init_provider();
- tioca_init_provider();
- tioce_init_provider();
-
- /*
- * This is needed to avoid bounce limit checks in the blk layer
- */
- ia64_max_iommu_merge_mask = ~PAGE_MASK;
sn_fixup_ionodes();
- sn_irq_lh_init();
- INIT_LIST_HEAD(&sn_sysdata_list);
- sn_init_cpei_timer();
-
-#ifdef CONFIG_PROC_FS
- register_sn_procfs();
-#endif
/* busses are not known yet ... */
for (i = 0; i <= max_segment_number; i++)
for (j = 0; j <= max_pcibus_number; j++)
sn_pci_controller_fixup(i, j, NULL);
-
- /*
- * Generic Linux PCI Layer has created the pci_bus and pci_dev
- * structures - time for us to add our SN PLatform specific
- * information.
- */
-
- while ((pci_dev =
- pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
- sn_pci_fixup_slot(pci_dev);
-
- sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
-
- return 0;
-}
-
-/*
- * hubdev_init_node() - Creates the HUB data structure and link them to it's
- * own NODE specific data area.
- */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
-{
- struct hubdev_info *hubdev_info;
- int size;
- pg_data_t *pg;
-
- size = sizeof(struct hubdev_info);
-
- if (node >= num_online_nodes()) /* Headless/memless IO nodes */
- pg = NODE_DATA(0);
- else
- pg = NODE_DATA(node);
-
- hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
-
- npda->pdinfo = (void *)hubdev_info;
}
-
-geoid_t
-cnodeid_get_geoid(cnodeid_t cnode)
-{
- struct hubdev_info *hubdev;
-
- hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
- return hubdev->hdi_geoid;
-}
-
-void sn_generate_path(struct pci_bus *pci_bus, char *address)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- geoid_t geoid;
- moduleid_t moduleid;
- u16 bricktype;
-
- nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
- cnode = nasid_to_cnodeid(nasid);
- geoid = cnodeid_get_geoid(cnode);
- moduleid = geo_module(geoid);
-
- sprintf(address, "module_%c%c%c%c%.2d",
- '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
- '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
- '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
- MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
-
- /* Tollhouse requires slot id to be displayed */
- bricktype = MODULE_GET_BTYPE(moduleid);
- if ((bricktype == L1_BRICKTYPE_191010) ||
- (bricktype == L1_BRICKTYPE_1932))
- sprintf(address, "%s^%d", address, geo_slot(geoid));
-}
-
-subsys_initcall(sn_pci_init);
-EXPORT_SYMBOL(sn_pci_fixup_slot);
-EXPORT_SYMBOL(sn_pci_unfixup_slot);
-EXPORT_SYMBOL(sn_pci_controller_fixup);
-EXPORT_SYMBOL(sn_bus_store_sysdata);
-EXPORT_SYMBOL(sn_bus_free_sysdata);
-EXPORT_SYMBOL(sn_generate_path);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 7ce3cdad627..4aa4f301d56 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -3,10 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/vga.h>
@@ -15,6 +16,7 @@
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
+#include <asm/sn/acpi.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
@@ -31,11 +33,14 @@ void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
if (IS_LEGACY_VGA_IOPORT(port))
- port += vga_console_iobase;
+ return (__ia64_mk_io_addr(port));
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return NULL;
- return ((void *)(port | __IA64_UNCACHED_OFFSET));
+ if (SN_ACPI_BASE_SUPPORT())
+ return (__ia64_mk_io_addr(port));
+ else
+ return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long addr;
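The sn_io_addr() hunk above replaces the VGA special-casing and the uncached-offset encoding with the generic ia64 port mapping whenever the PROM advertises ACPI support. A minimal C sketch of the resulting decision chain, using only the names visible in the diff and leaving out the simulator branch:

/* Sketch of the port-to-address translation after this change; the
 * real routine also has a separate IS_RUNNING_ON_SIMULATOR() path. */
void *sn_io_addr_sketch(unsigned long port)
{
	if (IS_LEGACY_VGA_IOPORT(port))
		return __ia64_mk_io_addr(port);		/* via io_space[] */
	if (port < 64 * 1024)
		return NULL;	/* legacy ports point at nothing on sn2 */
	if (SN_ACPI_BASE_SUPPORT())
		return __ia64_mk_io_addr(port);
	return (void *)(port | __IA64_UNCACHED_OFFSET);
}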
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 0b49459a878..8c5bee01eaa 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -117,7 +117,10 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
nasid_t nasid, int slice)
{
int vector;
+ int cpuid;
+#ifdef CONFIG_SMP
int cpuphys;
+#endif
int64_t bridge;
int local_widget, status;
nasid_t local_nasid;
@@ -146,7 +149,6 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
vector = sn_irq_info->irq_irq;
/* Free the old PROM new_irq_info structure */
sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
unregister_intr_pda(new_irq_info);
/* allocate a new PROM new_irq_info struct */
@@ -160,8 +162,10 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
return NULL;
}
- cpuphys = nasid_slice_to_cpuid(nasid, slice);
- new_irq_info->irq_cpuid = cpuphys;
+ /* Update kernels new_irq_info with new target info */
+ cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
+ new_irq_info->irq_slice);
+ new_irq_info->irq_cpuid = cpuid;
register_intr_pda(new_irq_info);
pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
@@ -180,6 +184,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpuid);
set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif
@@ -299,6 +304,9 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
nasid_t nasid = sn_irq_info->irq_nasid;
int slice = sn_irq_info->irq_slice;
int cpu = nasid_slice_to_cpuid(nasid, slice);
+#ifdef CONFIG_SMP
+ int cpuphys;
+#endif
pci_dev_get(pci_dev);
sn_irq_info->irq_cpuid = cpu;
@@ -311,6 +319,10 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info);
+#ifdef CONFIG_SMP
+ cpuphys = cpu_physical_id(cpu);
+ set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
+#endif
}
void sn_irq_unfixup(struct pci_dev *pci_dev)
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 6ffd1f850d4..b3a435fd70f 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -136,10 +136,6 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
*/
msg.data = 0x100 + irq;
-#ifdef CONFIG_SMP
- set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0);
-#endif
-
write_msi_msg(irq, &msg);
set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 7a2d824c5ce..a934ad06942 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -388,6 +388,14 @@ void __init sn_setup(char **cmdline_p)
ia64_sn_plat_set_error_handling_features(); // obsolete
ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+ /*
+ * Note: The calls to notify the PROM of ACPI and PCI Segment
+ * support must be done prior to acpi_load_tables(), as
+ * an ACPI capable PROM will rebuild the DSDT as result
+ * of the call.
+ */
+ ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
+ ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
@@ -413,6 +421,16 @@ void __init sn_setup(char **cmdline_p)
if (! vga_console_membase)
sn_scan_pcdp();
+ /*
+ * Setup legacy IO space.
+ * vga_console_iobase maps to PCI IO Space address 0 on the
+ * bus containing the VGA console.
+ */
+ if (vga_console_iobase) {
+ io_space[0].mmio_base = vga_console_iobase;
+ io_space[0].sparse = 0;
+ }
+
if (vga_console_membase) {
/* usable vga ... make tty0 the preferred default console */
if (!strstr(*cmdline_p, "console="))
@@ -751,5 +769,13 @@ int sn_prom_feature_available(int id)
return 0;
return test_bit(id, sn_prom_features);
}
+
+void
+sn_kernel_launch_event(void)
+{
+ /* ignore status until we understand possible failure, if any*/
+ if (ia64_sn_kernel_launch_event())
+ printk(KERN_ERR "KEXEC is not supported in this PROM, Please update the PROM.\n");
+}
EXPORT_SYMBOL(sn_prom_feature_available);
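The comment added to sn_setup() above encodes an ordering constraint rather than new logic. A hedged sketch of the intended sequence; note that acpi_load_tables() is actually invoked later from the generic ACPI bring-up path, not inside sn_setup():

/* Sketch of the ordering the comment describes, not the real call chain. */
ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
ia64_sn_set_os_feature(OSF_ACPI_ENABLE);	/* ACPI-capable PROM rebuilds the DSDT here */
/* ... only afterwards, from generic ACPI init ... */
acpi_load_tables();				/* sees the rebuilt DSDT */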
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index feaf1a6e810..493380b2c05 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -552,7 +552,7 @@ static void __exit tiocx_exit(void)
bus_unregister(&tiocx_bus_type);
}
-subsys_initcall(tiocx_init);
+fs_initcall(tiocx_init);
module_exit(tiocx_exit);
/************************************************************************
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 27dd7df0f44..6846dc9b432 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
@@ -109,7 +109,6 @@ void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
- cnodeid_t near_cnode;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
@@ -186,20 +185,6 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
return NULL;
}
- if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
- /* TIO PCI Bridge: find nearest node with CPUs */
- int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
-
- if (e < 0) {
- near_cnode = (cnodeid_t)-1; /* use any node */
- printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
- "near node with CPUs to TIO node %d, err=%d\n",
- cnode, e);
- }
- controller->node = near_cnode;
- }
- else
- controller->node = cnode;
return soft;
}
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 46e16dcf597..35f854fb612 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -15,7 +15,6 @@
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce_provider.h>
-#include <asm/sn/sn2/sn_hwperf.h>
/*
* 1/26/2006
@@ -990,8 +989,6 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
static void *
tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
- int my_nasid;
- cnodeid_t my_cnode, mem_cnode;
struct tioce_common *tioce_common;
struct tioce_kernel *tioce_kern;
struct tioce __iomem *tioce_mmr;
@@ -1035,21 +1032,6 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum);
- /*
- * identify closest nasid for memory allocations
- */
-
- my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
- my_cnode = nasid_to_cnodeid(my_nasid);
-
- if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
- printk(KERN_WARNING "tioce_bus_fixup: failed to find "
- "closest node with MEM to TIO node %d\n", my_cnode);
- mem_cnode = (cnodeid_t)-1; /* use any node */
- }
-
- controller->node = mem_cnode;
-
return tioce_common;
}
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 0e7778be33c..936205f7aba 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -196,9 +196,7 @@ static unsigned long __init setup_memory(void)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START, INITRD_SIZE);
- initrd_start = INITRD_START ?
- INITRD_START + PAGE_OFFSET : 0;
-
+ initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
initrd_start, INITRD_SIZE);
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index b60cea4aeba..092ea86bb07 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -21,7 +21,7 @@
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
index 3d5f0614585..5596f3df833 100644
--- a/arch/m32r/lib/csum_partial_copy.c
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -27,9 +27,8 @@
/*
* Copy while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
- int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
{
sum = csum_partial(src, len, sum);
memcpy(dst, src, len);
@@ -42,10 +41,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-unsigned int
-csum_partial_copy_from_user (const unsigned char __user *src,
- unsigned char *dst,
- int len, unsigned int sum, int *err_ptr)
+__wsum
+csum_partial_copy_from_user (const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
{
int missing;
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index abb34ccd598..c7efdb0aefc 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -105,9 +105,7 @@ unsigned long __init setup_memory(void)
if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START,
INITRD_SIZE);
- initrd_start = INITRD_START ?
- INITRD_START + PAGE_OFFSET : 0;
-
+ initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
initrd_start, INITRD_SIZE);
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index de1304c9111..fa015d80161 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -52,10 +52,9 @@ void *amiga_chip_alloc(unsigned long size, const char *name)
#ifdef DEBUG
printk("amiga_chip_alloc: allocate %ld bytes\n", size);
#endif
- res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!res)
return NULL;
- memset(res, 0, sizeof(struct resource));
res->name = name;
if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
diff --git a/arch/m68k/atari/hades-pci.c b/arch/m68k/atari/hades-pci.c
index 6ca57b6564d..bee2b1443e3 100644
--- a/arch/m68k/atari/hades-pci.c
+++ b/arch/m68k/atari/hades-pci.c
@@ -375,10 +375,9 @@ struct pci_bus_info * __init init_hades_pci(void)
* Allocate memory for bus info structure.
*/
- bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
+ bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
if (!bus)
return NULL;
- memset(bus, 0, sizeof(struct pci_bus_info));
/*
* Claim resources. The m68k has no separate I/O space, both
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index cb13c6e3cca..aed3be29e06 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -39,8 +39,7 @@
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-unsigned int
-csum_partial (const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
/*
@@ -133,9 +132,9 @@ EXPORT_SYMBOL(csum_partial);
* copy from user space while checksumming, with exception handling.
*/
-unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
- int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err)
{
/*
* GCC doesn't like more than 10 operands for the asm
@@ -325,8 +324,8 @@ csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
* copy from kernel space while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
__asm__("movel %2,%4\n\t"
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 911f2ce3f53..2adbeb16e1b 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -99,7 +99,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 6d920d4bdc3..aa70dde5422 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -173,7 +173,7 @@ config CLOCK_DIV
On many SoC style CPUs the master CPU clock is also used to drive
on-chip peripherals. The clock that is distributed to these
peripherals is sometimes a fixed ratio of the master clock
- frequency. If so then set this to the divider ration of the
+ frequency. If so then set this to the divider ratio of the
master clock to the peripheral clock. If not sure then select 1.
config OLDMASK
@@ -192,7 +192,7 @@ config PILOT3
Support for the Palm Pilot 1000/5000, Personal/Pro and PalmIII.
config XCOPILOT_BUGS
- bool " (X)Copilot support"
+ bool "(X)Copilot support"
depends on PILOT3
help
Support the bugs of Xcopilot.
@@ -216,20 +216,20 @@ config DRAGEN2
Support for the DragenEngine II board.
config DIRECT_IO_ACCESS
- bool " Allow user to access IO directly"
+ bool "Allow user to access IO directly"
depends on (UCSIMM || UCDIMM || DRAGEN2)
help
Disable the CPU internal registers protection in user mode,
to allow a user application to read/write them.
config INIT_LCD
- bool " Initialize LCD"
+ bool "Initialize LCD"
depends on (UCSIMM || UCDIMM || DRAGEN2)
help
Initialize the LCD controller of the 68x328 processor.
config MEMORY_RESERVE
- int " Memory reservation (MiB)"
+ int "Memory reservation (MiB)"
depends on (UCSIMM || UCDIMM)
help
Reserve certain memory regions on 68x328 based boards.
@@ -409,7 +409,7 @@ config MOD5272
Support for the Netburner MOD-5272 board.
config ROMFS_FROM_ROM
- bool " ROMFS image not RAM resident"
+ bool "ROMFS image not RAM resident"
depends on (NETtel || SNAPGEAR)
help
The ROMfs filesystem will stay resident in the FLASH/ROM, not be
@@ -565,7 +565,7 @@ config ROMVEC
depends on ROM
help
This is almost always the same as the base of the ROM. Since on all
- 68000 type varients the vectors are at the base of the boot device
+ 68000 type variants the vectors are at the base of the boot device
on system startup.
config ROMVECSIZE
@@ -574,7 +574,7 @@ config ROMVECSIZE
depends on ROM
help
Define the size of the vector region in ROM. For most 68000
- varients this would be 0x400 bytes in size. Set to 0 if you do
+ variants this would be 0x400 bytes in size. Set to 0 if you do
not want a vector region at the start of the ROM.
config ROMSTART
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 1e62150f358..25327c9eadd 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(kernel_thread);
/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index c18a8330695..941955dc3b7 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -290,7 +290,7 @@ void dump(struct pt_regs *fp)
unsigned char *tp;
int i;
- printk(KERN_EMERG "\nCURRENT PROCESS:\n\n");
+ printk(KERN_EMERG "\n" KERN_EMERG "CURRENT PROCESS:\n" KERN_EMERG "\n");
printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid);
if (current->mm) {
@@ -301,7 +301,8 @@ void dump(struct pt_regs *fp)
(int) current->mm->end_data,
(int) current->mm->end_data,
(int) current->mm->brk);
- printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n\n",
+ printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n"
+ KERN_EMERG "\n",
(int) current->mm->start_stack,
(int)(((unsigned long) current) + THREAD_SIZE));
}
@@ -312,36 +313,35 @@ void dump(struct pt_regs *fp)
fp->d0, fp->d1, fp->d2, fp->d3);
printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
fp->d4, fp->d5, fp->a0, fp->a1);
- printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n", (unsigned int) rdusp(),
- (unsigned int) fp);
+ printk(KERN_EMERG "\n" KERN_EMERG "USP: %08x TRAPFRAME: %08x\n",
+ (unsigned int) rdusp(), (unsigned int) fp);
- printk(KERN_EMERG "\nCODE:");
+ printk(KERN_EMERG "\n" KERN_EMERG "CODE:");
tp = ((unsigned char *) fp->pc) - 0x20;
for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
- printk(KERN_EMERG "%08x ", (int) *sp++);
+ printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+ printk("%08x ", (int) *sp++);
}
- printk(KERN_EMERG "\n");
+ printk("\n" KERN_EMERG "\n");
- printk(KERN_EMERG "\nKERNEL STACK:");
+ printk(KERN_EMERG "KERNEL STACK:");
tp = ((unsigned char *) fp) - 0x40;
for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
- printk(KERN_EMERG "%08x ", (int) *sp++);
+ printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+ printk("%08x ", (int) *sp++);
}
- printk(KERN_EMERG "\n");
- printk(KERN_EMERG "\n");
+ printk("\n" KERN_EMERG "\n");
- printk(KERN_EMERG "\nUSER STACK:");
+ printk(KERN_EMERG "USER STACK:");
tp = (unsigned char *) (rdusp() - 0x10);
for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
if ((i % 0x10) == 0)
- printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
- printk(KERN_EMERG "%08x ", (int) *sp++);
+ printk("\n" KERN_EMERG "%08x: ", (int) (tp + i));
+ printk("%08x ", (int) *sp++);
}
- printk(KERN_EMERG "\n\n");
+ printk("\n" KERN_EMERG "\n");
}
/*
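The printk() rework in the process.c hunk follows the convention that a KERN_* marker only applies to the output line it begins, so a multi-line dump needs the marker repeated after every embedded newline rather than once per printk() call. A minimal illustration of the pattern being converted to (usp and fp are placeholder values):

/* Preferred: each emitted line carries its own log level. */
printk(KERN_EMERG "USP: %08x\n" KERN_EMERG "TRAPFRAME: %08x\n", usp, fp);

/* Avoided: the second line would be printed without an explicit level. */
printk(KERN_EMERG "USP: %08x\nTRAPFRAME: %08x\n", usp, fp);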
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 7b21959eaea..9cf2e4d1fc7 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -36,10 +36,7 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
-
-#ifdef CONFIG_BLK_DEV_INITRD
#include <asm/pgtable.h>
-#endif
unsigned long memory_start;
unsigned long memory_end;
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index c3494b8447d..3265b2d734d 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -137,7 +137,7 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
asmlinkage int sys_ipc (uint call, int first, int second,
int third, void *ptr, long fifth)
{
- int version;
+ int version, ret;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
@@ -190,6 +190,27 @@ asmlinkage int sys_ipc (uint call, int first, int second,
default:
return -EINVAL;
}
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = do_shmat (first, ptr, second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong __user *) third);
+ }
+ }
+ case SHMDT:
+ return sys_shmdt (ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second, ptr);
+ default:
+ return -ENOSYS;
+ }
return -EINVAL;
}
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index 17649d2543e..9129b3a5258 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -127,11 +127,12 @@ void show_stack(struct task_struct *task, unsigned long *stack)
if (stack + 1 > endstack)
break;
if (i % 8 == 0)
- printk(KERN_EMERG "\n ");
- printk(KERN_EMERG " %08lx", *stack++);
+ printk("\n" KERN_EMERG " ");
+ printk(" %08lx", *stack++);
}
+ printk("\n");
- printk(KERN_EMERG "\nCall Trace:");
+ printk(KERN_EMERG "Call Trace:");
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
@@ -146,12 +147,12 @@ void show_stack(struct task_struct *task, unsigned long *stack)
if (((addr >= (unsigned long) &_start) &&
(addr <= (unsigned long) &_etext))) {
if (i % 4 == 0)
- printk(KERN_EMERG "\n ");
- printk(KERN_EMERG " [<%08lx>]", addr);
+ printk("\n" KERN_EMERG " ");
+ printk(" [<%08lx>]", addr);
i++;
}
}
- printk(KERN_EMERG "\n");
+ printk("\n");
}
void bad_super_trap(struct frame *fp)
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 58afa8be604..2b2a10da64a 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -60,6 +60,7 @@ SECTIONS {
#endif
.text : {
+ _text = .;
_stext = . ;
*(.text)
SCHED_TEXT
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 7bec6fdee34..269d83bfbbe 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -96,9 +96,9 @@ out:
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
- return ~do_csum(iph,ihl*4);
+ return (__force __sum16)~do_csum(iph,ihl*4);
}
/*
@@ -113,15 +113,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
- if (sum > result)
+ result += (__force u32)sum;
+ if ((__force u32)sum > result)
result += 1;
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -130,21 +130,21 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
- return ~do_csum(buff,len);
+ return (__force __sum16)~do_csum(buff,len);
}
/*
* copy from fs while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
- int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err)
{
if (csum_err) *csum_err = 0;
- memcpy(dst, src, len);
+ memcpy(dst, (__force const void *)src, len);
return csum_partial(dst, len, sum);
}
@@ -152,8 +152,8 @@ csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
* copy from ds while checksumming, otherwise like csum_partial
*/
-unsigned int
-csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
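These checksum hunks switch the helpers over to the sparse-annotated __wsum/__sum16 types; the end-around-carry addition in csum_partial() above keeps the running sum in ones'-complement form. A short sketch, under the same rule, of folding such a 32-bit partial sum down to the final 16-bit checksum (an illustration, not the kernel's csum_fold()):

static __sum16 fold_sketch(__wsum partial)
{
	u32 sum = (__force u32)partial;
	sum = (sum & 0xffff) + (sum >> 16);	/* add the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry  */
	return (__force __sum16)~sum;		/* final complement        */
}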
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index f2edb6498cd..b9aa0ca29bf 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -64,6 +64,26 @@
negl %d0 /* negate bits */
.endm
+#elif defined(CONFIG_M520x)
+.macro GET_MEM_SIZE
+ clrl %d0
+ movel MCF_MBAR+MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */
+ andl #0x1f, %d2 /* Get only the chip select size */
+ beq 3f /* Check if it is enabled */
+ addql #1, %d2 /* Form exponent */
+ moveql #1, %d0
+ lsll %d2, %d0 /* 2 ^ exponent */
+3:
+ movel MCF_MBAR+MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */
+ andl #0x1f, %d2 /* Get only the chip select size */
+ beq 4f /* Check if it is enabled */
+ addql #1, %d2 /* Form exponent */
+ moveql #1, %d1
+ lsll %d2, %d1 /* 2 ^ exponent */
+ addl %d1, %d0 /* Total size of SDRAM in d0 */
+4:
+.endm
+
#else
#error "ERROR: I don't know how to probe your boards memory size?"
#endif
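The new M520x GET_MEM_SIZE block reads each SDRAM chip-select register and derives the bank size from the low five bits, which encode the size as an exponent. The same computation in C, as a sketch (the register names and field layout are taken from the asm comments above, not verified elsewhere):

/* size = 2 ^ (field + 1) bytes, or 0 if the chip select is disabled */
static unsigned long sdram_cs_size(unsigned long cs_cfg)
{
	unsigned long field = cs_cfg & 0x1f;
	return field ? 1ul << (field + 1) : 0;
}
/* total = sdram_cs_size(MCFSIM_SDCS0 value) + sdram_cs_size(MCFSIM_SDCS1 value) */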
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f00933..e5668af1978 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
/*
* timers.c -- generic ColdFire hardware timer support.
*
- * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
*/
/***************************************************************************/
@@ -44,6 +44,14 @@ unsigned int mcf_timerlevel = 5;
extern void mcf_settimericr(int timer, int level);
extern int mcf_timerirqpending(int timer);
+#if defined(CONFIG_M532x)
+#define __raw_readtrr __raw_readl
+#define __raw_writetrr __raw_writel
+#else
+#define __raw_readtrr __raw_readw
+#define __raw_writetrr __raw_writew
+#endif
+
/***************************************************************************/
void coldfire_tick(void)
@@ -57,7 +65,7 @@ void coldfire_tick(void)
void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
- __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+ __raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
@@ -76,7 +84,7 @@ unsigned long coldfire_timer_offset(void)
unsigned long trr, tcn, offset;
tcn = __raw_readw(TA(MCFTIMER_TCN));
- trr = __raw_readw(TA(MCFTIMER_TRR));
+ trr = __raw_readtrr(TA(MCFTIMER_TRR));
offset = (tcn * (1000000 / HZ)) / trr;
/* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@ void coldfire_profile_init(void)
/* Set up TIMER 2 as high speed profile clock */
__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
- __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+ __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3622e..1b36f626176 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp,
{
}
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
{
if (!op) {
/* read */
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index 2ea51479f13..2ef06242398 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -25,6 +25,7 @@
.global _periph_base
#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+#define ROMEND (CONFIG_ROMBASE + CONFIG_ROMSIZE)
#define REGB 0x1000
#define PEPAR (_dprbase + REGB + 0x0016)
@@ -175,7 +176,7 @@ configure_chip_select_0:
move.l %d0, BR0
configure_chip_select_1:
- move.l #__rom_end, %d0
+ move.l #ROMEND, %d0
subi.l #__rom_start, %d0
subq.l #0x01, %d0
eori.l #SIM_OR_MASK, %d0
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1443024b1c7..d8af858fe3f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -16,6 +16,7 @@ config MIPS_MTX1
bool "4G Systems MTX-1 board"
select DMA_NONCOHERENT
select HW_HAS_PCI
+ select RESOURCES_64BIT if PCI
select SOC_AU1500
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -32,6 +33,7 @@ config MIPS_PB1000
select SOC_AU1000
select DMA_NONCOHERENT
select HW_HAS_PCI
+ select RESOURCES_64BIT if PCI
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -41,6 +43,7 @@ config MIPS_PB1100
select SOC_AU1100
select DMA_NONCOHERENT
select HW_HAS_PCI
+ select RESOURCES_64BIT if PCI
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -50,6 +53,7 @@ config MIPS_PB1500
select SOC_AU1500
select DMA_NONCOHERENT
select HW_HAS_PCI
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -59,6 +63,7 @@ config MIPS_PB1550
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_DISABLE_OBSOLETE_IDE
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -67,6 +72,7 @@ config MIPS_PB1200
select SOC_AU1200
select DMA_NONCOHERENT
select MIPS_DISABLE_OBSOLETE_IDE
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -75,6 +81,7 @@ config MIPS_DB1000
select SOC_AU1000
select DMA_NONCOHERENT
select HW_HAS_PCI
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -91,6 +98,7 @@ config MIPS_DB1500
select DMA_NONCOHERENT
select HW_HAS_PCI
select MIPS_DISABLE_OBSOLETE_IDE
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -101,6 +109,7 @@ config MIPS_DB1550
select HW_HAS_PCI
select DMA_NONCOHERENT
select MIPS_DISABLE_OBSOLETE_IDE
+ select RESOURCES_64BIT if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -233,6 +242,7 @@ config LASAT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config MIPS_ATLAS
bool "MIPS Atlas board"
@@ -256,6 +266,7 @@ config MIPS_ATLAS
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
This enables support for the MIPS Technologies Atlas evaluation
board.
@@ -266,8 +277,8 @@ config MIPS_MALTA
select BOOT_ELF32
select HAVE_STD_PC_SERIAL_PORT
select DMA_NONCOHERENT
- select IRQ_CPU
select GENERIC_ISA_DMA
+ select IRQ_CPU
select HW_HAS_PCI
select I8259
select MIPS_BOARDS_GEN
@@ -410,6 +421,7 @@ config MOMENCO_OCELOT_C
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
The Ocelot is a MIPS-based Single Board Computer (SBC) made by
Momentum Computer <http://www.momenco.com/>.
@@ -534,7 +546,7 @@ config SGI_IP22
select HW_HAS_EISA
select IP22_CPU_SCACHE
select IRQ_CPU
- select NO_ISA if ISA
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
select SWAP_IO_SPACE
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
@@ -560,6 +572,7 @@ config SGI_IP27
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_NUMA
select SYS_SUPPORTS_SMP
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
workstations. To compile a Linux kernel that runs on these, say Y
@@ -766,6 +779,23 @@ config TOSHIBA_RBTX4938
endchoice
+config KEXEC
+ bool "Kexec system call (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+ but it is independent of the system firmware. And like a reboot
+ you can start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+ It is an ongoing process to be certain the hardware in a machine
+ is properly shutdown, so do not be surprised if this code does not
+ initially work for you. It may help to enable device hotplugging
+ support. As of this writing the exact hardware interface is
+ strongly in flux, so no good recommendation can be made.
+
source "arch/mips/ddb5xxx/Kconfig"
source "arch/mips/gt64120/ev64120/Kconfig"
source "arch/mips/jazz/Kconfig"
@@ -809,6 +839,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ bool
+ default n
+
#
# Select some configuration options automatically based on user selections.
#
@@ -864,8 +898,11 @@ config MIPS_NILE4
config MIPS_DISABLE_OBSOLETE_IDE
bool
+config GENERIC_ISA_DMA_SUPPORT_BROKEN
+ bool
+
#
-# Endianess selection. Suffiently obscure so many users don't know what to
+# Endianness selection. Sufficiently obscure so many users don't know what to
# answer,so we try hard to limit the available choices. Also the use of a
# choice statement should be more obvious to the user.
#
@@ -874,7 +911,7 @@ choice
help
Some MIPS machines can be configured for either little or big endian
byte order. These modes require different kernels and a different
- Linux distribution. In general there is one prefered byteorder for a
+ Linux distribution. In general there is one preferred byteorder for a
particular system but some systems are just as commonly used in the
one or the other endianess.
@@ -967,6 +1004,7 @@ config SOC_PNX8550
select HW_HAS_PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config SWAP_IO_SPACE
bool
@@ -1248,6 +1286,7 @@ config CPU_RM9000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select WEAK_ORDERING
config CPU_SB1
bool "SB1"
@@ -1256,6 +1295,7 @@ config CPU_SB1
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select WEAK_ORDERING
endchoice
@@ -1316,6 +1356,8 @@ config SYS_HAS_CPU_RM9000
config SYS_HAS_CPU_SB1
bool
+config WEAK_ORDERING
+ bool
endmenu
#
@@ -1835,13 +1877,11 @@ source "drivers/pci/Kconfig"
config ISA
bool
-config NO_ISA
- bool
-
config EISA
bool "EISA support"
depends on HW_HAS_EISA
select ISA
+ select GENERIC_ISA_DMA
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
@@ -1922,6 +1962,11 @@ config COMPAT
depends on MIPS32_COMPAT
default y
+config SYSVIPC_COMPAT
+ bool
+ depends on COMPAT && SYSVIPC
+ default y
+
config MIPS32_O32
bool "Kernel support for o32 binaries"
depends on MIPS32_COMPAT
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index d580d46f967..641aa30b363 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -63,9 +63,7 @@ cflags-y += -mabi=64
ifdef CONFIG_BUILD_ELF64
cflags-y += $(call cc-option,-mno-explicit-relocs)
else
-# -msym32 can not be used for modules since they are loaded into XKSEG
-CFLAGS_MODULE += $(call cc-option,-mno-explicit-relocs)
-CFLAGS_KERNEL += $(call cc-option,-msym32)
+cflags-y += $(call cc-option,-msym32)
endif
endif
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 2abe132bb07..9cf7b671583 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -70,7 +70,6 @@ extern irq_cpustat_t irq_stat [NR_CPUS];
extern void mips_timer_interrupt(void);
static void setup_local_irq(unsigned int irq, int type, int int_req);
-static unsigned int startup_irq(unsigned int irq);
static void end_irq(unsigned int irq_nr);
static inline void mask_and_ack_level_irq(unsigned int irq_nr);
static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr);
@@ -84,20 +83,6 @@ void (*board_init_irq)(void);
static DEFINE_SPINLOCK(irq_lock);
-static unsigned int startup_irq(unsigned int irq_nr)
-{
- local_enable_irq(irq_nr);
- return 0;
-}
-
-
-static void shutdown_irq(unsigned int irq_nr)
-{
- local_disable_irq(irq_nr);
- return;
-}
-
-
inline void local_enable_irq(unsigned int irq_nr)
{
if (irq_nr > AU1000_LAST_INTC0_INT) {
@@ -249,41 +234,37 @@ void restore_local_and_enable(int controller, unsigned long mask)
static struct irq_chip rise_edge_irq_type = {
.typename = "Au1000 Rise Edge",
- .startup = startup_irq,
- .shutdown = shutdown_irq,
- .enable = local_enable_irq,
- .disable = local_disable_irq,
.ack = mask_and_ack_rise_edge_irq,
+ .mask = local_disable_irq,
+ .mask_ack = mask_and_ack_rise_edge_irq,
+ .unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip fall_edge_irq_type = {
.typename = "Au1000 Fall Edge",
- .startup = startup_irq,
- .shutdown = shutdown_irq,
- .enable = local_enable_irq,
- .disable = local_disable_irq,
.ack = mask_and_ack_fall_edge_irq,
+ .mask = local_disable_irq,
+ .mask_ack = mask_and_ack_fall_edge_irq,
+ .unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip either_edge_irq_type = {
.typename = "Au1000 Rise or Fall Edge",
- .startup = startup_irq,
- .shutdown = shutdown_irq,
- .enable = local_enable_irq,
- .disable = local_disable_irq,
.ack = mask_and_ack_either_edge_irq,
+ .mask = local_disable_irq,
+ .mask_ack = mask_and_ack_either_edge_irq,
+ .unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip level_irq_type = {
.typename = "Au1000 Level",
- .startup = startup_irq,
- .shutdown = shutdown_irq,
- .enable = local_enable_irq,
- .disable = local_disable_irq,
.ack = mask_and_ack_level_irq,
+ .mask = local_disable_irq,
+ .mask_ack = mask_and_ack_level_irq,
+ .unmask = local_enable_irq,
.end = end_irq,
};
@@ -328,31 +309,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].chip = &rise_edge_irq_type;
+ set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
- irq_desc[irq_nr].chip = &fall_edge_irq_type;
+ set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].chip = &either_edge_irq_type;
+ set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
- irq_desc[irq_nr].chip = &level_irq_type;
+ set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
- irq_desc[irq_nr].chip = &level_irq_type;
+ set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
@@ -380,31 +361,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].chip = &rise_edge_irq_type;
+ set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
- irq_desc[irq_nr].chip = &fall_edge_irq_type;
+ set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].chip = &either_edge_irq_type;
+ set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
- irq_desc[irq_nr].chip = &level_irq_type;
+ set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
- irq_desc[irq_nr].chip = &level_irq_type;
+ set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<irq_nr, IC0_CFG0CLR);
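The au1000 changes above are part of the wider conversion in this series to the generic IRQ layer of this era: instead of filling in startup/shutdown/enable/disable per chip and poking irq_desc[] directly, a chip only supplies ack/mask/mask_ack/unmask and each line is registered with set_irq_chip(). A hedged sketch of the target pattern (the example_* helpers are hypothetical):

static struct irq_chip example_level_chip = {
	.typename = "example",
	.ack      = example_mask_and_ack,
	.mask     = example_mask,
	.mask_ack = example_mask_and_ack,
	.unmask   = example_unmask,
};

for (i = base; i < base + nr_irqs; i++)
	set_irq_chip_and_handler(i, &example_level_chip, handle_level_irq);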
diff --git a/arch/mips/au1000/pb1200/board_setup.c b/arch/mips/au1000/pb1200/board_setup.c
index 8b953b9fc25..043302b7fe5 100644
--- a/arch/mips/au1000/pb1200/board_setup.c
+++ b/arch/mips/au1000/pb1200/board_setup.c
@@ -55,7 +55,7 @@
#endif
extern void _board_init_irq(void);
-extern void (*board_init_irq)(void);
+extern void (*board_init_irq)(void);
void board_reset (void)
{
@@ -151,11 +151,7 @@ void __init board_setup(void)
#endif
/* Setup Pb1200 External Interrupt Controller */
- {
- extern void (*board_init_irq)(void);
- extern void _board_init_irq(void);
- board_init_irq = _board_init_irq;
- }
+ board_init_irq = _board_init_irq;
}
int
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 82e569d5b02..4c46f0e7378 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -45,25 +45,22 @@ static inline void galileo_irq(void)
{
unsigned int mask, pending, devfn;
- mask = GALILEO_INL(GT_INTRMASK_OFS);
- pending = GALILEO_INL(GT_INTRCAUSE_OFS) & mask;
+ mask = GT_READ(GT_INTRMASK_OFS);
+ pending = GT_READ(GT_INTRCAUSE_OFS) & mask;
- if (pending & GALILEO_INTR_T0EXP) {
-
- GALILEO_OUTL(~GALILEO_INTR_T0EXP, GT_INTRCAUSE_OFS);
+ if (pending & GT_INTR_T0EXP_MSK) {
+ GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_T0EXP_MSK);
do_IRQ(COBALT_GALILEO_IRQ);
-
- } else if (pending & GALILEO_INTR_RETRY_CTR) {
-
- devfn = GALILEO_INL(GT_PCI0_CFGADDR_OFS) >> 8;
- GALILEO_OUTL(~GALILEO_INTR_RETRY_CTR, GT_INTRCAUSE_OFS);
- printk(KERN_WARNING "Galileo: PCI retry count exceeded (%02x.%u)\n",
- PCI_SLOT(devfn), PCI_FUNC(devfn));
-
+ } else if (pending & GT_INTR_RETRYCTR0_MSK) {
+ devfn = GT_READ(GT_PCI0_CFGADDR_OFS) >> 8;
+ GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_RETRYCTR0_MSK);
+ printk(KERN_WARNING
+ "Galileo: PCI retry count exceeded (%02x.%u)\n",
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
} else {
-
- GALILEO_OUTL(mask & ~pending, GT_INTRMASK_OFS);
- printk(KERN_WARNING "Galileo: masking unexpected interrupt %08x\n", pending);
+ GT_WRITE(GT_INTRMASK_OFS, mask & ~pending);
+ printk(KERN_WARNING
+ "Galileo: masking unexpected interrupt %08x\n", pending);
}
}
@@ -104,7 +101,7 @@ void __init arch_init_irq(void)
* Mask all Galileo interrupts. The Galileo
* handler is set in cobalt_timer_setup()
*/
- GALILEO_OUTL(0, GT_INTRMASK_OFS);
+ GT_WRITE(GT_INTRMASK_OFS, 0);
init_i8259_irqs(); /* 0 ... 15 */
mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */
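Note the accessor convention change in the Cobalt hunks: the removed GALILEO_OUTL() took the value first and the register offset second, while the GT_READ()/GT_WRITE() helpers replacing it take the offset first. A one-line comparison drawn from the diff:

GALILEO_OUTL(mask, GT_INTRMASK_OFS);	/* old style: value, then offset */
GT_WRITE(GT_INTRMASK_OFS, mask);	/* new style: offset, then value */
mask = GT_READ(GT_INTRMASK_OFS);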
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index bf9dc72b972..e8f0f20b852 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -51,23 +51,23 @@ const char *get_system_type(void)
void __init plat_timer_setup(struct irqaction *irq)
{
/* Load timer value for HZ (TCLK is 50MHz) */
- GALILEO_OUTL(50*1000*1000 / HZ, GT_TC0_OFS);
+ GT_WRITE(GT_TC0_OFS, 50*1000*1000 / HZ);
/* Enable timer */
- GALILEO_OUTL(GALILEO_ENTC0 | GALILEO_SELTC0, GT_TC_CONTROL_OFS);
+ GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
/* Register interrupt */
setup_irq(COBALT_GALILEO_IRQ, irq);
/* Enable interrupt */
- GALILEO_OUTL(GALILEO_INTR_T0EXP | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
+ GT_WRITE(GT_INTRMASK_OFS, GT_INTR_T0EXP_MSK | GT_READ(GT_INTRMASK_OFS));
}
extern struct pci_ops gt64111_pci_ops;
static struct resource cobalt_mem_resource = {
- .start = GT64111_MEM_BASE,
- .end = GT64111_MEM_END,
+ .start = GT_DEF_PCI0_MEM0_BASE,
+ .end = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
.name = "PCI memory",
.flags = IORESOURCE_MEM
};
@@ -115,7 +115,7 @@ static struct pci_controller cobalt_pci_controller = {
.mem_resource = &cobalt_mem_resource,
.mem_offset = 0,
.io_resource = &cobalt_io_resource,
- .io_offset = 0 - GT64111_IO_BASE
+ .io_offset = 0 - GT_DEF_PCI0_IO_BASE,
};
void __init plat_mem_setup(void)
@@ -128,7 +128,7 @@ void __init plat_mem_setup(void)
_machine_halt = cobalt_machine_halt;
pm_power_off = cobalt_machine_power_off;
- set_io_port_base(CKSEG1ADDR(GT64111_IO_BASE));
+ set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));
/* I/O port resource must include UART and LCD/buttons */
ioport_resource.end = 0x0fffffff;
@@ -139,7 +139,7 @@ void __init plat_mem_setup(void)
/* Read the cobalt id register out of the PCI config space */
PCI_CFG_SET(devfn, (VIA_COBALT_BRD_ID_REG & ~0x3));
- cobalt_board_id = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+ cobalt_board_id = GT_READ(GT_PCI0_CFGDATA_OFS);
cobalt_board_id >>= ((VIA_COBALT_BRD_ID_REG & 3) * 8);
cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(cobalt_board_id);
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index ba52705a273..96249aa5df5 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -53,14 +53,6 @@ vrc5477_irq_disable(unsigned int irq)
ll_vrc5477_irq_disable(irq - vrc5477_irq_base);
}
-static unsigned int vrc5477_irq_startup(unsigned int irq)
-{
- vrc5477_irq_enable(irq);
- return 0;
-}
-
-#define vrc5477_irq_shutdown vrc5477_irq_disable
-
static void
vrc5477_irq_ack(unsigned int irq)
{
@@ -91,11 +83,10 @@ vrc5477_irq_end(unsigned int irq)
struct irq_chip vrc5477_irq_controller = {
.typename = "vrc5477_irq",
- .startup = vrc5477_irq_startup,
- .shutdown = vrc5477_irq_shutdown,
- .enable = vrc5477_irq_enable,
- .disable = vrc5477_irq_disable,
.ack = vrc5477_irq_ack,
+ .mask = vrc5477_irq_disable,
+ .mask_ack = vrc5477_irq_ack,
+ .unmask = vrc5477_irq_enable,
.end = vrc5477_irq_end
};
@@ -103,12 +94,8 @@ void __init vrc5477_irq_init(u32 irq_base)
{
u32 i;
- for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &vrc5477_irq_controller;
- }
+ for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++)
+ set_irq_chip(i, &vrc5477_irq_controller);
vrc5477_irq_base = irq_base;
}
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index 3e374d05978..6d55e8aab66 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/addrspace.h>
@@ -26,6 +25,7 @@
#include <asm/cpu.h>
#include <asm/irq_regs.h>
#include <asm/processor.h>
+#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/traps.h>
@@ -231,13 +231,10 @@ irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id)
static inline void dec_kn02_be_init(void)
{
volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
- unsigned long flags;
kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR);
kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN);
- spin_lock_irqsave(&kn02_lock, flags);
-
/* Preset write-only bits of the Control Register cache. */
cached_kn02_csr = *csr | KN02_CSR_LEDS;
@@ -247,8 +244,6 @@ static inline void dec_kn02_be_init(void)
cached_kn02_csr |= KN02_CSR_CORRECT;
*csr = cached_kn02_csr;
iob();
-
- spin_unlock_irqrestore(&kn02_lock, flags);
}
static inline void dec_kn03_be_init(void)
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 31dd47d1002..b251ef864c3 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -267,7 +267,7 @@ handle_it:
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
- j do_IRQ
+ j dec_irq_dispatch
nop
#ifdef CONFIG_32BIT
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 41cd2a96148..4c7cb4048d3 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/dec/ioasic.h>
@@ -21,8 +20,6 @@
#include <asm/dec/ioasic_ints.h>
-static DEFINE_SPINLOCK(ioasic_lock);
-
static int ioasic_irq_base;
@@ -52,65 +49,30 @@ static inline void clear_ioasic_irq(unsigned int irq)
ioasic_write(IO_REG_SIR, sir);
}
-static inline void enable_ioasic_irq(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioasic_lock, flags);
- unmask_ioasic_irq(irq);
- spin_unlock_irqrestore(&ioasic_lock, flags);
-}
-
-static inline void disable_ioasic_irq(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioasic_lock, flags);
- mask_ioasic_irq(irq);
- spin_unlock_irqrestore(&ioasic_lock, flags);
-}
-
-
-static inline unsigned int startup_ioasic_irq(unsigned int irq)
-{
- enable_ioasic_irq(irq);
- return 0;
-}
-
-#define shutdown_ioasic_irq disable_ioasic_irq
-
static inline void ack_ioasic_irq(unsigned int irq)
{
- spin_lock(&ioasic_lock);
mask_ioasic_irq(irq);
- spin_unlock(&ioasic_lock);
fast_iob();
}
static inline void end_ioasic_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- enable_ioasic_irq(irq);
+ unmask_ioasic_irq(irq);
}
static struct irq_chip ioasic_irq_type = {
.typename = "IO-ASIC",
- .startup = startup_ioasic_irq,
- .shutdown = shutdown_ioasic_irq,
- .enable = enable_ioasic_irq,
- .disable = disable_ioasic_irq,
.ack = ack_ioasic_irq,
- .end = end_ioasic_irq,
+ .mask = mask_ioasic_irq,
+ .mask_ack = ack_ioasic_irq,
+ .unmask = unmask_ioasic_irq,
};
-#define startup_ioasic_dma_irq startup_ioasic_irq
-
-#define shutdown_ioasic_dma_irq shutdown_ioasic_irq
-
-#define enable_ioasic_dma_irq enable_ioasic_irq
+#define unmask_ioasic_dma_irq unmask_ioasic_irq
-#define disable_ioasic_dma_irq disable_ioasic_irq
+#define mask_ioasic_dma_irq mask_ioasic_irq
#define ack_ioasic_dma_irq ack_ioasic_irq
@@ -123,11 +85,10 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
static struct irq_chip ioasic_dma_irq_type = {
.typename = "IO-ASIC-DMA",
- .startup = startup_ioasic_dma_irq,
- .shutdown = shutdown_ioasic_dma_irq,
- .enable = enable_ioasic_dma_irq,
- .disable = disable_ioasic_dma_irq,
.ack = ack_ioasic_dma_irq,
+ .mask = mask_ioasic_dma_irq,
+ .mask_ack = ack_ioasic_dma_irq,
+ .unmask = unmask_ioasic_dma_irq,
.end = end_ioasic_dma_irq,
};
@@ -140,18 +101,11 @@ void __init init_ioasic_irqs(int base)
ioasic_write(IO_REG_SIMR, 0);
fast_iob();
- for (i = base; i < base + IO_INR_DMA; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &ioasic_irq_type;
- }
- for (; i < base + IO_IRQ_LINES; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &ioasic_dma_irq_type;
- }
+ for (i = base; i < base + IO_INR_DMA; i++)
+ set_irq_chip_and_handler(i, &ioasic_irq_type,
+ handle_level_irq);
+ for (; i < base + IO_IRQ_LINES; i++)
+ set_irq_chip(i, &ioasic_dma_irq_type);
ioasic_irq_base = base;
}
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c
index f19b4617a0a..d3b8002bf1e 100644
--- a/arch/mips/dec/kn01-berr.c
+++ b/arch/mips/dec/kn01-berr.c
@@ -20,8 +20,10 @@
#include <linux/types.h>
#include <asm/inst.h>
+#include <asm/irq_regs.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
+#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 04a367a60a5..916e46b8ccd 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/dec/kn02.h>
@@ -29,7 +28,6 @@
* There is no default value -- it has to be initialized.
*/
u32 cached_kn02_csr;
-DEFINE_SPINLOCK(kn02_lock);
static int kn02_irq_base;
@@ -53,55 +51,18 @@ static inline void mask_kn02_irq(unsigned int irq)
*csr = cached_kn02_csr;
}
-static inline void enable_kn02_irq(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&kn02_lock, flags);
- unmask_kn02_irq(irq);
- spin_unlock_irqrestore(&kn02_lock, flags);
-}
-
-static inline void disable_kn02_irq(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&kn02_lock, flags);
- mask_kn02_irq(irq);
- spin_unlock_irqrestore(&kn02_lock, flags);
-}
-
-
-static unsigned int startup_kn02_irq(unsigned int irq)
-{
- enable_kn02_irq(irq);
- return 0;
-}
-
-#define shutdown_kn02_irq disable_kn02_irq
-
static void ack_kn02_irq(unsigned int irq)
{
- spin_lock(&kn02_lock);
mask_kn02_irq(irq);
- spin_unlock(&kn02_lock);
iob();
}
-static void end_kn02_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- enable_kn02_irq(irq);
-}
-
static struct irq_chip kn02_irq_type = {
.typename = "KN02-CSR",
- .startup = startup_kn02_irq,
- .shutdown = shutdown_kn02_irq,
- .enable = enable_kn02_irq,
- .disable = disable_kn02_irq,
.ack = ack_kn02_irq,
- .end = end_kn02_irq,
+ .mask = mask_kn02_irq,
+ .mask_ack = ack_kn02_irq,
+ .unmask = unmask_kn02_irq,
};
@@ -109,22 +70,15 @@ void __init init_kn02_irqs(int base)
{
volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
KN02_CSR);
- unsigned long flags;
int i;
/* Mask interrupts. */
- spin_lock_irqsave(&kn02_lock, flags);
cached_kn02_csr &= ~KN02_CSR_IOINTEN;
*csr = cached_kn02_csr;
iob();
- spin_unlock_irqrestore(&kn02_lock, flags);
-
- for (i = base; i < base + KN02_IRQ_LINES; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &kn02_irq_type;
- }
+
+ for (i = base; i < base + KN02_IRQ_LINES; i++)
+ set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
kn02_irq_base = base;
}
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index 6b7481e97be..d34032ac492 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -761,3 +761,9 @@ void __init arch_init_irq(void)
if (dec_interrupt[DEC_IRQ_HALT] >= 0)
setup_irq(dec_interrupt[DEC_IRQ_HALT], &haltirq);
}
+
+asmlinkage unsigned int dec_irq_dispatch(unsigned int irq)
+{
+ do_IRQ(irq);
+ return 0;
+}
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 69e424e9ab6..8b7e0c17ac3 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -151,7 +151,7 @@ static void dec_timer_ack(void)
CMOS_READ(RTC_REG_C); /* Ack the RTC interrupt. */
}
-static unsigned int dec_ioasic_hpt_read(void)
+static cycle_t dec_ioasic_hpt_read(void)
{
/*
* The free-running counter is 32-bit which is good for about
@@ -171,7 +171,7 @@ void __init dec_time_init(void)
if (!cpu_has_counter && IOASIC)
/* For pre-R4k systems we use the I/O ASIC's counter. */
- mips_hpt_read = dec_ioasic_hpt_read;
+ clocksource_mips.read = dec_ioasic_hpt_read;
/* Set up the rate of periodic DS1287 interrupts. */
CMOS_WRITE(RTC_REF_CLCK_32KHZ | (16 - __ffs(HZ)), RTC_REG_A);
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 197ed4c2ba0..8d880f0b06e 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -56,49 +56,21 @@ static void emma2rh_irq_disable(unsigned int irq)
ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
}
-static unsigned int emma2rh_irq_startup(unsigned int irq)
-{
- emma2rh_irq_enable(irq);
- return 0;
-}
-
-#define emma2rh_irq_shutdown emma2rh_irq_disable
-
-static void emma2rh_irq_ack(unsigned int irq)
-{
- /* disable interrupt - some handler will re-enable the irq
- * and if the interrupt is leveled, we will have infinite loop
- */
- ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
-}
-
-static void emma2rh_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- ll_emma2rh_irq_enable(irq - emma2rh_irq_base);
-}
-
struct irq_chip emma2rh_irq_controller = {
.typename = "emma2rh_irq",
- .startup = emma2rh_irq_startup,
- .shutdown = emma2rh_irq_shutdown,
- .enable = emma2rh_irq_enable,
- .disable = emma2rh_irq_disable,
- .ack = emma2rh_irq_ack,
- .end = emma2rh_irq_end,
- .set_affinity = NULL /* no affinity stuff for UP */
+ .ack = emma2rh_irq_disable,
+ .mask = emma2rh_irq_disable,
+ .mask_ack = emma2rh_irq_disable,
+ .unmask = emma2rh_irq_enable,
};
void emma2rh_irq_init(u32 irq_base)
{
u32 i;
- for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &emma2rh_irq_controller;
- }
+ for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++)
+ set_irq_chip_and_handler(i, &emma2rh_irq_controller,
+ handle_level_irq);
emma2rh_irq_base = irq_base;
}
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 0b36eb001e6..2116d9be5fa 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -48,46 +48,21 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
}
-static unsigned int emma2rh_sw_irq_startup(unsigned int irq)
-{
- emma2rh_sw_irq_enable(irq);
- return 0;
-}
-
-#define emma2rh_sw_irq_shutdown emma2rh_sw_irq_disable
-
-static void emma2rh_sw_irq_ack(unsigned int irq)
-{
- ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
-}
-
-static void emma2rh_sw_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base);
-}
-
struct irq_chip emma2rh_sw_irq_controller = {
.typename = "emma2rh_sw_irq",
- .startup = emma2rh_sw_irq_startup,
- .shutdown = emma2rh_sw_irq_shutdown,
- .enable = emma2rh_sw_irq_enable,
- .disable = emma2rh_sw_irq_disable,
- .ack = emma2rh_sw_irq_ack,
- .end = emma2rh_sw_irq_end,
- .set_affinity = NULL,
+ .ack = emma2rh_sw_irq_disable,
+ .mask = emma2rh_sw_irq_disable,
+ .mask_ack = emma2rh_sw_irq_disable,
+ .unmask = emma2rh_sw_irq_enable,
};
void emma2rh_sw_irq_init(u32 irq_base)
{
u32 i;
- for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &emma2rh_sw_irq_controller;
- }
+ for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++)
+ set_irq_chip_and_handler(i, &emma2rh_sw_irq_controller,
+ handle_level_irq);
emma2rh_sw_irq_base = irq_base;
}
@@ -126,14 +101,6 @@ static void emma2rh_gpio_irq_disable(unsigned int irq)
ll_emma2rh_gpio_irq_disable(irq - emma2rh_gpio_irq_base);
}
-static unsigned int emma2rh_gpio_irq_startup(unsigned int irq)
-{
- emma2rh_gpio_irq_enable(irq);
- return 0;
-}
-
-#define emma2rh_gpio_irq_shutdown emma2rh_gpio_irq_disable
-
static void emma2rh_gpio_irq_ack(unsigned int irq)
{
irq -= emma2rh_gpio_irq_base;
@@ -149,25 +116,19 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
struct irq_chip emma2rh_gpio_irq_controller = {
.typename = "emma2rh_gpio_irq",
- .startup = emma2rh_gpio_irq_startup,
- .shutdown = emma2rh_gpio_irq_shutdown,
- .enable = emma2rh_gpio_irq_enable,
- .disable = emma2rh_gpio_irq_disable,
.ack = emma2rh_gpio_irq_ack,
+ .mask = emma2rh_gpio_irq_disable,
+ .mask_ack = emma2rh_gpio_irq_ack,
+ .unmask = emma2rh_gpio_irq_enable,
.end = emma2rh_gpio_irq_end,
- .set_affinity = NULL,
};
void emma2rh_gpio_irq_init(u32 irq_base)
{
u32 i;
- for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &emma2rh_gpio_irq_controller;
- }
+ for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++)
+ set_irq_chip(i, &emma2rh_gpio_irq_controller);
emma2rh_gpio_irq_base = irq_base;
}
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index ed4d82b9a24..b3e5796c81d 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -66,38 +66,21 @@ asmlinkage void plat_irq_dispatch(void)
static void disable_ev64120_irq(unsigned int irq_nr)
{
- unsigned long flags;
-
- local_irq_save(flags);
if (irq_nr >= 8) { // All PCI interrupts are on line 5 or 2
clear_c0_status(9 << 10);
} else {
clear_c0_status(1 << (irq_nr + 8));
}
- local_irq_restore(flags);
}
static void enable_ev64120_irq(unsigned int irq_nr)
{
- unsigned long flags;
-
- local_irq_save(flags);
if (irq_nr >= 8) // All PCI interrupts are on line 5 or 2
set_c0_status(9 << 10);
else
set_c0_status(1 << (irq_nr + 8));
- local_irq_restore(flags);
-}
-
-static unsigned int startup_ev64120_irq(unsigned int irq)
-{
- enable_ev64120_irq(irq);
- return 0; /* Never anything pending */
}
-#define shutdown_ev64120_irq disable_ev64120_irq
-#define mask_and_ack_ev64120_irq disable_ev64120_irq
-
static void end_ev64120_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -106,13 +89,11 @@ static void end_ev64120_irq(unsigned int irq)
static struct irq_chip ev64120_irq_type = {
.typename = "EV64120",
- .startup = startup_ev64120_irq,
- .shutdown = shutdown_ev64120_irq,
- .enable = enable_ev64120_irq,
- .disable = disable_ev64120_irq,
- .ack = mask_and_ack_ev64120_irq,
+ .ack = disable_ev64120_irq,
+ .mask = disable_ev64120_irq,
+ .mask_ack = disable_ev64120_irq,
+ .unmask = enable_ev64120_irq,
.end = end_ev64120_irq,
- .set_affinity = NULL
};
void gt64120_irq_setup(void)
@@ -122,8 +103,6 @@ void gt64120_irq_setup(void)
*/
clear_c0_status(ST0_IM);
- local_irq_disable();
-
/*
* Enable timer. Other interrupts will be enabled as they are
* registered.
@@ -133,16 +112,5 @@ void gt64120_irq_setup(void)
void __init arch_init_irq(void)
{
- int i;
-
- /* Let's initialize our IRQ descriptors */
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].status = 0;
- irq_desc[i].chip = &no_irq_chip;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 0;
- spin_lock_init(&irq_desc[i].lock);
- }
-
gt64120_irq_setup();
}
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d5bd6b3a093..f8d417b5c2b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -28,14 +28,6 @@ static void enable_r4030_irq(unsigned int irq)
spin_unlock_irqrestore(&r4030_lock, flags);
}
-static unsigned int startup_r4030_irq(unsigned int irq)
-{
- enable_r4030_irq(irq);
- return 0; /* never anything pending */
-}
-
-#define shutdown_r4030_irq disable_r4030_irq
-
void disable_r4030_irq(unsigned int irq)
{
unsigned int mask = ~(1 << (irq - JAZZ_PARALLEL_IRQ));
@@ -47,34 +39,20 @@ void disable_r4030_irq(unsigned int irq)
spin_unlock_irqrestore(&r4030_lock, flags);
}
-#define mask_and_ack_r4030_irq disable_r4030_irq
-
-static void end_r4030_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_r4030_irq(irq);
-}
-
static struct irq_chip r4030_irq_type = {
.typename = "R4030",
- .startup = startup_r4030_irq,
- .shutdown = shutdown_r4030_irq,
- .enable = enable_r4030_irq,
- .disable = disable_r4030_irq,
- .ack = mask_and_ack_r4030_irq,
- .end = end_r4030_irq,
+ .ack = disable_r4030_irq,
+ .mask = disable_r4030_irq,
+ .mask_ack = disable_r4030_irq,
+ .unmask = enable_r4030_irq,
};
void __init init_r4030_ints(void)
{
int i;
- for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &r4030_irq_type;
- }
+ for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++)
+ set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index de4a238c28b..3da49c5aaf4 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -90,17 +90,6 @@ static unsigned char irc_level[TX3927_NUM_IR] = {
static void jmr3927_irq_disable(unsigned int irq_nr);
static void jmr3927_irq_enable(unsigned int irq_nr);
-static DEFINE_SPINLOCK(jmr3927_irq_lock);
-
-static unsigned int jmr3927_irq_startup(unsigned int irq)
-{
- jmr3927_irq_enable(irq);
-
- return 0;
-}
-
-#define jmr3927_irq_shutdown jmr3927_irq_disable
-
static void jmr3927_irq_ack(unsigned int irq)
{
if (irq == JMR3927_IRQ_IRC_TMR0)
@@ -118,9 +107,7 @@ static void jmr3927_irq_end(unsigned int irq)
static void jmr3927_irq_disable(unsigned int irq_nr)
{
struct tb_irq_space* sp;
- unsigned long flags;
- spin_lock_irqsave(&jmr3927_irq_lock, flags);
for (sp = tb_irq_spaces; sp; sp = sp->next) {
if (sp->start_irqno <= irq_nr &&
irq_nr < sp->start_irqno + sp->nr_irqs) {
@@ -130,15 +117,12 @@ static void jmr3927_irq_disable(unsigned int irq_nr)
break;
}
}
- spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
}
static void jmr3927_irq_enable(unsigned int irq_nr)
{
struct tb_irq_space* sp;
- unsigned long flags;
- spin_lock_irqsave(&jmr3927_irq_lock, flags);
for (sp = tb_irq_spaces; sp; sp = sp->next) {
if (sp->start_irqno <= irq_nr &&
irq_nr < sp->start_irqno + sp->nr_irqs) {
@@ -148,7 +132,6 @@ static void jmr3927_irq_enable(unsigned int irq_nr)
break;
}
}
- spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
}
/*
@@ -457,11 +440,10 @@ void __init arch_init_irq(void)
static struct irq_chip jmr3927_irq_controller = {
.typename = "jmr3927_irq",
- .startup = jmr3927_irq_startup,
- .shutdown = jmr3927_irq_shutdown,
- .enable = jmr3927_irq_enable,
- .disable = jmr3927_irq_disable,
.ack = jmr3927_irq_ack,
+ .mask = jmr3927_irq_disable,
+ .mask_ack = jmr3927_irq_ack,
+ .unmask = jmr3927_irq_enable,
.end = jmr3927_irq_end,
};
@@ -469,12 +451,8 @@ void jmr3927_irq_init(u32 irq_base)
{
u32 i;
- for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &jmr3927_irq_controller;
- }
+ for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++)
+ set_irq_chip(i, &jmr3927_irq_controller);
jmr3927_irq_base = irq_base;
}
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 16e5dfe7aa8..138f25efe38 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -170,7 +170,7 @@ static void jmr3927_machine_power_off(void)
while (1);
}
-static unsigned int jmr3927_hpt_read(void)
+static cycle_t jmr3927_hpt_read(void)
{
/* We assume this function is called with xtime_lock held. */
return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
@@ -182,7 +182,7 @@ extern void rtc_ds1742_init(unsigned long base);
#endif
static void __init jmr3927_time_init(void)
{
- mips_hpt_read = jmr3927_hpt_read;
+ clocksource_mips.read = jmr3927_hpt_read;
mips_hpt_frequency = JMR3927_TIMER_CLK;
#ifdef USE_RTC_DS1742
if (jmr3927_have_nvram()) {
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6bfbbed0897..bbbb8d7cb89 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
-obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
@@ -67,6 +66,8 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ecefaf..9b34238d41c 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -90,7 +90,6 @@ struct elf_prpsinfo32
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#define elf_addr_t u32
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e3181377989..993f7ec70f3 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -92,7 +92,6 @@ struct elf_prpsinfo32
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#define elf_addr_t u32
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 8485af340ee..442839e9578 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -110,9 +110,8 @@ static inline void check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk("Checking for 'wait' instruction... ");
if (nowait) {
- printk (" disabled.\n");
+ printk("Wait instruction disabled.\n");
return;
}
@@ -120,11 +119,9 @@ static inline void check_wait(void)
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
- printk(" available.\n");
break;
case CPU_TX3927:
cpu_wait = r39xx_wait;
- printk(" available.\n");
break;
case CPU_R4200:
/* case CPU_R4300: */
@@ -146,33 +143,23 @@ static inline void check_wait(void)
case CPU_74K:
case CPU_PR4450:
cpu_wait = r4k_wait;
- printk(" available.\n");
break;
case CPU_TX49XX:
cpu_wait = r4k_wait_irqoff;
- printk(" available.\n");
break;
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
case CPU_AU1550:
case CPU_AU1200:
- if (allow_au1k_wait) {
+ if (allow_au1k_wait)
cpu_wait = au1k_wait;
- printk(" available.\n");
- } else
- printk(" unavailable.\n");
break;
case CPU_RM9000:
- if ((c->processor_id & 0x00ff) >= 0x40) {
+ if ((c->processor_id & 0x00ff) >= 0x40)
cpu_wait = r4k_wait;
- printk(" available.\n");
- } else {
- printk(" unavailable.\n");
- }
break;
default:
- printk(" unavailable.\n");
break;
}
}
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07741e..00000000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
- return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5baca16993d..aacd4a005c5 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -19,6 +19,7 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
+#include <asm/page.h>
#define PANIC_PIC(msg) \
.set push; \
@@ -378,6 +379,68 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
+ .align 5
+ LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+ PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
+ .set push
+ .set noat
+ .set noreorder
+ /* check if TLB contains an entry for EPC */

+ MFC0 k1, CP0_ENTRYHI
+ andi k1, 0xff /* ASID_MASK */
+ MFC0 k0, CP0_EPC
+ PTR_SRL k0, PAGE_SHIFT + 1
+ PTR_SLL k0, PAGE_SHIFT + 1
+ or k1, k0
+ MTC0 k1, CP0_ENTRYHI
+ mtc0_tlbw_hazard
+ tlbp
+ tlb_probe_hazard
+ mfc0 k1, CP0_INDEX
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+#endif
+ END(handle_ri_rdhwr_vivt)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+ .set noat
+ .set noreorder
+ /* 0x7c03e83b: rdhwr v1,$29 */
+ MFC0 k1, CP0_EPC
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+ .set reorder
+ bne k0, k1, handle_ri /* if not ours */
+ /* The insn is rdhwr. No need to check CAUSE.BD here. */
+ get_saved_sp /* k1 := current_thread_info */
+ .set noreorder
+ MFC0 k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ LONG_ADDIU k0, 4
+ jr k0
+ rfe
+#else
+ LONG_ADDIU k0, 4 /* stall on $k0 */
+ MTC0 k0, CP0_EPC
+ /* I hope three instructions between MTC0 and ERET are enough... */
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ .set mips3
+ eret
+ .set mips0
+#endif
+ .set pop
+ END(handle_ri_rdhwr)
+
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */
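
Note: the new handle_ri_rdhwr path emulates the MIPS32r2 instruction "rdhwr v1, $29" (read the userlocal/TLS register) on CPUs that trap it as a reserved instruction. A sketch of the userland idiom it serves, modelled on the usual libc sequence and given here only as an assumption about the consumer: the "=v" constraint pins the output to $3 (v1), which is the register the handler above fills in before returning.

static inline void *read_tls_pointer(void)
{
	void *tp;

	__asm__ __volatile__(
		"	.set	push\n"
		"	.set	mips32r2\n"
		"	rdhwr	%0, $29\n"	/* trapped and emulated on pre-R2 cores */
		"	.set	pop\n"
		: "=v" (tp));
	return tp;
}
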
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index ddc1b71c937..a2e095adaa3 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -250,6 +250,9 @@ NESTED(smp_bootstrap, 16, sp)
*/
page swapper_pg_dir, _PGD_ORDER
#ifdef CONFIG_64BIT
+#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
+ page module_pg_dir, _PGD_ORDER
+#endif
page invalid_pmd_table, _PMD_ORDER
#endif
page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 48e3418c217..b59a676c6d0 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -19,9 +19,6 @@
#include <asm/i8259.h>
#include <asm/io.h>
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
@@ -31,34 +28,16 @@ void disable_8259A_irq(unsigned int irq);
* moves to arch independent land
*/
+static int i8259A_auto_eoi;
DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
- irq_desc[irq].action)
- enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq disable_8259A_irq
-
+/* some platforms call this... */
void mask_and_ack_8259A(unsigned int);
-static unsigned int startup_8259A_irq(unsigned int irq)
-{
- enable_8259A_irq(irq);
-
- return 0; /* never anything pending */
-}
-
-static struct irq_chip i8259A_irq_type = {
- .typename = "XT-PIC",
- .startup = startup_8259A_irq,
- .shutdown = shutdown_8259A_irq,
- .enable = enable_8259A_irq,
- .disable = disable_8259A_irq,
- .ack = mask_and_ack_8259A,
- .end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .mask = disable_8259A_irq,
+ .unmask = enable_8259A_irq,
+ .mask_ack = mask_and_ack_8259A,
};
/*
@@ -70,8 +49,8 @@ static struct irq_chip i8259A_irq_type = {
*/
static unsigned int cached_irq_mask = 0xffff;
-#define cached_21 (cached_irq_mask)
-#define cached_A1 (cached_irq_mask >> 8)
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
void disable_8259A_irq(unsigned int irq)
{
@@ -81,9 +60,9 @@ void disable_8259A_irq(unsigned int irq)
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
- outb(cached_A1,0xA1);
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
else
- outb(cached_21,0x21);
+ outb(cached_master_mask, PIC_MASTER_IMR);
spin_unlock_irqrestore(&i8259A_lock, flags);
}
@@ -95,9 +74,9 @@ void enable_8259A_irq(unsigned int irq)
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
- outb(cached_A1,0xA1);
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
else
- outb(cached_21,0x21);
+ outb(cached_master_mask, PIC_MASTER_IMR);
spin_unlock_irqrestore(&i8259A_lock, flags);
}
@@ -109,9 +88,9 @@ int i8259A_irq_pending(unsigned int irq)
spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
- ret = inb(0x20) & mask;
+ ret = inb(PIC_MASTER_CMD) & mask;
else
- ret = inb(0xA0) & (mask >> 8);
+ ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
@@ -120,7 +99,7 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- irq_desc[irq].chip = &i8259A_irq_type;
+ set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
@@ -136,14 +115,14 @@ static inline int i8259A_irq_real(unsigned int irq)
int irqmask = 1 << irq;
if (irq < 8) {
- outb(0x0B,0x20); /* ISR register */
- value = inb(0x20) & irqmask;
- outb(0x0A,0x20); /* back to the IRR register */
+ outb(0x0B,PIC_MASTER_CMD); /* ISR register */
+ value = inb(PIC_MASTER_CMD) & irqmask;
+ outb(0x0A,PIC_MASTER_CMD); /* back to the IRR register */
return value;
}
- outb(0x0B,0xA0); /* ISR register */
- value = inb(0xA0) & (irqmask >> 8);
- outb(0x0A,0xA0); /* back to the IRR register */
+ outb(0x0B,PIC_SLAVE_CMD); /* ISR register */
+ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+ outb(0x0A,PIC_SLAVE_CMD); /* back to the IRR register */
return value;
}
@@ -160,17 +139,19 @@ void mask_and_ack_8259A(unsigned int irq)
spin_lock_irqsave(&i8259A_lock, flags);
/*
- * Lightweight spurious IRQ detection. We do not want to overdo
- * spurious IRQ handling - it's usually a sign of hardware problems, so
- * we only do the checks we can do without slowing down good hardware
- * nnecesserily.
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
*
- * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
- * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
- * Thus we can check spurious 8259A IRQs without doing the quite slow
- * i8259A_irq_real() call for every IRQ. This does not cover 100% of
- * spurious interrupts, but should be enough to warn the user that
- * there is something bad going on ...
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
*/
if (cached_irq_mask & irqmask)
goto spurious_8259A_irq;
@@ -178,14 +159,14 @@ void mask_and_ack_8259A(unsigned int irq)
handle_real_irq:
if (irq & 8) {
- inb(0xA1); /* DUMMY - (do we need this?) */
- outb(cached_A1,0xA1);
- outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
- outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
+ inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
} else {
- inb(0x21); /* DUMMY - (do we need this?) */
- outb(cached_21,0x21);
- outb(0x60+irq,0x20); /* 'Specific EOI' to master */
+ inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI' to master */
}
#ifdef CONFIG_MIPS_MT_SMTC
if (irq_hwmask[irq] & ST0_IM)
@@ -206,7 +187,7 @@ spurious_8259A_irq:
goto handle_real_irq;
{
- static int spurious_irq_mask = 0;
+ static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
* lets ACK and report it. [once per IRQ]
@@ -227,13 +208,25 @@ spurious_8259A_irq:
static int i8259A_resume(struct sys_device *dev)
{
- init_8259A(0);
+ init_8259A(i8259A_auto_eoi);
+ return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
return 0;
}
static struct sysdev_class i8259_sysdev_class = {
set_kset_name("i8259"),
.resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
};
static struct sys_device device_i8259A = {
@@ -255,41 +248,41 @@ void __init init_8259A(int auto_eoi)
{
unsigned long flags;
+ i8259A_auto_eoi = auto_eoi;
+
spin_lock_irqsave(&i8259A_lock, flags);
- outb(0xff, 0x21); /* mask all of 8259A-1 */
- outb(0xff, 0xA1); /* mask all of 8259A-2 */
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
/*
* outb_p - this has to work on a wide range of PC hardware.
*/
- outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */
- outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
- outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */
- if (auto_eoi)
- outb_p(0x03, 0x21); /* master does Auto EOI */
- else
- outb_p(0x01, 0x21); /* master expects normal EOI */
-
- outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */
- outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
- outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */
- outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode
- is to be investigated) */
-
+ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
+ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+ if (auto_eoi) /* master does Auto EOI */
+ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ else /* master expects normal EOI */
+ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+ outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
+ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
+ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
if (auto_eoi)
/*
- * in AEOI mode we just have to mask the interrupt
+ * In AEOI mode we just have to mask the interrupt
* when acking.
*/
- i8259A_irq_type.ack = disable_8259A_irq;
+ i8259A_chip.mask_ack = disable_8259A_irq;
else
- i8259A_irq_type.ack = mask_and_ack_8259A;
+ i8259A_chip.mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
- outb(cached_21, 0x21); /* restore master IRQ mask */
- outb(cached_A1, 0xA1); /* restore slave IRQ mask */
+ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
spin_unlock_irqrestore(&i8259A_lock, flags);
}
@@ -302,11 +295,17 @@ static struct irqaction irq2 = {
};
static struct resource pic1_io_resource = {
- .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+ .name = "pic1",
+ .start = PIC_MASTER_CMD,
+ .end = PIC_MASTER_IMR,
+ .flags = IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
- .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+ .name = "pic2",
+ .start = PIC_SLAVE_CMD,
+ .end = PIC_SLAVE_IMR,
+ .flags = IORESOURCE_BUSY
};
/*
@@ -323,12 +322,8 @@ void __init init_i8259_irqs (void)
init_8259A(0);
- for (i = 0; i < 16; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &i8259A_irq_type;
- }
+ for (i = 0; i < 16; i++)
+ set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
- setup_irq(2, &irq2);
+ setup_irq(PIC_CASCADE_IR, &irq2);
}
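
Note: the register names substituted above stand for the conventional PC/AT ports (master command/IMR at 0x20/0x21, slave at 0xa0/0xa1, slave cascaded on master IR2); the actual definitions live in <asm/i8259.h>. A condensed sketch of the specific-EOI sequence that mask_and_ack_8259A() issues once the line has been masked, extracted here purely for illustration:

#include <asm/i8259.h>
#include <asm/io.h>

static void eoi_8259A(unsigned int irq)
{
	if (irq & 8) {
		/* line lives on the slave: EOI it, then EOI the cascade on the master */
		outb(0x60 + (irq & 7), PIC_SLAVE_CMD);
		outb(0x60 + PIC_CASCADE_IR, PIC_MASTER_CMD);
	} else {
		outb(0x60 + irq, PIC_MASTER_CMD);
	}
}
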
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index ab12c8f0151..1bbefbf4337 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -52,10 +52,6 @@ static struct linux_binfmt irix_format = {
irix_core_dump, PAGE_SIZE
};
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
#ifdef DEBUG
/* Debugging routines. */
static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@ static int notesize(struct memelfnote *en)
int sz;
sz = sizeof(struct elf_note);
- sz += roundup(strlen(en->name), 4);
+ sz += roundup(strlen(en->name) + 1, 4);
sz += roundup(en->datasz, 4);
return sz;
@@ -1032,7 +1028,7 @@ static int writenote(struct memelfnote *men, struct file *file)
{
struct elf_note en;
- en.n_namesz = strlen(men->name);
+ en.n_namesz = strlen(men->name) + 1;
en.n_descsz = men->datasz;
en.n_type = men->type;
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 650a80ca374..bcaad669608 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -45,31 +45,6 @@ static inline void unmask_msc_irq(unsigned int irq)
}
/*
- * Enables the IRQ on SOC-it
- */
-static void enable_msc_irq(unsigned int irq)
-{
- unmask_msc_irq(irq);
-}
-
-/*
- * Initialize the IRQ on SOC-it
- */
-static unsigned int startup_msc_irq(unsigned int irq)
-{
- unmask_msc_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ on SOC-it
- */
-static void disable_msc_irq(unsigned int irq)
-{
- mask_msc_irq(irq);
-}
-
-/*
* Masks and ACKs an IRQ
*/
static void level_mask_and_ack_msc_irq(unsigned int irq)
@@ -136,25 +111,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
-#define shutdown_msc_irq disable_msc_irq
-
struct irq_chip msc_levelirq_type = {
.typename = "SOC-it-Level",
- .startup = startup_msc_irq,
- .shutdown = shutdown_msc_irq,
- .enable = enable_msc_irq,
- .disable = disable_msc_irq,
.ack = level_mask_and_ack_msc_irq,
+ .mask = mask_msc_irq,
+ .mask_ack = level_mask_and_ack_msc_irq,
+ .unmask = unmask_msc_irq,
+ .eoi = unmask_msc_irq,
.end = end_msc_irq,
};
struct irq_chip msc_edgeirq_type = {
.typename = "SOC-it-Edge",
- .startup =startup_msc_irq,
- .shutdown = shutdown_msc_irq,
- .enable = enable_msc_irq,
- .disable = disable_msc_irq,
.ack = edge_mask_and_ack_msc_irq,
+ .mask = mask_msc_irq,
+ .mask_ack = edge_mask_and_ack_msc_irq,
+ .unmask = unmask_msc_irq,
+ .eoi = unmask_msc_irq,
.end = end_msc_irq,
};
@@ -175,14 +148,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
- irq_desc[base+n].chip = &msc_edgeirq_type;
+ set_irq_chip(base+n, &msc_edgeirq_type);
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
- irq_desc[base+n].chip = &msc_levelirq_type;
+ set_irq_chip(base+n, &msc_levelirq_type);
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 37d106202b8..efbd219845b 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -67,48 +67,6 @@ static inline void unmask_mv64340_irq(unsigned int irq)
}
/*
- * Enables the IRQ on Marvell Chip
- */
-static void enable_mv64340_irq(unsigned int irq)
-{
- unmask_mv64340_irq(irq);
-}
-
-/*
- * Initialize the IRQ on Marvell Chip
- */
-static unsigned int startup_mv64340_irq(unsigned int irq)
-{
- unmask_mv64340_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ on Marvell Chip
- */
-static void disable_mv64340_irq(unsigned int irq)
-{
- mask_mv64340_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_mv64340_irq(unsigned int irq)
-{
- mask_mv64340_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_mv64340_irq(irq);
-}
-
-/*
* Interrupt handler for interrupts coming from the Marvell chip.
* It could be built in ethernet ports etc...
*/
@@ -133,29 +91,21 @@ void ll_mv64340_irq(void)
do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
}
-#define shutdown_mv64340_irq disable_mv64340_irq
-
struct irq_chip mv64340_irq_type = {
.typename = "MV-64340",
- .startup = startup_mv64340_irq,
- .shutdown = shutdown_mv64340_irq,
- .enable = enable_mv64340_irq,
- .disable = disable_mv64340_irq,
- .ack = mask_and_ack_mv64340_irq,
- .end = end_mv64340_irq,
+ .ack = mask_mv64340_irq,
+ .mask = mask_mv64340_irq,
+ .mask_ack = mask_mv64340_irq,
+ .unmask = unmask_mv64340_irq,
};
void __init mv64340_irq_init(unsigned int base)
{
int i;
- /* Reset irq handlers pointers to NULL */
- for (i = base; i < base + 64; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &mv64340_irq_type;
- }
+ for (i = base; i < base + 64; i++)
+ set_irq_chip_and_handler(i, &mv64340_irq_type,
+ handle_level_irq);
irq_base = base;
}
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 6b54c7109e2..123324ba8c1 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -29,56 +29,12 @@ static inline void mask_rm7k_irq(unsigned int irq)
clear_c0_intcontrol(0x100 << (irq - irq_base));
}
-static inline void rm7k_cpu_irq_enable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_rm7k_irq(irq);
- local_irq_restore(flags);
-}
-
-static void rm7k_cpu_irq_disable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm7k_irq(irq);
- local_irq_restore(flags);
-}
-
-static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
-{
- rm7k_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm7k_cpu_irq_end.
- */
-static void rm7k_cpu_irq_ack(unsigned int irq)
-{
- mask_rm7k_irq(irq);
-}
-
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_rm7k_irq(irq);
-}
-
static struct irq_chip rm7k_irq_controller = {
.typename = "RM7000",
- .startup = rm7k_cpu_irq_startup,
- .shutdown = rm7k_cpu_irq_shutdown,
- .enable = rm7k_cpu_irq_enable,
- .disable = rm7k_cpu_irq_disable,
- .ack = rm7k_cpu_irq_ack,
- .end = rm7k_cpu_irq_end,
+ .ack = mask_rm7k_irq,
+ .mask = mask_rm7k_irq,
+ .mask_ack = mask_rm7k_irq,
+ .unmask = unmask_rm7k_irq,
};
void __init rm7k_cpu_irq_init(int base)
@@ -87,12 +43,9 @@ void __init rm7k_cpu_irq_init(int base)
clear_c0_intcontrol(0x00000f00); /* Mask all */
- for (i = base; i < base + 4; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &rm7k_irq_controller;
- }
+ for (i = base; i < base + 4; i++)
+ set_irq_chip_and_handler(i, &rm7k_irq_controller,
+ handle_level_irq);
irq_base = base;
}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 62f011ba97a..0e6f4c5349d 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -48,15 +48,6 @@ static void rm9k_cpu_irq_disable(unsigned int irq)
local_irq_restore(flags);
}
-static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
-{
- rm9k_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable
-
/*
* Performance counter interrupts are global on all processors.
*/
@@ -89,40 +80,22 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
}
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm9k_cpu_irq_end.
- */
-static void rm9k_cpu_irq_ack(unsigned int irq)
-{
- mask_rm9k_irq(irq);
-}
-
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_rm9k_irq(irq);
-}
-
static struct irq_chip rm9k_irq_controller = {
.typename = "RM9000",
- .startup = rm9k_cpu_irq_startup,
- .shutdown = rm9k_cpu_irq_shutdown,
- .enable = rm9k_cpu_irq_enable,
- .disable = rm9k_cpu_irq_disable,
- .ack = rm9k_cpu_irq_ack,
- .end = rm9k_cpu_irq_end,
+ .ack = mask_rm9k_irq,
+ .mask = mask_rm9k_irq,
+ .mask_ack = mask_rm9k_irq,
+ .unmask = unmask_rm9k_irq,
};
static struct irq_chip rm9k_perfcounter_irq = {
.typename = "RM9000",
.startup = rm9k_perfcounter_irq_startup,
.shutdown = rm9k_perfcounter_irq_shutdown,
- .enable = rm9k_cpu_irq_enable,
- .disable = rm9k_cpu_irq_disable,
- .ack = rm9k_cpu_irq_ack,
- .end = rm9k_cpu_irq_end,
+ .ack = mask_rm9k_irq,
+ .mask = mask_rm9k_irq,
+ .mask_ack = mask_rm9k_irq,
+ .unmask = unmask_rm9k_irq,
};
unsigned int rm9000_perfcount_irq;
@@ -135,15 +108,13 @@ void __init rm9k_cpu_irq_init(int base)
clear_c0_intcontrol(0x0000f000); /* Mask all */
- for (i = base; i < base + 4; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &rm9k_irq_controller;
- }
+ for (i = base; i < base + 4; i++)
+ set_irq_chip_and_handler(i, &rm9k_irq_controller,
+ handle_level_irq);
rm9000_perfcount_irq = base + 1;
- irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
+ set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+ handle_level_irq);
irq_base = base;
}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 9b0e49d63d7..2fe4c868a80 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -88,25 +88,6 @@ atomic_t irq_err_count;
unsigned long irq_hwmask[NR_IRQS];
#endif /* CONFIG_MIPS_MT_SMTC */
-#undef do_IRQ
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(unsigned int irq)
-{
- irq_enter();
-
- __DO_IRQ_SMTC_HOOK();
- __do_IRQ(irq);
-
- irq_exit();
-
- return 1;
-}
-
/*
* Generic, controller-independent functions:
*/
@@ -136,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -172,19 +153,6 @@ __setup("nokgdb", nokgdb);
void __init init_IRQ(void)
{
- int i;
-
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &no_irq_chip;
- spin_lock_init(&irq_desc[i].lock);
-#ifdef CONFIG_MIPS_MT_SMTC
- irq_hwmask[i] = 0;
-#endif /* CONFIG_MIPS_MT_SMTC */
- }
-
arch_init_irq();
#ifdef CONFIG_KGDB
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 9bb21c7f214..fcc86b96ccf 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -50,58 +50,13 @@ static inline void mask_mips_irq(unsigned int irq)
irq_disable_hazard();
}
-static inline void mips_cpu_irq_enable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_mips_irq(irq);
- back_to_back_c0_hazard();
- local_irq_restore(flags);
-}
-
-static void mips_cpu_irq_disable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mask_mips_irq(irq);
- back_to_back_c0_hazard();
- local_irq_restore(flags);
-}
-
-static unsigned int mips_cpu_irq_startup(unsigned int irq)
-{
- mips_cpu_irq_enable(irq);
-
- return 0;
-}
-
-#define mips_cpu_irq_shutdown mips_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for mips_cpu_irq_end.
- */
-static void mips_cpu_irq_ack(unsigned int irq)
-{
- mask_mips_irq(irq);
-}
-
-static void mips_cpu_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_mips_irq(irq);
-}
-
static struct irq_chip mips_cpu_irq_controller = {
.typename = "MIPS",
- .startup = mips_cpu_irq_startup,
- .shutdown = mips_cpu_irq_shutdown,
- .enable = mips_cpu_irq_enable,
- .disable = mips_cpu_irq_disable,
- .ack = mips_cpu_irq_ack,
- .end = mips_cpu_irq_end,
+ .ack = mask_mips_irq,
+ .mask = mask_mips_irq,
+ .mask_ack = mask_mips_irq,
+ .unmask = unmask_mips_irq,
+ .eoi = unmask_mips_irq,
};
/*
@@ -110,8 +65,6 @@ static struct irq_chip mips_cpu_irq_controller = {
#define unmask_mips_mt_irq unmask_mips_irq
#define mask_mips_mt_irq mask_mips_irq
-#define mips_mt_cpu_irq_enable mips_cpu_irq_enable
-#define mips_mt_cpu_irq_disable mips_cpu_irq_disable
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
@@ -119,13 +72,11 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
evpe(vpflags);
- mips_mt_cpu_irq_enable(irq);
+ unmask_mips_mt_irq(irq);
return 0;
}
-#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable
-
/*
* While we ack the interrupt interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
@@ -138,16 +89,14 @@ static void mips_mt_cpu_irq_ack(unsigned int irq)
mask_mips_mt_irq(irq);
}
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
static struct irq_chip mips_mt_cpu_irq_controller = {
.typename = "MIPS",
.startup = mips_mt_cpu_irq_startup,
- .shutdown = mips_mt_cpu_irq_shutdown,
- .enable = mips_mt_cpu_irq_enable,
- .disable = mips_mt_cpu_irq_disable,
.ack = mips_mt_cpu_irq_ack,
- .end = mips_mt_cpu_irq_end,
+ .mask = mask_mips_mt_irq,
+ .mask_ack = mips_mt_cpu_irq_ack,
+ .unmask = unmask_mips_mt_irq,
+ .eoi = unmask_mips_mt_irq,
};
void __init mips_cpu_irq_init(int irq_base)
@@ -163,19 +112,12 @@ void __init mips_cpu_irq_init(int irq_base)
* leave them uninitialized for other processors.
*/
if (cpu_has_mipsmt)
- for (i = irq_base; i < irq_base + 2; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &mips_mt_cpu_irq_controller;
- }
-
- for (i = irq_base + 2; i < irq_base + 8; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &mips_cpu_irq_controller;
- }
+ for (i = irq_base; i < irq_base + 2; i++)
+ set_irq_chip(i, &mips_mt_cpu_irq_controller);
+
+ for (i = irq_base + 2; i < irq_base + 8; i++)
+ set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+ handle_level_irq);
mips_cpu_irq_base = irq_base;
}
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c788..2c82412b9ef 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
static int channel_open = 0;
/* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
{
if (!channel_open) {
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
return;
}
- INIT_WORK(&work, sp_work, NULL);
+ INIT_WORK(&work, sp_work);
queue_work(workqueue, &work);
} else
queue_work(workqueue, &work);
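
Note: the kspd.c change follows the reworked workqueue convention in this kernel — the handler receives the work_struct itself rather than a void *data cookie, and per-instance data, when needed, is recovered with container_of(). A minimal sketch with a hypothetical my_dev structure (kspd's own work item is static, so its handler simply ignores the argument):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	int unit;
	struct work_struct work;
};

static void my_dev_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* ... operate on dev->unit ... */
}

static void my_dev_kick(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_dev_work);	/* two-argument form: no data pointer */
	schedule_work(&dev->work);
}
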
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 7a3ebbeba1f..b061c9aa630 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -382,531 +382,6 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
return ret;
}
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
- key_t key;
- __compat_uid_t uid;
- __compat_gid_t gid;
- __compat_uid_t cuid;
- __compat_gid_t cgid;
- compat_mode_t mode;
- unsigned short seq;
-};
-
-struct ipc64_perm32 {
- key_t key;
- __compat_uid_t uid;
- __compat_gid_t gid;
- __compat_uid_t cuid;
- __compat_gid_t cgid;
- compat_mode_t mode;
- unsigned short seq;
- unsigned short __pad1;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct semid_ds32 {
- struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
- compat_time_t sem_otime; /* last semop time */
- compat_time_t sem_ctime; /* last change time */
- u32 sem_base; /* ptr to first semaphore in array */
- u32 sem_pending; /* pending operations to be processed */
- u32 sem_pending_last; /* last pending operation */
- u32 undo; /* undo requests on this array */
- unsigned short sem_nsems; /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
- struct ipc64_perm32 sem_perm;
- compat_time_t sem_otime;
- compat_time_t sem_ctime;
- unsigned int sem_nsems;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct msqid_ds32
-{
- struct ipc_perm32 msg_perm;
- u32 msg_first;
- u32 msg_last;
- compat_time_t msg_stime;
- compat_time_t msg_rtime;
- compat_time_t msg_ctime;
- u32 wwait;
- u32 rwait;
- unsigned short msg_cbytes;
- unsigned short msg_qnum;
- unsigned short msg_qbytes;
- compat_ipc_pid_t msg_lspid;
- compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
- struct ipc64_perm32 msg_perm;
- compat_time_t msg_stime;
- unsigned int __unused1;
- compat_time_t msg_rtime;
- unsigned int __unused2;
- compat_time_t msg_ctime;
- unsigned int __unused3;
- unsigned int msg_cbytes;
- unsigned int msg_qnum;
- unsigned int msg_qbytes;
- compat_pid_t msg_lspid;
- compat_pid_t msg_lrpid;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-struct shmid_ds32 {
- struct ipc_perm32 shm_perm;
- int shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
- compat_ipc_pid_t shm_cpid;
- compat_ipc_pid_t shm_lpid;
- unsigned short shm_nattch;
-};
-
-struct shmid64_ds32 {
- struct ipc64_perm32 shm_perm;
- compat_size_t shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
- compat_pid_t shm_cpid;
- compat_pid_t shm_lpid;
- unsigned int shm_nattch;
- unsigned int __unused1;
- unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
- u32 msgp;
- s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
- union semun fourth;
- u32 pad;
- int err, err2;
- struct semid64_ds s;
- mm_segment_t old_fs;
-
- if (!uptr)
- return -EINVAL;
- err = -EFAULT;
- if (get_user (pad, (u32 __user *)uptr))
- return err;
- if ((third & ~IPC_64) == SETVAL)
- fourth.val = (int)pad;
- else
- fourth.__pad = (void __user *)A(pad);
- switch (third & ~IPC_64) {
- case IPC_INFO:
- case IPC_RMID:
- case IPC_SET:
- case SEM_INFO:
- case GETVAL:
- case GETPID:
- case GETNCNT:
- case GETZCNT:
- case GETALL:
- case SETVAL:
- case SETALL:
- err = sys_semctl (first, second, third, fourth);
- break;
-
- case IPC_STAT:
- case SEM_STAT:
- fourth.__pad = (struct semid64_ds __user *)&s;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_semctl(first, second, third | IPC_64, fourth);
- set_fs(old_fs);
-
- if (third & IPC_64) {
- struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
- if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
- err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
- err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
- err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
- err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
- err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
- err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
- err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
- err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
- err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
- } else {
- struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
- if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
- err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
- err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
- err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
- err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
- err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
- err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
- err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
- err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
- err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
- }
- if (err2)
- err = -EFAULT;
- break;
-
- default:
- err = - EINVAL;
- break;
- }
-
- return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
- struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
- struct msgbuf *p;
- mm_segment_t old_fs;
- int err;
-
- if (second < 0)
- return -EINVAL;
- p = kmalloc (second + sizeof (struct msgbuf)
- + 4, GFP_USER);
- if (!p)
- return -ENOMEM;
- err = get_user (p->mtype, &up->mtype);
- if (err)
- goto out;
- err |= __copy_from_user (p->mtext, &up->mtext, second);
- if (err)
- goto out;
- old_fs = get_fs ();
- set_fs (KERNEL_DS);
- err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
- set_fs (old_fs);
-out:
- kfree (p);
-
- return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
- int version, void __user *uptr)
-{
- struct msgbuf32 __user *up;
- struct msgbuf *p;
- mm_segment_t old_fs;
- int err;
-
- if (!version) {
- struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
- struct ipc_kludge32 ipck;
-
- err = -EINVAL;
- if (!uptr)
- goto out;
- err = -EFAULT;
- if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
- goto out;
- uptr = (void __user *)AA(ipck.msgp);
- msgtyp = ipck.msgtyp;
- }
-
- if (second < 0)
- return -EINVAL;
- err = -ENOMEM;
- p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
- if (!p)
- goto out;
- old_fs = get_fs ();
- set_fs (KERNEL_DS);
- err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
- set_fs (old_fs);
- if (err < 0)
- goto free_then_out;
- up = (struct msgbuf32 __user *)uptr;
- if (put_user (p->mtype, &up->mtype) ||
- __copy_to_user (&up->mtext, p->mtext, err))
- err = -EFAULT;
-free_then_out:
- kfree (p);
-out:
- return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
- int err = -EINVAL, err2;
- struct msqid64_ds m;
- struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
- struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
- mm_segment_t old_fs;
-
- switch (second & ~IPC_64) {
- case IPC_INFO:
- case IPC_RMID:
- case MSG_INFO:
- err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
- break;
-
- case IPC_SET:
- if (second & IPC_64) {
- if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
- err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
- err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
- err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
- } else {
- if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
- err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
- err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
- err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
- }
- if (err)
- break;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
- set_fs(old_fs);
- break;
-
- case IPC_STAT:
- case MSG_STAT:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
- set_fs(old_fs);
- if (second & IPC_64) {
- if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
- err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
- err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
- err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
- err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
- err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
- err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
- err2 |= __put_user(m.msg_stime, &up64->msg_stime);
- err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
- err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
- err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
- err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
- err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
- err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
- err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
- if (err2)
- err = -EFAULT;
- } else {
- if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
- err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
- err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
- err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
- err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
- err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
- err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
- err2 |= __put_user(m.msg_stime, &up32->msg_stime);
- err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
- err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
- err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
- err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
- err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
- err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
- err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
- if (err2)
- err = -EFAULT;
- }
- break;
- }
-
- return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
- unsigned long raddr;
- u32 __user *uaddr = (u32 __user *)A((u32)third);
- int err = -EINVAL;
-
- if (version == 1)
- return err;
- err = do_shmat (first, uptr, second, &raddr);
- if (err)
- return err;
- err = put_user (raddr, uaddr);
- return err;
-}
-
-struct shm_info32 {
- int used_ids;
- u32 shm_tot, shm_rss, shm_swp;
- u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
- struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
- struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
- struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
- int err = -EFAULT, err2;
- struct shmid64_ds s64;
- mm_segment_t old_fs;
- struct shm_info si;
- struct shmid_ds s;
-
- switch (second & ~IPC_64) {
- case IPC_INFO:
- second = IPC_INFO; /* So that we don't have to translate it */
- case IPC_RMID:
- case SHM_LOCK:
- case SHM_UNLOCK:
- err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
- break;
- case IPC_SET:
- if (second & IPC_64) {
- err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
- } else {
- err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
- }
- if (err)
- break;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
- set_fs(old_fs);
- break;
-
- case IPC_STAT:
- case SHM_STAT:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
- set_fs(old_fs);
- if (err < 0)
- break;
- if (second & IPC_64) {
- if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
- err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
- err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
- err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
- err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
- err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
- err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
- err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
- err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
- err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
- err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
- err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
- err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
- err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
- } else {
- if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
- err = -EFAULT;
- break;
- }
- err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
- err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
- err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
- err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
- err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
- err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
- err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
- err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
- err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
- err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
- err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
- err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
- err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
- err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
- }
- if (err2)
- err = -EFAULT;
- break;
-
- case SHM_INFO:
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_shmctl(first, second, (void __user *)&si);
- set_fs(old_fs);
- if (err < 0)
- break;
- err2 = put_user(si.used_ids, &uip->used_ids);
- err2 |= __put_user(si.shm_tot, &uip->shm_tot);
- err2 |= __put_user(si.shm_rss, &uip->shm_rss);
- err2 |= __put_user(si.shm_swp, &uip->shm_swp);
- err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
- err2 |= __put_user (si.swap_successes, &uip->swap_successes);
- if (err2)
- err = -EFAULT;
- break;
-
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
- const struct compat_timespec __user *timeout32)
-{
- struct compat_timespec t32;
- struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
- if (copy_from_user(&t32, timeout32, sizeof(t32)))
- return -EFAULT;
-
- if (put_user(t32.tv_sec, &t64->tv_sec) ||
- put_user(t32.tv_nsec, &t64->tv_nsec))
- return -EFAULT;
-
- return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
@@ -918,48 +393,43 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
- err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
- NULL);
+ err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
break;
case SEMTIMEDOP:
- err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
- (const struct compat_timespec __user *)AA(fifth));
+ err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
+ compat_ptr(fifth));
break;
case SEMGET:
- err = sys_semget (first, second, third);
+ err = sys_semget(first, second, third);
break;
case SEMCTL:
- err = do_sys32_semctl (first, second, third,
- (void __user *)AA(ptr));
+ err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
break;
-
case MSGSND:
- err = do_sys32_msgsnd (first, second, third,
- (void __user *)AA(ptr));
+ err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
break;
case MSGRCV:
- err = do_sys32_msgrcv (first, second, fifth, third,
- version, (void __user *)AA(ptr));
+ err = compat_sys_msgrcv(first, second, fifth, third,
+ version, compat_ptr(ptr));
break;
case MSGGET:
- err = sys_msgget ((key_t) first, second);
+ err = sys_msgget((key_t) first, second);
break;
case MSGCTL:
- err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
+ err = compat_sys_msgctl(first, second, compat_ptr(ptr));
break;
-
case SHMAT:
- err = do_sys32_shmat (first, second, third,
- version, (void __user *)AA(ptr));
+ err = compat_sys_shmat(first, second, third, version,
+ compat_ptr(ptr));
break;
case SHMDT:
- err = sys_shmdt ((char __user *)A(ptr));
+ err = sys_shmdt(compat_ptr(ptr));
break;
case SHMGET:
- err = sys_shmget (first, (unsigned)second, third);
+ err = sys_shmget(first, (unsigned)second, third);
break;
case SHMCTL:
- err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
+ err = compat_sys_shmctl(first, second, compat_ptr(ptr));
break;
default:
err = -EINVAL;
@@ -969,18 +439,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
return err;
}
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
- int shmflg, int32_t __user *addr)
+#ifdef CONFIG_MIPS32_N32
+asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
{
- unsigned long raddr;
- int err;
-
- err = do_shmat(shmid, shmaddr, shmflg, &raddr);
- if (err)
- return err;
-
- return put_user(raddr, addr);
+ /* compat_sys_semctl expects a pointer to union semun */
+ u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
+ if (put_user(ptr_to_compat(arg.__pad), uptr))
+ return -EFAULT;
+ return compat_sys_semctl(semid, semnum, cmd, uptr);
}
+#endif
struct sysctl_args32
{
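For context on the conversions above: compat_ptr() is what turns the 32-bit ptr argument into a usable user pointer before it is handed to the generic compat_sys_*() helpers, and sysn32_semctl() exists only because N32 passes union semun by value while compat_sys_semctl() expects a user pointer holding a 32-bit value, hence the compat_alloc_user_space() bounce buffer. A rough sketch of compat_ptr() (illustrative only; the real definition is per-architecture and may sign-extend rather than zero-extend):

	static inline void __user *compat_ptr(compat_uptr_t uptr)
	{
		/* widen the 32-bit user value into a full user pointer */
		return (void __user *)(unsigned long)uptr;
	}
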
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 00000000000..e0ad754c7ed
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,85 @@
+/*
+ * machine_kexec.c for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+int
+machine_kexec_prepare(struct kimage *kimage)
+{
+ return 0;
+}
+
+void
+machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void
+machine_shutdown(void)
+{
+}
+
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+}
+
+void
+machine_kexec(struct kimage *image)
+{
+ unsigned long reboot_code_buffer;
+ unsigned long entry;
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+ kexec_start_address = image->start;
+ kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK);
+
+ memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ /*
+ * The generic kexec code builds a page list with physical
+ * addresses. They are directly accessible through KSEG0 (or
+ * CKSEG0 or XKPHYS on a 64-bit system), hence the
+ * phys_to_virt() call.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = phys_to_virt(*ptr);
+ }
+
+ /*
+ * We do not want to be bothered by interrupts from here on.
+ */
+ local_irq_disable();
+
+ flush_icache_range(reboot_code_buffer,
+ reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+
+ printk("Will call new kernel at %08x\n", image->start);
+ printk("Bye ...\n");
+ flush_cache_all();
+ ((void (*)(void))reboot_code_buffer)();
+}
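The loop in machine_kexec() above walks the entry list that the generic kexec code builds: each entry is a page address with a type flag in its low bits, and the relocation stub later replays the same list to copy every source page to its destination. A sketch of how one entry decodes (flag values as defined in include/linux/kexec.h; the function name here is hypothetical):

	static void describe_kexec_entry(unsigned long entry)
	{
		unsigned long page = entry & PAGE_MASK;

		if (entry & IND_DESTINATION)		/* 0x1: copy target from now on */
			printk(KERN_INFO "destination 0x%lx\n", page);
		else if (entry & IND_INDIRECTION)	/* 0x2: continue at this list   */
			printk(KERN_INFO "indirection 0x%lx\n", page);
		else if (entry & IND_DONE)		/* 0x4: end of the list         */
			printk(KERN_INFO "done\n");
		else if (entry & IND_SOURCE)		/* 0x8: page to copy            */
			printk(KERN_INFO "source 0x%lx\n", page);
	}
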
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index d7bf0215bc1..cb0801437b6 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
@@ -43,9 +44,23 @@ static DEFINE_SPINLOCK(dbe_lock);
void *module_alloc(unsigned long size)
{
+#ifdef MODULE_START
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size)
+ return NULL;
+
+ area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+ if (!area)
+ return NULL;
+
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+#else
if (size == 0)
return NULL;
return vmalloc(size);
+#endif
}
/* Free memory returned from module_alloc */
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 00000000000..a3f0d00c133
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,80 @@
+/*
+ * relocate_kernel.S for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/page.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+ .globl relocate_new_kernel
+relocate_new_kernel:
+
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
+
+process_entry:
+ PTR_L s2, (s0)
+ PTR_ADD s0, s0, SZREG
+
+ /* destination page */
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+ move a0, s4
+ b process_entry
+
+1:
+ /* indirection page, update s0 */
+ and s3, s2, 0x2
+ beq s3, zero, 1f
+ and s0, s2, ~0x2
+ b process_entry
+
+1:
+ /* done page */
+ and s3, s2, 0x4
+ beq s3, zero, 1f
+ b done
+1:
+ /* source page */
+ and s3, s2, 0x8
+ beq s3, zero, process_entry
+ and s2, s2, ~0x8
+ li s6, (1 << PAGE_SHIFT) / SZREG
+
+copy_word:
+ /* copy page word by word */
+ REG_L s5, (s2)
+ REG_S s5, (s4)
+ INT_ADD s4, s4, SZREG
+ INT_ADD s2, s2, SZREG
+ INT_SUB s6, s6, 1
+ beq s6, zero, process_entry
+ b copy_word
+ b process_entry
+
+done:
+ /* jump to kexec_start_address */
+ j s1
+
+ .globl kexec_start_address
+kexec_start_address:
+ .long 0x0
+
+ .globl kexec_indirection_page
+kexec_indirection_page:
+ .long 0x0
+
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
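In C-like pseudocode the stub above does the following; this is a sketch for readability only, since the real code must run out of the copied control page with no stack and no normal kernel mappings, which is why it is hand-written assembly:

	static void relocate_new_kernel_sketch(unsigned long *p,
					       void (*start_new_kernel)(void))
	{
		unsigned long *dst = NULL;
		unsigned long e;

		for (;;) {
			e = *p++;
			if (e & 0x1) {			/* destination page */
				dst = (unsigned long *)(e & ~0x1UL);
			} else if (e & 0x2) {		/* indirection page */
				p = (unsigned long *)(e & ~0x2UL);
			} else if (e & 0x4) {		/* done */
				break;
			} else if (e & 0x8) {		/* source page: copy it */
				unsigned long *src = (unsigned long *)(e & ~0x8UL);
				int i;

				for (i = 0; i < PAGE_SIZE / sizeof(long); i++)
					*dst++ = *src++;
			}
		}
		start_new_kernel();		/* jump to kexec_start_address */
	}
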
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a95f37de080..7c0b3936ba4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -653,7 +653,7 @@ einval: li v0, -EINVAL
sys sys_move_pages 6
sys sys_set_robust_list 2
sys sys_get_robust_list 3 /* 4310 */
- sys sys_ni_syscall 0
+ sys sys_kexec_load 4
sys sys_getcpu 3
sys sys_epoll_pwait 6
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 8fb0f60f657..e569b846e9a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -468,6 +468,6 @@ sys_call_table:
PTR sys_move_pages
PTR sys_set_robust_list
PTR sys_get_robust_list
- PTR sys_ni_syscall /* 5270 */
+ PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0da5ca2040f..34567d81f94 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -149,8 +149,8 @@ EXPORT(sysn32_call_table)
PTR sys_mincore
PTR sys_madvise
PTR sys_shmget
- PTR sys32_shmat
- PTR sys_shmctl /* 6030 */
+ PTR sys_shmat
+ PTR compat_sys_shmctl /* 6030 */
PTR sys_dup
PTR sys_dup2
PTR sys_pause
@@ -184,12 +184,12 @@ EXPORT(sysn32_call_table)
PTR sys32_newuname
PTR sys_semget
PTR sys_semop
- PTR sys_semctl
+ PTR sysn32_semctl
PTR sys_shmdt /* 6065 */
PTR sys_msgget
- PTR sys_msgsnd
- PTR sys_msgrcv
- PTR sys_msgctl
+ PTR compat_sys_msgsnd
+ PTR compat_sys_msgrcv
+ PTR compat_sys_msgctl
PTR compat_sys_fcntl /* 6070 */
PTR sys_flock
PTR sys_fsync
@@ -335,7 +335,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_fcntl64
PTR sys_set_tid_address
PTR sys_restart_syscall
- PTR sys_semtimedop /* 6215 */
+ PTR compat_sys_semtimedop /* 6215 */
PTR sys_fadvise64_64
PTR compat_sys_statfs64
PTR compat_sys_fstatfs64
@@ -394,6 +394,6 @@ EXPORT(sysn32_call_table)
PTR sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
- PTR sys_ni_syscall
+ PTR compat_sys_kexec_load
PTR sys_getcpu
PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b9d00cae8b5..e91379c1be1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -516,7 +516,7 @@ sys_call_table:
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */
- PTR sys_ni_syscall
+ PTR compat_sys_kexec_load
PTR sys_getcpu
PTR sys_epoll_pwait
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8f6e89697cc..89440a0d852 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -145,13 +145,12 @@ static int __init rd_start_early(char *p)
unsigned long start = memparse(p, &p);
#ifdef CONFIG_64BIT
- /* HACK: Guess if the sign extension was forgotten */
- if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
- start |= 0xffffffff00000000UL;
+ /* Guess if the sign extension was forgotten by the bootloader */
+ if (start < XKPHYS)
+ start = (int)start;
#endif
initrd_start = start;
initrd_end += start;
-
return 0;
}
early_param("rd_start", rd_start_early);
@@ -159,41 +158,64 @@ early_param("rd_start", rd_start_early);
static int __init rd_size_early(char *p)
{
initrd_end += memparse(p, &p);
-
return 0;
}
early_param("rd_size", rd_size_early);
+/* Return the next free pfn after the initrd, or 0 if there is none. */
static unsigned long __init init_initrd(void)
{
- unsigned long tmp, end, size;
+ unsigned long end;
u32 *initrd_header;
- ROOT_DEV = Root_RAM0;
-
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
 * perform sanity checks and use them if all looks good.
*/
- size = initrd_end - initrd_start;
- if (initrd_end == 0 || size == 0) {
- initrd_start = 0;
- initrd_end = 0;
- } else
- return initrd_end;
-
- end = (unsigned long)&_end;
- tmp = PAGE_ALIGN(end) - sizeof(u32) * 2;
- if (tmp < end)
- tmp += PAGE_SIZE;
-
- initrd_header = (u32 *)tmp;
- if (initrd_header[0] == 0x494E5244) {
- initrd_start = (unsigned long)&initrd_header[2];
- initrd_end = initrd_start + initrd_header[1];
+ if (initrd_start && initrd_end > initrd_start)
+ goto sanitize;
+
+ /*
+ * See if initrd has been added to the kernel image by
+ * arch/mips/boot/addinitrd.c. In that case a header made up
+ * of 8 bytes is prepended to the initrd: the first word is a
+ * magic number and the second one is the size of the initrd.
+ * The initrd start must be page aligned in any case.
+ */
+ initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
+ if (initrd_header[0] != 0x494E5244)
+ goto disable;
+ initrd_start = (unsigned long)(initrd_header + 2);
+ initrd_end = initrd_start + initrd_header[1];
+
+sanitize:
+ if (initrd_start & ~PAGE_MASK) {
+ printk(KERN_ERR "initrd start must be page aligned\n");
+ goto disable;
}
- return initrd_end;
+ if (initrd_start < PAGE_OFFSET) {
+ printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
+ /*
+ * Sanitize initrd addresses. For example firmware
+ * can't guess whether it needs to pass them as 64-bit
+ * values when the kernel has been built as a pure 32-bit
+ * binary. We also need to switch from KSEG0 to XKPHYS
+ * addresses now, so the code can safely use __pa().
+ */
+ end = __pa(initrd_end);
+ initrd_end = (unsigned long)__va(end);
+ initrd_start = (unsigned long)__va(__pa(initrd_start));
+
+ ROOT_DEV = Root_RAM0;
+ return PFN_UP(end);
+disable:
+ initrd_start = 0;
+ initrd_end = 0;
+ return 0;
}
static void __init finalize_initrd(void)
@@ -204,12 +226,12 @@ static void __init finalize_initrd(void)
printk(KERN_INFO "Initrd not found or empty");
goto disable;
}
- if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk("Initrd extends beyond end of memory");
goto disable;
}
- reserve_bootmem(CPHYSADDR(initrd_start), size);
+ reserve_bootmem(__pa(initrd_start), size);
initrd_below_start_ok = 1;
printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -259,8 +281,7 @@ static void __init bootmem_init(void)
* not selected. Once that done we can determine the low bound
* of usable memory.
*/
- reserved_end = init_initrd();
- reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
+ reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
/*
* Find the highest page frame number we have available.
@@ -432,10 +453,10 @@ static void __init resource_init(void)
if (UNCAC_BASE != IO_BASE)
return;
- code_resource.start = virt_to_phys(&_text);
- code_resource.end = virt_to_phys(&_etext) - 1;
- data_resource.start = virt_to_phys(&_etext);
- data_resource.end = virt_to_phys(&_edata) - 1;
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
/*
* Request address space for all standard RAM.
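init_initrd() above probes for the 8-byte header that arch/mips/boot/addinitrd.c prepends when an initrd is glued onto the kernel image; the magic 0x494E5244 is simply "INRD" in ASCII. A sketch of that layout (the struct name is hypothetical, for illustration only):

	struct addinitrd_header {
		u32 magic;	/* 0x494E5244, i.e. "INRD" */
		u32 size;	/* size of the initrd payload in bytes */
	};
	/* the page-aligned initrd payload follows immediately after */
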
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 477c5334ec1..a67c18555ed 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -17,7 +17,6 @@
*/
#include <linux/cache.h>
#include <linux/sched.h>
-#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ac19a6cbf6..1ee689c0e0c 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -278,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 /* need to mark IPIs as IRQ_PER_CPU */
irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 49db516789e..f2a8701e414 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
spin_lock(&smp_call_lock);
call_data = &data;
- mb();
+ smp_mb();
/* Send a message to all other CPUs and wait for them to respond */
for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
* Notify initiating CPU that I've grabbed the data and am
* about to execute the function.
*/
- mb();
+ smp_mb();
atomic_inc(&call_data->started);
/*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
irq_exit();
if (wait) {
- mb();
+ smp_mb();
atomic_inc(&call_data->finished);
}
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3b78caf112f..802febed7df 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1009,6 +1009,7 @@ void setup_cross_vpe_interrupts(void)
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index e535f86efa2..11aab6d6bfe 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,7 +11,6 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -83,17 +82,11 @@ static void null_timer_ack(void) { /* nothing */ }
/*
* Null high precision timer functions for systems lacking one.
*/
-static unsigned int null_hpt_read(void)
+static cycle_t null_hpt_read(void)
{
return 0;
}
-static void __init null_hpt_init(void)
-{
- /* nothing */
-}
-
-
/*
* Timer ack for an R4k-compatible timer of a known frequency.
*/
@@ -118,7 +111,7 @@ static void c0_timer_ack(void)
/*
* High precision timer functions for a R4k-compatible timer.
*/
-static unsigned int c0_hpt_read(void)
+static cycle_t c0_hpt_read(void)
{
return read_c0_count();
}
@@ -132,9 +125,6 @@ static void __init c0_hpt_timer_init(void)
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
-unsigned int (*mips_hpt_read)(void);
-void (*mips_hpt_init)(void) __initdata = null_hpt_init;
-unsigned int mips_hpt_mask = 0xffffffff;
/* last time when xtime and rtc are sync'ed up */
static long last_rtc_update;
@@ -276,8 +266,7 @@ static struct irqaction timer_irqaction = {
static unsigned int __init calibrate_hpt(void)
{
- u64 frequency;
- u32 hpt_start, hpt_end, hpt_count, hz;
+ cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
const int loops = HZ / 10;
int log_2_loops = 0;
@@ -303,28 +292,23 @@ static unsigned int __init calibrate_hpt(void)
* during the calculated number of periods between timer
* interrupts.
*/
- hpt_start = mips_hpt_read();
+ hpt_start = clocksource_mips.read();
do {
while (mips_timer_state());
while (!mips_timer_state());
} while (--i);
- hpt_end = mips_hpt_read();
+ hpt_end = clocksource_mips.read();
- hpt_count = (hpt_end - hpt_start) & mips_hpt_mask;
+ hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
hz = HZ;
- frequency = (u64)hpt_count * (u64)hz;
+ frequency = hpt_count * hz;
return frequency >> log_2_loops;
}
-static cycle_t read_mips_hpt(void)
-{
- return (cycle_t)mips_hpt_read();
-}
-
-static struct clocksource clocksource_mips = {
+struct clocksource clocksource_mips = {
.name = "MIPS",
- .read = read_mips_hpt,
+ .mask = 0xffffffff,
.is_continuous = 1,
};
@@ -333,7 +317,7 @@ static void __init init_mips_clocksource(void)
u64 temp;
u32 shift;
- if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read)
+ if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
return;
 /* Calculate a somewhat reasonable rating value */
@@ -347,7 +331,6 @@ static void __init init_mips_clocksource(void)
}
clocksource_mips.shift = shift;
clocksource_mips.mult = (u32)temp;
- clocksource_mips.mask = mips_hpt_mask;
clocksource_register(&clocksource_mips);
}
@@ -367,32 +350,36 @@ void __init time_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
/* Choose appropriate high precision timer routines. */
- if (!cpu_has_counter && !mips_hpt_read)
+ if (!cpu_has_counter && !clocksource_mips.read)
/* No high precision timer -- sorry. */
- mips_hpt_read = null_hpt_read;
+ clocksource_mips.read = null_hpt_read;
else if (!mips_hpt_frequency && !mips_timer_state) {
/* A high precision timer of unknown frequency. */
- if (!mips_hpt_read)
+ if (!clocksource_mips.read)
/* No external high precision timer -- use R4k. */
- mips_hpt_read = c0_hpt_read;
+ clocksource_mips.read = c0_hpt_read;
} else {
/* We know counter frequency. Or we can get it. */
- if (!mips_hpt_read) {
+ if (!clocksource_mips.read) {
/* No external high precision timer -- use R4k. */
- mips_hpt_read = c0_hpt_read;
+ clocksource_mips.read = c0_hpt_read;
if (!mips_timer_state) {
/* No external timer interrupt -- use R4k. */
- mips_hpt_init = c0_hpt_timer_init;
mips_timer_ack = c0_timer_ack;
+ /* Calculate cache parameters. */
+ cycles_per_jiffy =
+ (mips_hpt_frequency + HZ / 2) / HZ;
+ /*
+ * This sets up the high precision
+ * timer for the first interrupt.
+ */
+ c0_hpt_timer_init();
}
}
if (!mips_hpt_frequency)
mips_hpt_frequency = calibrate_hpt();
- /* Calculate cache parameters. */
- cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
-
/* Report the high precision timer rate for a reference. */
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
@@ -403,9 +390,6 @@ void __init time_init(void)
/* No timer interrupt ack (e.g. i8254). */
mips_timer_ack = null_timer_ack;
- /* This sets up the high precision timer for the first interrupt. */
- mips_hpt_init();
-
/*
* Call board specific timer interrupt setup.
*
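The net effect of the time.c changes above is that the counter is published directly through clocksource_mips (read, mask, mult, shift) instead of the old mips_hpt_read/mips_hpt_mask indirection, so the generic timekeeping core scales raw counter deltas itself. That scaling is a fixed-point multiply; a sketch of the cycles-to-nanoseconds step, mirroring what clocksource_cyc2ns() does:

	static inline u64 cyc2ns_sketch(struct clocksource *cs, cycle_t cycles)
	{
		/* mult/shift are chosen in init_mips_clocksource() so that
		 * counter ticks at mips_hpt_frequency map to nanoseconds */
		return ((u64)cycles * cs->mult) >> cs->shift;
	}
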
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9fda1b8be3a..2a932cada24 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -54,6 +54,8 @@ extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
@@ -397,19 +399,6 @@ asmlinkage void do_be(struct pt_regs *regs)
force_sig(SIGBUS, current);
}
-static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
-{
- unsigned int __user *epc;
-
- epc = (unsigned int __user *) regs->cp0_epc +
- ((regs->cp0_cause & CAUSEF_BD) != 0);
- if (!get_user(*opcode, epc))
- return 0;
-
- force_sig(SIGSEGV, current);
- return 1;
-}
-
/*
* ll/sc emulation
*/
@@ -544,8 +533,8 @@ static inline int simulate_llsc(struct pt_regs *regs)
{
unsigned int opcode;
- if (unlikely(get_insn_opcode(regs, &opcode)))
- return -EFAULT;
+ if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
if ((opcode & OPCODE) == LL) {
simulate_ll(regs, opcode);
@@ -557,6 +546,10 @@ static inline int simulate_llsc(struct pt_regs *regs)
}
return -EFAULT; /* Strange things going on ... */
+
+out_sigsegv:
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
}
/*
@@ -569,8 +562,8 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
struct thread_info *ti = task_thread_info(current);
unsigned int opcode;
- if (unlikely(get_insn_opcode(regs, &opcode)))
- return -EFAULT;
+ if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
if (unlikely(compute_return_epc(regs)))
return -EFAULT;
@@ -589,6 +582,10 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
/* Not ours. */
return -EFAULT;
+
+out_sigsegv:
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
}
asmlinkage void do_ov(struct pt_regs *regs)
@@ -672,10 +669,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned int opcode, bcode;
siginfo_t info;
- die_if_kernel("Break instruction in kernel code", regs);
-
- if (get_insn_opcode(regs, &opcode))
- return;
+ if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
/*
* There is the ancient bug in the MIPS assemblers that the break
@@ -696,6 +691,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
switch (bcode) {
case BRK_OVERFLOW << 10:
case BRK_DIVZERO << 10:
+ die_if_kernel("Break instruction in kernel code", regs);
if (bcode == (BRK_DIVZERO << 10))
info.si_code = FPE_INTDIV;
else
@@ -705,9 +701,16 @@ asmlinkage void do_bp(struct pt_regs *regs)
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
+ case BRK_BUG:
+ die("Kernel bug detected", regs);
+ break;
default:
+ die_if_kernel("Break instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+
+out_sigsegv:
+ force_sig(SIGSEGV, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
@@ -715,10 +718,8 @@ asmlinkage void do_tr(struct pt_regs *regs)
unsigned int opcode, tcode = 0;
siginfo_t info;
- die_if_kernel("Trap instruction in kernel code", regs);
-
- if (get_insn_opcode(regs, &opcode))
- return;
+ if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
@@ -733,6 +734,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
switch (tcode) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
+ die_if_kernel("Trap instruction in kernel code", regs);
if (tcode == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
@@ -742,9 +744,16 @@ asmlinkage void do_tr(struct pt_regs *regs)
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
+ case BRK_BUG:
+ die("Kernel bug detected", regs);
+ break;
default:
+ die_if_kernel("Trap instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
+
+out_sigsegv:
+ force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
@@ -1423,6 +1432,15 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
memcpy((void *)(uncached_ebase + offset), addr, size);
}
+static int __initdata rdhwr_noopt;
+static int __init set_rdhwr_noopt(char *str)
+{
+ rdhwr_noopt = 1;
+ return 1;
+}
+
+__setup("rdhwr_noopt", set_rdhwr_noopt);
+
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
@@ -1502,7 +1520,9 @@ void __init trap_init(void)
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
- set_except_vector(10, handle_ri);
+ set_except_vector(10, rdhwr_noopt ? handle_ri :
+ (cpu_has_vtag_icache ?
+ handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
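The traps.c changes above replace the get_insn_opcode() helper with direct get_user() reads at exception_epc(regs). The important detail is the branch delay slot: when Cause.BD is set, EPC points at the branch, and the instruction that actually trapped is the one after it. A sketch of the idea behind exception_epc() (the real helper lives in asm/branch.h):

	static unsigned long exception_epc_sketch(struct pt_regs *regs)
	{
		if (regs->cp0_cause & CAUSEF_BD)	/* faulted in a delay slot */
			return regs->cp0_epc + 4;
		return regs->cp0_epc;
	}
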
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index a144a002dcc..2affa5ff171 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -36,47 +36,20 @@ static volatile int lasat_int_mask_shift;
void disable_lasat_irq(unsigned int irq_nr)
{
- unsigned long flags;
-
- local_irq_save(flags);
*lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
- local_irq_restore(flags);
}
void enable_lasat_irq(unsigned int irq_nr)
{
- unsigned long flags;
-
- local_irq_save(flags);
*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
- local_irq_restore(flags);
-}
-
-static unsigned int startup_lasat_irq(unsigned int irq)
-{
- enable_lasat_irq(irq);
-
- return 0; /* never anything pending */
-}
-
-#define shutdown_lasat_irq disable_lasat_irq
-
-#define mask_and_ack_lasat_irq disable_lasat_irq
-
-static void end_lasat_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_lasat_irq(irq);
}
static struct irq_chip lasat_irq_type = {
.typename = "Lasat",
- .startup = startup_lasat_irq,
- .shutdown = shutdown_lasat_irq,
- .enable = enable_lasat_irq,
- .disable = disable_lasat_irq,
- .ack = mask_and_ack_lasat_irq,
- .end = end_lasat_irq,
+ .ack = disable_lasat_irq,
+ .mask = disable_lasat_irq,
+ .mask_ack = disable_lasat_irq,
+ .unmask = enable_lasat_irq,
};
static inline int ls1bit32(unsigned int x)
@@ -152,10 +125,6 @@ void __init arch_init_irq(void)
panic("arch_init_irq: mips_machtype incorrect");
}
- for (i = 0; i <= LASATINT_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &lasat_irq_type;
- }
+ for (i = 0; i <= LASATINT_END; i++)
+ set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
}
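The Lasat conversion above (and the Atlas and CPCI ones later in this patch) moves the controller from the old ->startup/->shutdown/->enable/->disable model to the generic flow-handler model: the chip only provides mask/unmask/ack and handle_level_irq() decides when each is called. A sketch of the pattern for a hypothetical controller (the example_* names and EXAMPLE_IRQ_BASE are placeholders):

	static void example_mask_irq(unsigned int irq)
	{
		/* set the bit for this irq in the controller's mask register */
	}

	static void example_unmask_irq(unsigned int irq)
	{
		/* clear the bit for this irq in the controller's mask register */
	}

	static struct irq_chip example_irq_chip = {
		.typename	= "example",
		.ack		= example_mask_irq,
		.mask		= example_mask_irq,
		.mask_ack	= example_mask_irq,
		.unmask		= example_unmask_irq,
	};

	static void __init example_init_irq(void)
	{
		int i;

		for (i = EXAMPLE_IRQ_BASE; i < EXAMPLE_IRQ_BASE + 8; i++)
			set_irq_chip_and_handler(i, &example_irq_chip,
						 handle_level_irq);
	}
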
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index ad285786e74..dcd4d2ed2ac 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o memset.o watch.o
+lib-y += memset.o watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-32/csum_partial.S b/arch/mips/lib-32/csum_partial.S
deleted file mode 100644
index ea257dbdcc4..00000000000
--- a/arch/mips/lib-32/csum_partial.S
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ralf Baechle
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg) \
- addu sum, reg; \
- sltu v1, sum, reg; \
- addu sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
- lw t0, (offset + 0x00)(src); \
- lw t1, (offset + 0x04)(src); \
- lw t2, (offset + 0x08)(src); \
- lw t3, (offset + 0x0c)(src); \
- ADDC(sum, t0); \
- ADDC(sum, t1); \
- ADDC(sum, t2); \
- ADDC(sum, t3); \
- lw t0, (offset + 0x10)(src); \
- lw t1, (offset + 0x14)(src); \
- lw t2, (offset + 0x18)(src); \
- lw t3, (offset + 0x1c)(src); \
- ADDC(sum, t0); \
- ADDC(sum, t1); \
- ADDC(sum, t2); \
- ADDC(sum, t3); \
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define dest a1
-#define sum v0
-
- .text
- .set noreorder
-
-/* unknown src alignment and < 8 bytes to go */
-small_csumcpy:
- move a1, t2
-
- andi t0, a1, 4
- beqz t0, 1f
- andi t0, a1, 2
-
- /* Still a full word to go */
- ulw t1, (src)
- addiu src, 4
- ADDC(sum, t1)
-
-1: move t1, zero
- beqz t0, 1f
- andi t0, a1, 1
-
- /* Still a halfword to go */
- ulhu t1, (src)
- addiu src, 2
-
-1: beqz t0, 1f
- sll t1, t1, 16
-
- lbu t2, (src)
- nop
-
-#ifdef __MIPSEB__
- sll t2, t2, 8
-#endif
- or t1, t2
-
-1: ADDC(sum, t1)
-
- /* fold checksum */
- sll v1, sum, 16
- addu sum, v1
- sltu v1, sum, v1
- srl sum, sum, 16
- addu sum, v1
-
- /* odd buffer alignment? */
- beqz t7, 1f
- nop
- sll v1, sum, 8
- srl sum, sum, 8
- or sum, v1
- andi sum, 0xffff
-1:
- .set reorder
- /* Add the passed partial csum. */
- ADDC(sum, a2)
- jr ra
- .set noreorder
-
-/* ------------------------------------------------------------------------- */
-
- .align 5
-LEAF(csum_partial)
- move sum, zero
- move t7, zero
-
- sltiu t8, a1, 0x8
- bnez t8, small_csumcpy /* < 8 bytes to copy */
- move t2, a1
-
- beqz a1, out
- andi t7, src, 0x1 /* odd buffer? */
-
-hword_align:
- beqz t7, word_align
- andi t8, src, 0x2
-
- lbu t0, (src)
- subu a1, a1, 0x1
-#ifdef __MIPSEL__
- sll t0, t0, 8
-#endif
- ADDC(sum, t0)
- addu src, src, 0x1
- andi t8, src, 0x2
-
-word_align:
- beqz t8, dword_align
- sltiu t8, a1, 56
-
- lhu t0, (src)
- subu a1, a1, 0x2
- ADDC(sum, t0)
- sltiu t8, a1, 56
- addu src, src, 0x2
-
-dword_align:
- bnez t8, do_end_words
- move t8, a1
-
- andi t8, src, 0x4
- beqz t8, qword_align
- andi t8, src, 0x8
-
- lw t0, 0x00(src)
- subu a1, a1, 0x4
- ADDC(sum, t0)
- addu src, src, 0x4
- andi t8, src, 0x8
-
-qword_align:
- beqz t8, oword_align
- andi t8, src, 0x10
-
- lw t0, 0x00(src)
- lw t1, 0x04(src)
- subu a1, a1, 0x8
- ADDC(sum, t0)
- ADDC(sum, t1)
- addu src, src, 0x8
- andi t8, src, 0x10
-
-oword_align:
- beqz t8, begin_movement
- srl t8, a1, 0x7
-
- lw t3, 0x08(src)
- lw t4, 0x0c(src)
- lw t0, 0x00(src)
- lw t1, 0x04(src)
- ADDC(sum, t3)
- ADDC(sum, t4)
- ADDC(sum, t0)
- ADDC(sum, t1)
- subu a1, a1, 0x10
- addu src, src, 0x10
- srl t8, a1, 0x7
-
-begin_movement:
- beqz t8, 1f
- andi t2, a1, 0x40
-
-move_128bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
- CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
- CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
- CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
- subu t8, t8, 0x01
- bnez t8, move_128bytes
- addu src, src, 0x80
-
-1:
- beqz t2, 1f
- andi t2, a1, 0x20
-
-move_64bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
- CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
- addu src, src, 0x40
-
-1:
- beqz t2, do_end_words
- andi t8, a1, 0x1c
-
-move_32bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
- andi t8, a1, 0x1c
- addu src, src, 0x20
-
-do_end_words:
- beqz t8, maybe_end_cruft
- srl t8, t8, 0x2
-
-end_words:
- lw t0, (src)
- subu t8, t8, 0x1
- ADDC(sum, t0)
- bnez t8, end_words
- addu src, src, 0x4
-
-maybe_end_cruft:
- andi t2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, t2
- beqz t2, out
- move a1, t2
-
-end_bytes:
- lb t0, (src)
- subu a1, a1, 0x1
- bnez a2, end_bytes
- addu src, src, 0x1
-
-out:
- jr ra
- move v0, sum
- END(csum_partial)
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index ad285786e74..dcd4d2ed2ac 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o memset.o watch.o
+lib-y += memset.o watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/csum_partial.S b/arch/mips/lib-64/csum_partial.S
deleted file mode 100644
index 25aba660cc9..00000000000
--- a/arch/mips/lib-64/csum_partial.S
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Quick'n'dirty IP checksum ...
- *
- * Copyright (C) 1998, 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg) \
- addu sum, reg; \
- sltu v1, sum, reg; \
- addu sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
- lw t0, (offset + 0x00)(src); \
- lw t1, (offset + 0x04)(src); \
- lw t2, (offset + 0x08)(src); \
- lw t3, (offset + 0x0c)(src); \
- ADDC(sum, t0); \
- ADDC(sum, t1); \
- ADDC(sum, t2); \
- ADDC(sum, t3); \
- lw t0, (offset + 0x10)(src); \
- lw t1, (offset + 0x14)(src); \
- lw t2, (offset + 0x18)(src); \
- lw t3, (offset + 0x1c)(src); \
- ADDC(sum, t0); \
- ADDC(sum, t1); \
- ADDC(sum, t2); \
- ADDC(sum, t3); \
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define sum v0
-
- .text
- .set noreorder
-
-/* unknown src alignment and < 8 bytes to go */
-small_csumcpy:
- move a1, ta2
-
- andi ta0, a1, 4
- beqz ta0, 1f
- andi ta0, a1, 2
-
- /* Still a full word to go */
- ulw ta1, (src)
- daddiu src, 4
- ADDC(sum, ta1)
-
-1: move ta1, zero
- beqz ta0, 1f
- andi ta0, a1, 1
-
- /* Still a halfword to go */
- ulhu ta1, (src)
- daddiu src, 2
-
-1: beqz ta0, 1f
- sll ta1, ta1, 16
-
- lbu ta2, (src)
- nop
-
-#ifdef __MIPSEB__
- sll ta2, ta2, 8
-#endif
- or ta1, ta2
-
-1: ADDC(sum, ta1)
-
- /* fold checksum */
- sll v1, sum, 16
- addu sum, v1
- sltu v1, sum, v1
- srl sum, sum, 16
- addu sum, v1
-
- /* odd buffer alignment? */
- beqz t3, 1f
- nop
- sll v1, sum, 8
- srl sum, sum, 8
- or sum, v1
- andi sum, 0xffff
-1:
- .set reorder
- /* Add the passed partial csum. */
- ADDC(sum, a2)
- jr ra
- .set noreorder
-
-/* ------------------------------------------------------------------------- */
-
- .align 5
-LEAF(csum_partial)
- move sum, zero
- move t3, zero
-
- sltiu t8, a1, 0x8
- bnez t8, small_csumcpy /* < 8 bytes to copy */
- move ta2, a1
-
- beqz a1, out
- andi t3, src, 0x1 /* odd buffer? */
-
-hword_align:
- beqz t3, word_align
- andi t8, src, 0x2
-
- lbu ta0, (src)
- dsubu a1, a1, 0x1
-#ifdef __MIPSEL__
- sll ta0, ta0, 8
-#endif
- ADDC(sum, ta0)
- daddu src, src, 0x1
- andi t8, src, 0x2
-
-word_align:
- beqz t8, dword_align
- sltiu t8, a1, 56
-
- lhu ta0, (src)
- dsubu a1, a1, 0x2
- ADDC(sum, ta0)
- sltiu t8, a1, 56
- daddu src, src, 0x2
-
-dword_align:
- bnez t8, do_end_words
- move t8, a1
-
- andi t8, src, 0x4
- beqz t8, qword_align
- andi t8, src, 0x8
-
- lw ta0, 0x00(src)
- dsubu a1, a1, 0x4
- ADDC(sum, ta0)
- daddu src, src, 0x4
- andi t8, src, 0x8
-
-qword_align:
- beqz t8, oword_align
- andi t8, src, 0x10
-
- lw ta0, 0x00(src)
- lw ta1, 0x04(src)
- dsubu a1, a1, 0x8
- ADDC(sum, ta0)
- ADDC(sum, ta1)
- daddu src, src, 0x8
- andi t8, src, 0x10
-
-oword_align:
- beqz t8, begin_movement
- dsrl t8, a1, 0x7
-
- lw ta3, 0x08(src)
- lw t0, 0x0c(src)
- lw ta0, 0x00(src)
- lw ta1, 0x04(src)
- ADDC(sum, ta3)
- ADDC(sum, t0)
- ADDC(sum, ta0)
- ADDC(sum, ta1)
- dsubu a1, a1, 0x10
- daddu src, src, 0x10
- dsrl t8, a1, 0x7
-
-begin_movement:
- beqz t8, 1f
- andi ta2, a1, 0x40
-
-move_128bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
- CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
- CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
- CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
- dsubu t8, t8, 0x01
- bnez t8, move_128bytes
- daddu src, src, 0x80
-
-1:
- beqz ta2, 1f
- andi ta2, a1, 0x20
-
-move_64bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
- CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
- daddu src, src, 0x40
-
-1:
- beqz ta2, do_end_words
- andi t8, a1, 0x1c
-
-move_32bytes:
- CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
- andi t8, a1, 0x1c
- daddu src, src, 0x20
-
-do_end_words:
- beqz t8, maybe_end_cruft
- dsrl t8, t8, 0x2
-
-end_words:
- lw ta0, (src)
- dsubu t8, t8, 0x1
- ADDC(sum, ta0)
- bnez t8, end_words
- daddu src, src, 0x4
-
-maybe_end_cruft:
- andi ta2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, ta2 /* XXX ??? */
- beqz t2, out
- move a1, ta2
-
-end_bytes:
- lb ta0, (src)
- dsubu a1, a1, 0x1
- bnez a2, end_bytes
- daddu src, src, 0x1
-
-out:
- jr ra
- move v0, sum
- END(csum_partial)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index b225543f530..888b61ea12f 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,8 +2,8 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
- strnlen_user.o uncached.o
+lib-y += csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+ strlen_user.o strncpy_user.o strnlen_user.o uncached.o
obj-y += iomap.o
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 00000000000..15611d9df7a
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,258 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+#define T0 ta0
+#define T1 ta1
+#define T2 ta2
+#define T3 ta3
+#define T4 t0
+#define T7 t3
+#else
+#define T0 t0
+#define T1 t1
+#define T2 t2
+#define T3 t3
+#define T4 t4
+#define T7 t7
+#endif
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3) \
+ lw _t0, (offset + 0x00)(src); \
+ lw _t1, (offset + 0x04)(src); \
+ lw _t2, (offset + 0x08)(src); \
+ lw _t3, (offset + 0x0c)(src); \
+ ADDC(sum, _t0); \
+ ADDC(sum, _t1); \
+ ADDC(sum, _t2); \
+ ADDC(sum, _t3); \
+ lw _t0, (offset + 0x10)(src); \
+ lw _t1, (offset + 0x14)(src); \
+ lw _t2, (offset + 0x18)(src); \
+ lw _t3, (offset + 0x1c)(src); \
+ ADDC(sum, _t0); \
+ ADDC(sum, _t1); \
+ ADDC(sum, _t2); \
+ ADDC(sum, _t3); \
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+ move a1, T2
+
+ andi T0, a1, 4
+ beqz T0, 1f
+ andi T0, a1, 2
+
+ /* Still a full word to go */
+ ulw T1, (src)
+ PTR_ADDIU src, 4
+ ADDC(sum, T1)
+
+1: move T1, zero
+ beqz T0, 1f
+ andi T0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu T1, (src)
+ PTR_ADDIU src, 2
+
+1: beqz T0, 1f
+ sll T1, T1, 16
+
+ lbu T2, (src)
+ nop
+
+#ifdef __MIPSEB__
+ sll T2, T2, 8
+#endif
+ or T1, T2
+
+1: ADDC(sum, T1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz T7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a2)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial)
+ move sum, zero
+ move T7, zero
+
+ sltiu t8, a1, 0x8
+ bnez t8, small_csumcpy /* < 8 bytes to copy */
+ move T2, a1
+
+ beqz a1, out
+ andi T7, src, 0x1 /* odd buffer? */
+
+hword_align:
+ beqz T7, word_align
+ andi t8, src, 0x2
+
+ lbu T0, (src)
+ LONG_SUBU a1, a1, 0x1
+#ifdef __MIPSEL__
+ sll T0, T0, 8
+#endif
+ ADDC(sum, T0)
+ PTR_ADDU src, src, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a1, 56
+
+ lhu T0, (src)
+ LONG_SUBU a1, a1, 0x2
+ ADDC(sum, T0)
+ sltiu t8, a1, 56
+ PTR_ADDU src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a1
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ lw T0, 0x00(src)
+ LONG_SUBU a1, a1, 0x4
+ ADDC(sum, T0)
+ PTR_ADDU src, src, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ lw T0, 0x00(src)
+ lw T1, 0x04(src)
+ LONG_SUBU a1, a1, 0x8
+ ADDC(sum, T0)
+ ADDC(sum, T1)
+ PTR_ADDU src, src, 0x8
+ andi t8, src, 0x10
+
+oword_align:
+ beqz t8, begin_movement
+ LONG_SRL t8, a1, 0x7
+
+ lw T3, 0x08(src)
+ lw T4, 0x0c(src)
+ lw T0, 0x00(src)
+ lw T1, 0x04(src)
+ ADDC(sum, T3)
+ ADDC(sum, T4)
+ ADDC(sum, T0)
+ ADDC(sum, T1)
+ LONG_SUBU a1, a1, 0x10
+ PTR_ADDU src, src, 0x10
+ LONG_SRL t8, a1, 0x7
+
+begin_movement:
+ beqz t8, 1f
+ andi T2, a1, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
+ LONG_SUBU t8, t8, 0x01
+ bnez t8, move_128bytes
+ PTR_ADDU src, src, 0x80
+
+1:
+ beqz T2, 1f
+ andi T2, a1, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+ CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+ PTR_ADDU src, src, 0x40
+
+1:
+ beqz T2, do_end_words
+ andi t8, a1, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+ andi t8, a1, 0x1c
+ PTR_ADDU src, src, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ LONG_SRL t8, t8, 0x2
+
+end_words:
+ lw T0, (src)
+ LONG_SUBU t8, t8, 0x1
+ ADDC(sum, T0)
+ bnez t8, end_words
+ PTR_ADDU src, src, 0x4
+
+maybe_end_cruft:
+ andi T2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, T2 /* XXX ??? */
+ beqz t2, out
+ move a1, T2
+
+end_bytes:
+ lb T0, (src)
+ LONG_SUBU a1, a1, 0x1
+ bnez a2, end_bytes
+ PTR_ADDU src, src, 0x1
+
+out:
+ jr ra
+ move v0, sum
+ END(csum_partial)
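The ADDC() macro above implements end-around-carry addition for the Internet one's-complement checksum: whenever the 32-bit add wraps, the carry is fed back into the sum, and at the end the 32-bit accumulator is folded down to 16 bits. The same arithmetic in C, as a sketch:

	static unsigned int addc(unsigned int sum, unsigned int val)
	{
		sum += val;
		if (sum < val)			/* 32-bit overflow: wrap carry */
			sum++;
		return sum;
	}

	static unsigned short csum_fold_sketch(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb final carry */
		return sum;
	}
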
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
index 6e9f366f961..1720f2ceeea 100644
--- a/arch/mips/lib/csum_partial_copy.c
+++ b/arch/mips/lib/csum_partial_copy.c
@@ -16,8 +16,8 @@
/*
* copy while checksumming, otherwise like csum_partial
*/
-unsigned int csum_partial_copy_nocheck(const unsigned char *src,
- unsigned char *dst, int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src,
+ void *dst, int len, __wsum sum)
{
/*
* It's 2:30 am and I don't feel like doing it real ...
@@ -33,8 +33,8 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src,
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-unsigned int csum_partial_copy_from_user (const unsigned char __user *src,
- unsigned char *dst, int len, unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user (const void __user *src,
+ void *dst, int len, __wsum sum, int *err_ptr)
{
int missing;
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index be624b8c3b0..43dba6ce660 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -62,16 +62,6 @@ void enable_atlas_irq(unsigned int irq_nr)
iob();
}
-static unsigned int startup_atlas_irq(unsigned int irq)
-{
- enable_atlas_irq(irq);
- return 0; /* never anything pending */
-}
-
-#define shutdown_atlas_irq disable_atlas_irq
-
-#define mask_and_ack_atlas_irq disable_atlas_irq
-
static void end_atlas_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -80,11 +70,11 @@ static void end_atlas_irq(unsigned int irq)
static struct irq_chip atlas_irq_type = {
.typename = "Atlas",
- .startup = startup_atlas_irq,
- .shutdown = shutdown_atlas_irq,
- .enable = enable_atlas_irq,
- .disable = disable_atlas_irq,
- .ack = mask_and_ack_atlas_irq,
+ .ack = disable_atlas_irq,
+ .mask = disable_atlas_irq,
+ .mask_ack = disable_atlas_irq,
+ .unmask = enable_atlas_irq,
+ .eoi = enable_atlas_irq,
.end = end_atlas_irq,
};
@@ -217,13 +207,8 @@ static inline void init_atlas_irqs (int base)
*/
atlas_hw0_icregs->intrsten = 0xffffffff;
- for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &atlas_irq_type;
- spin_lock_init(&irq_desc[i].lock);
- }
+ for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++)
+ set_irq_chip_and_handler(i, &atlas_irq_type, handle_level_irq);
}
static struct irqaction atlasirq = {
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index d817c60c5ca..e4604c73f02 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -288,6 +288,7 @@ void __init plat_timer_setup(struct irqaction *irq)
The effect is that the int remains disabled on the second cpu.
Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
/* to generate the first timer interrupt */
diff --git a/arch/mips/mips-boards/sim/sim_time.c b/arch/mips/mips-boards/sim/sim_time.c
index 24a4ed00cc0..30711d016fe 100644
--- a/arch/mips/mips-boards/sim/sim_time.c
+++ b/arch/mips/mips-boards/sim/sim_time.c
@@ -3,31 +3,24 @@
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-
-#include <asm/mipsregs.h>
-#include <asm/ptrace.h>
-#include <asm/hardirq.h>
-#include <asm/div64.h>
-#include <asm/cpu.h>
-#include <asm/time.h>
-
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/timex.h>
+
#include <asm/mipsregs.h>
+#include <asm/ptrace.h>
#include <asm/hardirq.h>
-#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/time.h>
+#include <asm/irq.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
+#include <asm/smp.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/simint.h>
-#include <asm/mc146818-time.h>
-#include <asm/smp.h>
unsigned long cpu_khz;
@@ -203,7 +196,8 @@ void __init plat_timer_setup(struct irqaction *irq)
 on separate CPUs the first one tries to handle the second interrupt.
The effect is that the int remains disabled on the second cpu.
Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
- irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+ irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
+ set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
/* to generate the first timer interrupt */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cc895dad71d..df04a315d83 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -323,7 +323,6 @@ static void __init r4k_blast_scache_setup(void)
static inline void local_r4k_flush_cache_all(void * args)
{
r4k_blast_dcache();
- r4k_blast_icache();
}
static void r4k_flush_cache_all(void)
@@ -359,21 +358,19 @@ static void r4k___flush_cache_all(void)
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
- int exec;
if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
return;
- exec = vma->vm_flags & VM_EXEC;
- if (cpu_has_dc_aliases || exec)
- r4k_blast_dcache();
- if (exec)
- r4k_blast_icache();
+ r4k_blast_dcache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ if (!cpu_has_dc_aliases)
+ return;
+
r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
@@ -384,18 +381,21 @@ static inline void local_r4k_flush_cache_mm(void * args)
if (!cpu_context(smp_processor_id(), mm))
return;
- r4k_blast_dcache();
- r4k_blast_icache();
-
/*
* Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
* only flush the primary caches but R10000 and R12000 behave sane ...
+ * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+ * caches, so we can bail out early.
*/
if (current_cpu_data.cputype == CPU_R4000SC ||
current_cpu_data.cputype == CPU_R4000MC ||
current_cpu_data.cputype == CPU_R4400SC ||
- current_cpu_data.cputype == CPU_R4400MC)
+ current_cpu_data.cputype == CPU_R4400MC) {
r4k_blast_scache();
+ return;
+ }
+
+ r4k_blast_dcache();
}
static void r4k_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 7fa5fd16e46..5697c6e250a 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -190,14 +190,14 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 1;
}
EXPORT_SYMBOL(dma_is_consistent);
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index 8da19fd22ac..f088344db46 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -197,14 +197,14 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 1;
}
EXPORT_SYMBOL(dma_is_consistent);
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index ec54ed0d26f..b42b6f7456e 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -363,14 +363,15 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 1;
}
EXPORT_SYMBOL(dma_is_consistent);
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
{
if (direction == DMA_NONE)
return;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2eeffe5c2a3..8cecef0957c 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -299,14 +299,15 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 1;
}
EXPORT_SYMBOL(dma_is_consistent);
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
{
if (direction == DMA_NONE)
return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 8423d859077..6f90e7ef66a 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -60,6 +60,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
*/
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto vmalloc_fault;
+#ifdef MODULE_START
+ if (unlikely(address >= MODULE_START && address < MODULE_END))
+ goto vmalloc_fault;
+#endif
/*
* If we're in an interrupt or have no user
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 99ebf3ccc22..675502ada5a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- inc_preempt_count();
+ pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < FIXADDR_START) { // FIXME
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
return;
}
@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
local_flush_tlb_one(vaddr);
#endif
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
#ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx;
unsigned long vaddr;
- inc_preempt_count();
+ pagefault_disable();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
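The highmem.c change above swaps the open-coded inc_preempt_count()/dec_preempt_count() pairs for pagefault_disable()/pagefault_enable(), which name the actual requirement: page faults taken while an atomic kmap is live must not sleep. A sketch of a typical user, assuming the two-argument kmap_atomic() API of this kernel generation:

	/* copy one highmem page out through a temporary atomic mapping */
	static void copy_highmem_page_sketch(void *dst, struct page *page)
	{
		void *src = kmap_atomic(page, KM_USER0);

		/* no sleeping or user-space faulting between map and unmap */
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}
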
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2de4d3c367a..9e29ba9205f 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -90,9 +90,9 @@ unsigned long setup_zero_pages(void)
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
- page = virt_to_page(empty_zero_page);
+ page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
- while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+ while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
SetPageReserved(page);
page++;
}
@@ -443,15 +443,18 @@ void __init mem_init(void)
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
- unsigned long addr;
+ unsigned long pfn;
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)addr, 0xcc, PAGE_SIZE);
- free_page(addr);
+ for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ void *addr = phys_to_virt(PFN_PHYS(pfn));
+
+ ClearPageReserved(page);
+ init_page_count(page);
+ memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+ __free_page(page);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@@ -460,12 +463,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
-#ifdef CONFIG_64BIT
- /* Switch from KSEG0 to XKPHYS addresses */
- start = (unsigned long)phys_to_virt(CPHYSADDR(start));
- end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
- free_init_pages("initrd memory", start, end);
+ free_init_pages("initrd memory",
+ virt_to_phys((void *)start),
+ virt_to_phys((void *)end));
}
#endif
@@ -473,17 +473,13 @@ extern unsigned long prom_free_prom_memory(void);
void free_initmem(void)
{
- unsigned long start, end, freed;
+ unsigned long freed;
freed = prom_free_prom_memory();
if (freed)
printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
- start = (unsigned long)(&__init_begin);
- end = (unsigned long)(&__init_end);
-#ifdef CONFIG_64BIT
- start = PAGE_OFFSET | CPHYSADDR(start);
- end = PAGE_OFFSET | CPHYSADDR(end);
-#endif
- free_init_pages("unused kernel memory", start, end);
+ free_init_pages("unused kernel memory",
+ __pa_symbol(&__init_begin),
+ __pa_symbol(&__init_end));
}
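free_init_pages() now takes physical addresses and iterates over page frames, which is what lets free_initrd_mem() and free_initmem() simply pass __pa()/__pa_symbol() values whether the region is addressed through KSEG0 or XKPHYS. The PFN rounding is what keeps partial pages safe; a sketch with hypothetical numbers and 4 KiB pages:

	static void pfn_rounding_example(void)
	{
		unsigned long begin = 0x1234;	/* hypothetical physical start */
		unsigned long end   = 0x4000;	/* hypothetical physical end   */
		unsigned long pfn;

		for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
			/* pfn is 2, then 3: only pages fully inside
			 * [begin, end) are freed; the partial page
			 * at 0x1000 is left untouched. */
		}
	}
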
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 8d600d307d5..c46eb651bf0 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -58,6 +58,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifdef MODULE_START
+ pgd_init((unsigned long)module_pg_dir);
+#endif
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
pgd_base = swapper_pg_dir;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fec318a1c8c..492c518e7ba 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -423,6 +423,9 @@ enum label_id {
label_invalid,
label_second_part,
label_leave,
+#ifdef MODULE_START
+ label_module_alloc,
+#endif
label_vmalloc,
label_vmalloc_done,
label_tlbw_hazard,
@@ -455,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr,
L_LA(_second_part)
L_LA(_leave)
+#ifdef MODULE_START
+L_LA(_module_alloc)
+#endif
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
@@ -686,6 +692,13 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
i_bgezl(p, reg, 0);
}
+static void __init __attribute__((unused))
+il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+ r_mips_pc16(r, *p, l);
+ i_bgez(p, reg, 0);
+}
+
/* The only general purpose registers allowed in TLB handlers. */
#define K0 26
#define K1 27
@@ -970,7 +983,11 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
* The vmalloc handling is not in the hotpath.
*/
i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+ il_bltz(p, r, tmp, label_module_alloc);
+#else
il_bltz(p, r, tmp, label_vmalloc);
+#endif
/* No i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
@@ -1023,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
{
long swpd = (long)swapper_pg_dir;
+#ifdef MODULE_START
+ long modd = (long)module_pg_dir;
+
+ l_module_alloc(l, *p);
+ /*
+ * Assumption:
+ * VMALLOC_START >= 0xc000000000000000UL
+ * MODULE_START >= 0xe000000000000000UL
+ */
+ i_SLL(p, ptr, bvaddr, 2);
+ il_bgez(p, r, ptr, label_vmalloc);
+
+ if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
+ i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
+ } else {
+ /* unlikely configuration */
+ i_nop(p); /* delay slot */
+ i_LA(p, ptr, MODULE_START);
+ }
+ i_dsubu(p, bvaddr, bvaddr, ptr);
+
+ if (in_compat_space_p(modd) && !rel_lo(modd)) {
+ il_b(p, r, label_vmalloc_done);
+ i_lui(p, ptr, rel_hi(modd));
+ } else {
+ i_LA_mostly(p, ptr, modd);
+ il_b(p, r, label_vmalloc_done);
+ i_daddiu(p, ptr, ptr, rel_lo(modd));
+ }
+
+ l_vmalloc(l, *p);
+ if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
+ MODULE_START << 32 == VMALLOC_START)
+ i_dsll32(p, ptr, ptr, 0); /* typical case */
+ else
+ i_LA(p, ptr, VMALLOC_START);
+#else
l_vmalloc(l, *p);
i_LA(p, ptr, VMALLOC_START);
+#endif
i_dsubu(p, bvaddr, bvaddr, ptr);
if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index 47e3fa32b07..bb11fef0847 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -66,48 +66,6 @@ static inline void unmask_cpci_irq(unsigned int irq)
}
/*
- * Enables the IRQ in the FPGA
- */
-static void enable_cpci_irq(unsigned int irq)
-{
- unmask_cpci_irq(irq);
-}
-
-/*
- * Initialize the IRQ in the FPGA
- */
-static unsigned int startup_cpci_irq(unsigned int irq)
-{
- unmask_cpci_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ in the FPGA
- */
-static void disable_cpci_irq(unsigned int irq)
-{
- mask_cpci_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_cpci_irq(unsigned int irq)
-{
- mask_cpci_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_cpci_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_cpci_irq(irq);
-}
-
-/*
* Interrupt handler for interrupts coming from the FPGA chip.
* It could be the built-in Ethernet ports, etc.
*/
@@ -125,27 +83,18 @@ void ll_cpci_irq(void)
do_IRQ(ls1bit8(irq_src) + CPCI_IRQ_BASE);
}
-#define shutdown_cpci_irq disable_cpci_irq
-
struct irq_chip cpci_irq_type = {
.typename = "CPCI/FPGA",
- .startup = startup_cpci_irq,
- .shutdown = shutdown_cpci_irq,
- .enable = enable_cpci_irq,
- .disable = disable_cpci_irq,
- .ack = mask_and_ack_cpci_irq,
- .end = end_cpci_irq,
+ .ack = mask_cpci_irq,
+ .mask = mask_cpci_irq,
+ .mask_ack = mask_cpci_irq,
+ .unmask = unmask_cpci_irq,
};
void cpci_irq_init(void)
{
int i;
- /* Reset irq handlers pointers to NULL */
- for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &cpci_irq_type;
- }
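+ /* handle_level_irq supplies the generic mask/ack and conditional-unmask flow, so the chip only needs ack/mask/mask_ack/unmask. */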
+ for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++)
+ set_irq_chip_and_handler(i, &cpci_irq_type, handle_level_irq);
}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index 510257dc205..a7a80c0da56 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -60,48 +60,6 @@ static inline void unmask_uart_irq(unsigned int irq)
}
/*
- * Enables the IRQ in the FPGA
- */
-static void enable_uart_irq(unsigned int irq)
-{
- unmask_uart_irq(irq);
-}
-
-/*
- * Initialize the IRQ in the FPGA
- */
-static unsigned int startup_uart_irq(unsigned int irq)
-{
- unmask_uart_irq(irq);
- return 0;
-}
-
-/*
- * Disables the IRQ in the FPGA
- */
-static void disable_uart_irq(unsigned int irq)
-{
- mask_uart_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_uart_irq(unsigned int irq)
-{
- mask_uart_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_uart_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_uart_irq(irq);
-}
-
-/*
* Interrupt handler for interrupts coming from the FPGA chip.
*/
void ll_uart_irq(void)
@@ -118,28 +76,16 @@ void ll_uart_irq(void)
do_IRQ(ls1bit8(irq_src) + 74);
}
-#define shutdown_uart_irq disable_uart_irq
-
struct irq_chip uart_irq_type = {
.typename = "UART/FPGA",
- .startup = startup_uart_irq,
- .shutdown = shutdown_uart_irq,
- .enable = enable_uart_irq,
- .disable = disable_uart_irq,
- .ack = mask_and_ack_uart_irq,
- .end = end_uart_irq,
+ .ack = mask_uart_irq,
+ .mask = mask_uart_irq,
+ .mask_ack = mask_uart_irq,
+ .unmask = unmask_uart_irq,
};
void uart_irq_init(void)
{
- /* Reset irq handlers pointers to NULL */
- irq_desc[80].status = IRQ_DISABLED;
- irq_desc[80].action = 0;
- irq_desc[80].depth = 2;
- irq_desc[80].chip = &uart_irq_type;
-
- irq_desc[81].status = IRQ_DISABLED;
- irq_desc[81].action = 0;
- irq_desc[81].depth = 2;
- irq_desc[81].chip = &uart_irq_type;
+ set_irq_chip_and_handler(80, &uart_irq_type, handle_level_irq);
+ set_irq_chip_and_handler(81, &uart_irq_type, handle_level_irq);
}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 0a50aad5bbe..bf3be6fcf7f 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -12,5 +12,6 @@ oprofile-y := $(DRIVER_OBJS) common.o
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
+oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_RM9000) += op_model_rm9000.o
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 65eb55400d7..4e0a90b3916 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -83,6 +83,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_R14000:
lmodel = &op_model_mipsxx_ops;
break;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fb240c57ba..455d76ad06d 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -18,7 +18,7 @@
#define M_PERFCTL_SUPERVISOR (1UL << 2)
#define M_PERFCTL_USER (1UL << 3)
#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
-#define M_PERFCTL_EVENT(event) ((event) << 5)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3f) << 5)
#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
@@ -218,13 +218,23 @@ static inline int __n_counters(void)
static inline int n_counters(void)
{
- int counters = __n_counters();
+ int counters;
+
+ switch (current_cpu_data.cputype) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
#ifdef CONFIG_MIPS_MT_SMP
- if (current_cpu_data.cputype == CPU_34K)
- return counters >> 1;
+ counters >>= 1;
#endif
-
return counters;
}
@@ -284,6 +294,18 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/5K";
break;
+ case CPU_R10000:
+ if ((current_cpu_data.processor_id & 0xff) == 0x20)
+ op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
+ else
+ op_model_mipsxx_ops.cpu_type = "mips/r10000";
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ op_model_mipsxx_ops.cpu_type = "mips/r12000";
+ break;
+
case CPU_SB1:
case CPU_SB1A:
op_model_mipsxx_ops.cpu_type = "mips/sb1";
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index 75a01e76489..7d5f6bbf7a9 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -94,22 +94,21 @@ static void qube_raq_galileo_fixup(struct pci_dev *dev)
#if 0
if (galileo_id >= 0x10) {
/* New Galileo, assumes PCI stop line to VIA is connected. */
- GALILEO_OUTL(0x4020, GT_PCI0_TOR_OFS);
+ GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
} else if (galileo_id == 0x1 || galileo_id == 0x2)
#endif
{
signed int timeo;
/* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
- timeo = GALILEO_INL(GT_PCI0_TOR_OFS);
+ timeo = GT_READ(GT_PCI0_TOR_OFS);
/* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
- GALILEO_OUTL(
+ GT_WRITE(GT_PCI0_TOR_OFS,
(0xff << 16) | /* retry count */
(0xff << 8) | /* timeout 1 */
- 0xff, /* timeout 0 */
- GT_PCI0_TOR_OFS);
+ 0xff); /* timeout 0 */
/* enable PCI retry exceeded interrupt */
- GALILEO_OUTL(GALILEO_INTR_RETRY_CTR | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
+ GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
}
}
diff --git a/arch/mips/pci/ops-gt64111.c b/arch/mips/pci/ops-gt64111.c
index 13de45940b1..ecd3991bd0e 100644
--- a/arch/mips/pci/ops-gt64111.c
+++ b/arch/mips/pci/ops-gt64111.c
@@ -38,18 +38,18 @@ static int gt64111_pci_read_config(struct pci_bus *bus, unsigned int devfn,
switch (size) {
case 4:
PCI_CFG_SET(devfn, where);
- *val = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+ *val = GT_READ(GT_PCI0_CFGDATA_OFS);
return PCIBIOS_SUCCESSFUL;
case 2:
PCI_CFG_SET(devfn, (where & ~0x3));
- *val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
+ *val = GT_READ(GT_PCI0_CFGDATA_OFS)
>> ((where & 3) * 8);
return PCIBIOS_SUCCESSFUL;
case 1:
PCI_CFG_SET(devfn, (where & ~0x3));
- *val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
+ *val = GT_READ(GT_PCI0_CFGDATA_OFS)
>> ((where & 3) * 8);
return PCIBIOS_SUCCESSFUL;
}
@@ -68,25 +68,25 @@ static int gt64111_pci_write_config(struct pci_bus *bus, unsigned int devfn,
switch (size) {
case 4:
PCI_CFG_SET(devfn, where);
- GALILEO_OUTL(val, GT_PCI0_CFGDATA_OFS);
+ GT_WRITE(GT_PCI0_CFGDATA_OFS, val);
return PCIBIOS_SUCCESSFUL;
case 2:
PCI_CFG_SET(devfn, (where & ~0x3));
- tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+ tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
tmp &= ~(0xffff << ((where & 0x3) * 8));
tmp |= (val << ((where & 0x3) * 8));
- GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
+ GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
return PCIBIOS_SUCCESSFUL;
case 1:
PCI_CFG_SET(devfn, (where & ~0x3));
- tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
+ tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
tmp &= ~(0xff << ((where & 0x3) * 8));
tmp |= (val << ((where & 0x3) * 8));
- GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
+ GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 710611615ca..2c36c108c4d 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -38,8 +38,6 @@
#include <int.h>
#include <uart.h>
-static DEFINE_SPINLOCK(irq_lock);
-
/* default prio for interrupts */
/* first one is a no-no so therefore always prio 0 (disabled) */
static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
@@ -149,38 +147,6 @@ static inline void unmask_irq(unsigned int irq_nr)
}
}
-#define pnx8550_disable pnx8550_ack
-static void pnx8550_ack(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&irq_lock, flags);
- mask_irq(irq);
- spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-#define pnx8550_enable pnx8550_unmask
-static void pnx8550_unmask(unsigned int irq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&irq_lock, flags);
- unmask_irq(irq);
- spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-static unsigned int startup_irq(unsigned int irq_nr)
-{
- pnx8550_unmask(irq_nr);
- return 0;
-}
-
-static void shutdown_irq(unsigned int irq_nr)
-{
- pnx8550_ack(irq_nr);
- return;
-}
-
int pnx8550_set_gic_priority(int irq, int priority)
{
int gic_irq = irq-PNX8550_INT_GIC_MIN;
@@ -192,27 +158,12 @@ int pnx8550_set_gic_priority(int irq, int priority)
return prev_priority;
}
-static inline void mask_and_ack_level_irq(unsigned int irq)
-{
- pnx8550_disable(irq);
- return;
-}
-
-static void end_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
- pnx8550_enable(irq);
- }
-}
-
static struct irq_chip level_irq_type = {
.typename = "PNX Level IRQ",
- .startup = startup_irq,
- .shutdown = shutdown_irq,
- .enable = pnx8550_enable,
- .disable = pnx8550_disable,
- .ack = mask_and_ack_level_irq,
- .end = end_irq,
+ .ack = mask_irq,
+ .mask = mask_irq,
+ .mask_ack = mask_irq,
+ .unmask = unmask_irq,
};
static struct irqaction gic_action = {
@@ -233,8 +184,8 @@ void __init arch_init_irq(void)
int configPR;
for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) {
- irq_desc[i].chip = &level_irq_type;
- pnx8550_ack(i); /* mask the irq just in case */
+ set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
+ mask_irq(i); /* mask the irq just in case */
}
/* init of GIC/IPC interrupts */
@@ -270,7 +221,7 @@ void __init arch_init_irq(void)
/* mask/priority is still 0 so we will not get any
* interrupts until it is unmasked */
- irq_desc[i].chip = &level_irq_type;
+ set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
}
/* Priority level 0 */
@@ -279,20 +230,21 @@ void __init arch_init_irq(void)
/* Set int vector table address */
PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
- irq_desc[MIPS_CPU_GIC_IRQ].chip = &level_irq_type;
+ set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
+ handle_level_irq);
setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
/* init of Timer interrupts */
- for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) {
- irq_desc[i].chip = &level_irq_type;
- }
+ for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
+ set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* Stop Timer 1-3 */
configPR = read_c0_config7();
configPR |= 0x00000038;
write_c0_config7(configPR);
- irq_desc[MIPS_CPU_TIMER_IRQ].chip = &level_irq_type;
+ set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
+ handle_level_irq);
setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
}
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 3cc0436db6c..305491e74db 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -99,8 +99,6 @@ void prom_cpus_done(void)
*/
void prom_init_secondary(void)
{
- mips_hpt_init();
-
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index 0d18ed47c47..a1a9af6da7b 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -95,16 +95,11 @@ static irqreturn_t ip22_eisa_intr(int irq, void *dev_id)
static void enable_eisa1_irq(unsigned int irq)
{
- unsigned long flags;
u8 mask;
- local_irq_save(flags);
-
mask = inb(EISA_INT1_MASK);
mask &= ~((u8) (1 << irq));
outb(mask, EISA_INT1_MASK);
-
- local_irq_restore(flags);
}
static unsigned int startup_eisa1_irq(unsigned int irq)
@@ -130,8 +125,6 @@ static void disable_eisa1_irq(unsigned int irq)
outb(mask, EISA_INT1_MASK);
}
-#define shutdown_eisa1_irq disable_eisa1_irq
-
static void mask_and_ack_eisa1_irq(unsigned int irq)
{
disable_eisa1_irq(irq);
@@ -148,25 +141,20 @@ static void end_eisa1_irq(unsigned int irq)
static struct irq_chip ip22_eisa1_irq_type = {
.typename = "IP22 EISA",
.startup = startup_eisa1_irq,
- .shutdown = shutdown_eisa1_irq,
- .enable = enable_eisa1_irq,
- .disable = disable_eisa1_irq,
.ack = mask_and_ack_eisa1_irq,
+ .mask = disable_eisa1_irq,
+ .mask_ack = mask_and_ack_eisa1_irq,
+ .unmask = enable_eisa1_irq,
.end = end_eisa1_irq,
};
static void enable_eisa2_irq(unsigned int irq)
{
- unsigned long flags;
u8 mask;
- local_irq_save(flags);
-
mask = inb(EISA_INT2_MASK);
mask &= ~((u8) (1 << (irq - 8)));
outb(mask, EISA_INT2_MASK);
-
- local_irq_restore(flags);
}
static unsigned int startup_eisa2_irq(unsigned int irq)
@@ -192,8 +180,6 @@ static void disable_eisa2_irq(unsigned int irq)
outb(mask, EISA_INT2_MASK);
}
-#define shutdown_eisa2_irq disable_eisa2_irq
-
static void mask_and_ack_eisa2_irq(unsigned int irq)
{
disable_eisa2_irq(irq);
@@ -210,10 +196,10 @@ static void end_eisa2_irq(unsigned int irq)
static struct irq_chip ip22_eisa2_irq_type = {
.typename = "IP22 EISA",
.startup = startup_eisa2_irq,
- .shutdown = shutdown_eisa2_irq,
- .enable = enable_eisa2_irq,
- .disable = disable_eisa2_irq,
.ack = mask_and_ack_eisa2_irq,
+ .mask = disable_eisa2_irq,
+ .mask_ack = mask_and_ack_eisa2_irq,
+ .unmask = enable_eisa2_irq,
.end = end_eisa2_irq,
};
@@ -275,13 +261,10 @@ int __init ip22_eisa_init(void)
outb(0, EISA_DMA2_WRITE_SINGLE);
for (i = SGINT_EISA; i < (SGINT_EISA + EISA_MAX_IRQ); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
if (i < (SGINT_EISA + 8))
- irq_desc[i].chip = &ip22_eisa1_irq_type;
+ set_irq_chip(i, &ip22_eisa1_irq_type);
else
- irq_desc[i].chip = &ip22_eisa2_irq_type;
+ set_irq_chip(i, &ip22_eisa2_irq_type);
}
/* Cannot use request_irq because of kmalloc not being ready at such
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index af518898eaa..c44f8be0644 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -40,186 +40,86 @@ extern int ip22_eisa_init(void);
static void enable_local0_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
/* don't allow mappable interrupt to be enabled from setup_irq,
* we have our own way to do so */
if (irq != SGI_MAP_0_IRQ)
sgint->imask0 |= (1 << (irq - SGINT_LOCAL0));
- local_irq_restore(flags);
-}
-
-static unsigned int startup_local0_irq(unsigned int irq)
-{
- enable_local0_irq(irq);
- return 0; /* Never anything pending */
}
static void disable_local0_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
- local_irq_restore(flags);
-}
-
-#define shutdown_local0_irq disable_local0_irq
-#define mask_and_ack_local0_irq disable_local0_irq
-
-static void end_local0_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_local0_irq(irq);
}
static struct irq_chip ip22_local0_irq_type = {
.typename = "IP22 local 0",
- .startup = startup_local0_irq,
- .shutdown = shutdown_local0_irq,
- .enable = enable_local0_irq,
- .disable = disable_local0_irq,
- .ack = mask_and_ack_local0_irq,
- .end = end_local0_irq,
+ .ack = disable_local0_irq,
+ .mask = disable_local0_irq,
+ .mask_ack = disable_local0_irq,
+ .unmask = enable_local0_irq,
};
static void enable_local1_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
/* don't allow mappable interrupt to be enabled from setup_irq,
* we have our own way to do so */
if (irq != SGI_MAP_1_IRQ)
sgint->imask1 |= (1 << (irq - SGINT_LOCAL1));
- local_irq_restore(flags);
-}
-
-static unsigned int startup_local1_irq(unsigned int irq)
-{
- enable_local1_irq(irq);
- return 0; /* Never anything pending */
}
void disable_local1_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
- local_irq_restore(flags);
-}
-
-#define shutdown_local1_irq disable_local1_irq
-#define mask_and_ack_local1_irq disable_local1_irq
-
-static void end_local1_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_local1_irq(irq);
}
static struct irq_chip ip22_local1_irq_type = {
.typename = "IP22 local 1",
- .startup = startup_local1_irq,
- .shutdown = shutdown_local1_irq,
- .enable = enable_local1_irq,
- .disable = disable_local1_irq,
- .ack = mask_and_ack_local1_irq,
- .end = end_local1_irq,
+ .ack = disable_local1_irq,
+ .mask = disable_local1_irq,
+ .mask_ack = disable_local1_irq,
+ .unmask = enable_local1_irq,
};
static void enable_local2_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2));
- local_irq_restore(flags);
-}
-
-static unsigned int startup_local2_irq(unsigned int irq)
-{
- enable_local2_irq(irq);
- return 0; /* Never anything pending */
}
void disable_local2_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2));
if (!sgint->cmeimask0)
sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
- local_irq_restore(flags);
-}
-
-#define shutdown_local2_irq disable_local2_irq
-#define mask_and_ack_local2_irq disable_local2_irq
-
-static void end_local2_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_local2_irq(irq);
}
static struct irq_chip ip22_local2_irq_type = {
.typename = "IP22 local 2",
- .startup = startup_local2_irq,
- .shutdown = shutdown_local2_irq,
- .enable = enable_local2_irq,
- .disable = disable_local2_irq,
- .ack = mask_and_ack_local2_irq,
- .end = end_local2_irq,
+ .ack = disable_local2_irq,
+ .mask = disable_local2_irq,
+ .mask_ack = disable_local2_irq,
+ .unmask = enable_local2_irq,
};
static void enable_local3_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3));
- local_irq_restore(flags);
-}
-
-static unsigned int startup_local3_irq(unsigned int irq)
-{
- enable_local3_irq(irq);
- return 0; /* Never anything pending */
}
void disable_local3_irq(unsigned int irq)
{
- unsigned long flags;
-
- local_irq_save(flags);
sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3));
if (!sgint->cmeimask1)
sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
- local_irq_restore(flags);
-}
-
-#define shutdown_local3_irq disable_local3_irq
-#define mask_and_ack_local3_irq disable_local3_irq
-
-static void end_local3_irq (unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_local3_irq(irq);
}
static struct irq_chip ip22_local3_irq_type = {
.typename = "IP22 local 3",
- .startup = startup_local3_irq,
- .shutdown = shutdown_local3_irq,
- .enable = enable_local3_irq,
- .disable = disable_local3_irq,
- .ack = mask_and_ack_local3_irq,
- .end = end_local3_irq,
+ .ack = disable_local3_irq,
+ .mask = disable_local3_irq,
+ .mask_ack = disable_local3_irq,
+ .unmask = enable_local3_irq,
};
static void indy_local0_irqdispatch(void)
@@ -430,10 +330,7 @@ void __init arch_init_irq(void)
else
handler = &ip22_local3_irq_type;
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = handler;
+ set_irq_chip_and_handler(i, handler, handle_level_irq);
}
/* vector handler. this register the IRQ as non-sharable */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 270ecd3e6b4..319f8803ef6 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -332,34 +332,19 @@ static inline void disable_bridge_irq(unsigned int irq)
intr_disconnect_level(cpu, swlevel);
}
-static void mask_and_ack_bridge_irq(unsigned int irq)
-{
- disable_bridge_irq(irq);
-}
-
-static void end_bridge_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
- irq_desc[irq].action)
- enable_bridge_irq(irq);
-}
-
static struct irq_chip bridge_irq_type = {
.typename = "bridge",
.startup = startup_bridge_irq,
.shutdown = shutdown_bridge_irq,
- .enable = enable_bridge_irq,
- .disable = disable_bridge_irq,
- .ack = mask_and_ack_bridge_irq,
- .end = end_bridge_irq,
+ .ack = disable_bridge_irq,
+ .mask = disable_bridge_irq,
+ .mask_ack = disable_bridge_irq,
+ .unmask = enable_bridge_irq,
};
void __devinit register_bridge_irq(unsigned int irq)
{
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = 0;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &bridge_irq_type;
+ set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}
int __devinit request_bridge_irq(struct bridge_controller *bc)
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 5e82a268e3c..c20e9899b34 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -172,15 +172,6 @@ static __init unsigned long get_m48t35_time(void)
return mktime(year, month, date, hour, min, sec);
}
-static unsigned int startup_rt_irq(unsigned int irq)
-{
- return 0;
-}
-
-static void shutdown_rt_irq(unsigned int irq)
-{
-}
-
static void enable_rt_irq(unsigned int irq)
{
}
@@ -189,22 +180,13 @@ static void disable_rt_irq(unsigned int irq)
{
}
-static void mask_and_ack_rt(unsigned int irq)
-{
-}
-
-static void end_rt_irq(unsigned int irq)
-{
-}
-
static struct irq_chip rt_irq_type = {
.typename = "SN HUB RT timer",
- .startup = startup_rt_irq,
- .shutdown = shutdown_rt_irq,
- .enable = enable_rt_irq,
- .disable = disable_rt_irq,
- .ack = mask_and_ack_rt,
- .end = end_rt_irq,
+ .ack = disable_rt_irq,
+ .mask = disable_rt_irq,
+ .mask_ack = disable_rt_irq,
+ .unmask = enable_rt_irq,
+ .eoi = enable_rt_irq,
};
static struct irqaction rt_irqaction = {
@@ -221,10 +203,7 @@ void __init plat_timer_setup(struct irqaction *irq)
if (irqno < 0)
panic("Can't allocate interrupt number for timer interrupt");
- irq_desc[irqno].status = IRQ_DISABLED;
- irq_desc[irqno].action = NULL;
- irq_desc[irqno].depth = 1;
- irq_desc[irqno].chip = &rt_irq_type;
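+ /* handle_percpu_irq does no masking, matching the empty enable/disable callbacks of the RT timer chip. */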
+ set_irq_chip_and_handler(irqno, &rt_irq_type, handle_percpu_irq);
/* over-write the handler, we use our own way */
irq->handler = no_action;
@@ -239,14 +218,14 @@ void __init plat_timer_setup(struct irqaction *irq)
setup_irq(irqno, &rt_irqaction);
}
-static unsigned int ip27_hpt_read(void)
+static cycle_t ip27_hpt_read(void)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}
void __init ip27_time_init(void)
{
- mips_hpt_read = ip27_hpt_read;
+ clocksource_mips.read = ip27_hpt_read;
mips_hpt_frequency = CYCLES_PER_SEC;
xtime.tv_sec = get_m48t35_time();
xtime.tv_nsec = 0;
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index c9acadd0846..ae063864c02 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -113,12 +113,6 @@ static void inline flush_mace_bus(void)
* is quite different anyway.
*/
-/*
- * IRQ spinlock - Ralf says not to disable CPU interrupts,
- * and I think he knows better.
- */
-static DEFINE_SPINLOCK(ip32_irq_lock);
-
/* Some initial interrupts to set up */
extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
@@ -138,12 +132,6 @@ static void enable_cpu_irq(unsigned int irq)
set_c0_status(STATUSF_IP7);
}
-static unsigned int startup_cpu_irq(unsigned int irq)
-{
- enable_cpu_irq(irq);
- return 0;
-}
-
static void disable_cpu_irq(unsigned int irq)
{
clear_c0_status(STATUSF_IP7);
@@ -155,16 +143,12 @@ static void end_cpu_irq(unsigned int irq)
enable_cpu_irq (irq);
}
-#define shutdown_cpu_irq disable_cpu_irq
-#define mask_and_ack_cpu_irq disable_cpu_irq
-
static struct irq_chip ip32_cpu_interrupt = {
.typename = "IP32 CPU",
- .startup = startup_cpu_irq,
- .shutdown = shutdown_cpu_irq,
- .enable = enable_cpu_irq,
- .disable = disable_cpu_irq,
- .ack = mask_and_ack_cpu_irq,
+ .ack = disable_cpu_irq,
+ .mask = disable_cpu_irq,
+ .mask_ack = disable_cpu_irq,
+ .unmask = enable_cpu_irq,
.end = end_cpu_irq,
};
@@ -177,45 +161,27 @@ static uint64_t crime_mask;
static void enable_crime_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_crime_irq(unsigned int irq)
-{
- enable_crime_irq(irq);
- return 0; /* This is probably not right; we could have pending irqs */
}
static void disable_crime_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void mask_and_ack_crime_irq(unsigned int irq)
{
- unsigned long flags;
-
/* Edge triggered interrupts must be cleared. */
if ((irq >= CRIME_GBE0_IRQ && irq <= CRIME_GBE3_IRQ)
|| (irq >= CRIME_RE_EMPTY_E_IRQ && irq <= CRIME_RE_IDLE_E_IRQ)
|| (irq >= CRIME_SOFT0_IRQ && irq <= CRIME_SOFT2_IRQ)) {
uint64_t crime_int;
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_int = crime->hard_int;
crime_int &= ~(1 << (irq - 1));
crime->hard_int = crime_int;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
disable_crime_irq(irq);
}
@@ -226,15 +192,12 @@ static void end_crime_irq(unsigned int irq)
enable_crime_irq(irq);
}
-#define shutdown_crime_irq disable_crime_irq
-
static struct irq_chip ip32_crime_interrupt = {
.typename = "IP32 CRIME",
- .startup = startup_crime_irq,
- .shutdown = shutdown_crime_irq,
- .enable = enable_crime_irq,
- .disable = disable_crime_irq,
.ack = mask_and_ack_crime_irq,
+ .mask = disable_crime_irq,
+ .mask_ack = mask_and_ack_crime_irq,
+ .unmask = enable_crime_irq,
.end = end_crime_irq,
};
@@ -248,34 +211,20 @@ static unsigned long macepci_mask;
static void enable_macepci_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
macepci_mask |= MACEPCI_CONTROL_INT(irq - 9);
mace->pci.control = macepci_mask;
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_macepci_irq(unsigned int irq)
-{
- enable_macepci_irq (irq);
- return 0;
}
static void disable_macepci_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
macepci_mask &= ~MACEPCI_CONTROL_INT(irq - 9);
mace->pci.control = macepci_mask;
flush_mace_bus();
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void end_macepci_irq(unsigned int irq)
@@ -284,16 +233,12 @@ static void end_macepci_irq(unsigned int irq)
enable_macepci_irq(irq);
}
-#define shutdown_macepci_irq disable_macepci_irq
-#define mask_and_ack_macepci_irq disable_macepci_irq
-
static struct irq_chip ip32_macepci_interrupt = {
.typename = "IP32 MACE PCI",
- .startup = startup_macepci_irq,
- .shutdown = shutdown_macepci_irq,
- .enable = enable_macepci_irq,
- .disable = disable_macepci_irq,
- .ack = mask_and_ack_macepci_irq,
+ .ack = disable_macepci_irq,
+ .mask = disable_macepci_irq,
+ .mask_ack = disable_macepci_irq,
+ .unmask = enable_macepci_irq,
.end = end_macepci_irq,
};
@@ -339,7 +284,6 @@ static unsigned long maceisa_mask;
static void enable_maceisa_irq (unsigned int irq)
{
unsigned int crime_int = 0;
- unsigned long flags;
DBG ("maceisa enable: %u\n", irq);
@@ -355,26 +299,16 @@ static void enable_maceisa_irq (unsigned int irq)
break;
}
DBG ("crime_int %08x enabled\n", crime_int);
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= crime_int;
crime->imask = crime_mask;
maceisa_mask |= 1 << (irq - 33);
mace->perif.ctrl.imask = maceisa_mask;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_maceisa_irq(unsigned int irq)
-{
- enable_maceisa_irq(irq);
- return 0;
}
static void disable_maceisa_irq(unsigned int irq)
{
unsigned int crime_int = 0;
- unsigned long flags;
- spin_lock_irqsave(&ip32_irq_lock, flags);
maceisa_mask &= ~(1 << (irq - 33));
if(!(maceisa_mask & MACEISA_AUDIO_INT))
crime_int |= MACE_AUDIO_INT;
@@ -387,23 +321,20 @@ static void disable_maceisa_irq(unsigned int irq)
flush_crime_bus();
mace->perif.ctrl.imask = maceisa_mask;
flush_mace_bus();
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void mask_and_ack_maceisa_irq(unsigned int irq)
{
- unsigned long mace_int, flags;
+ unsigned long mace_int;
switch (irq) {
case MACEISA_PARALLEL_IRQ:
case MACEISA_SERIAL1_TDMAPR_IRQ:
case MACEISA_SERIAL2_TDMAPR_IRQ:
/* edge triggered */
- spin_lock_irqsave(&ip32_irq_lock, flags);
mace_int = mace->perif.ctrl.istat;
mace_int &= ~(1 << (irq - 33));
mace->perif.ctrl.istat = mace_int;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
break;
}
disable_maceisa_irq(irq);
@@ -415,15 +346,12 @@ static void end_maceisa_irq(unsigned irq)
enable_maceisa_irq(irq);
}
-#define shutdown_maceisa_irq disable_maceisa_irq
-
static struct irq_chip ip32_maceisa_interrupt = {
.typename = "IP32 MACE ISA",
- .startup = startup_maceisa_irq,
- .shutdown = shutdown_maceisa_irq,
- .enable = enable_maceisa_irq,
- .disable = disable_maceisa_irq,
.ack = mask_and_ack_maceisa_irq,
+ .mask = disable_maceisa_irq,
+ .mask_ack = mask_and_ack_maceisa_irq,
+ .unmask = enable_maceisa_irq,
.end = end_maceisa_irq,
};
@@ -433,29 +361,15 @@ static struct irq_chip ip32_maceisa_interrupt = {
static void enable_mace_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
-}
-
-static unsigned int startup_mace_irq(unsigned int irq)
-{
- enable_mace_irq(irq);
- return 0;
}
static void disable_mace_irq(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
- spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void end_mace_irq(unsigned int irq)
@@ -464,16 +378,12 @@ static void end_mace_irq(unsigned int irq)
enable_mace_irq(irq);
}
-#define shutdown_mace_irq disable_mace_irq
-#define mask_and_ack_mace_irq disable_mace_irq
-
static struct irq_chip ip32_mace_interrupt = {
.typename = "IP32 MACE",
- .startup = startup_mace_irq,
- .shutdown = shutdown_mace_irq,
- .enable = enable_mace_irq,
- .disable = disable_mace_irq,
- .ack = mask_and_ack_mace_irq,
+ .ack = disable_mace_irq,
+ .mask = disable_mace_irq,
+ .mask_ack = disable_mace_irq,
+ .unmask = enable_mace_irq,
.end = end_mace_irq,
};
@@ -586,10 +496,7 @@ void __init arch_init_irq(void)
else
controller = &ip32_maceisa_interrupt;
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = 0;
- irq_desc[irq].depth = 0;
- irq_desc[irq].chip = controller;
+ set_irq_chip(irq, controller);
}
setup_irq(CRIME_MEMERR_IRQ, &memerr_irq);
setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq);
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 8b1f4148492..2e8f6b2e242 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -45,11 +45,9 @@
*/
-#define shutdown_bcm1480_irq disable_bcm1480_irq
static void end_bcm1480_irq(unsigned int irq);
static void enable_bcm1480_irq(unsigned int irq);
static void disable_bcm1480_irq(unsigned int irq);
-static unsigned int startup_bcm1480_irq(unsigned int irq);
static void ack_bcm1480_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask);
@@ -85,11 +83,10 @@ extern char sb1250_duart_present[];
static struct irq_chip bcm1480_irq_type = {
.typename = "BCM1480-IMR",
- .startup = startup_bcm1480_irq,
- .shutdown = shutdown_bcm1480_irq,
- .enable = enable_bcm1480_irq,
- .disable = disable_bcm1480_irq,
.ack = ack_bcm1480_irq,
+ .mask = disable_bcm1480_irq,
+ .mask_ack = ack_bcm1480_irq,
+ .unmask = enable_bcm1480_irq,
.end = end_bcm1480_irq,
#ifdef CONFIG_SMP
.set_affinity = bcm1480_set_affinity
@@ -188,14 +185,6 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
/*****************************************************************************/
-static unsigned int startup_bcm1480_irq(unsigned int irq)
-{
- bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
-
- return 0; /* never anything pending */
-}
-
-
static void disable_bcm1480_irq(unsigned int irq)
{
bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
@@ -270,16 +259,9 @@ void __init init_bcm1480_irqs(void)
{
int i;
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- if (i < BCM1480_NR_IRQS) {
- irq_desc[i].chip = &bcm1480_irq_type;
- bcm1480_irq_owner[i] = 0;
- } else {
- irq_desc[i].chip = &no_irq_chip;
- }
+ for (i = 0; i < BCM1480_NR_IRQS; i++) {
+ set_irq_chip(i, &bcm1480_irq_type);
+ bcm1480_irq_owner[i] = 0;
}
}
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index e136bde5248..6f3f71bf424 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -94,8 +94,6 @@ void bcm1480_time_init(void)
*/
}
-#include <asm/sibyte/sb1250.h>
-
void bcm1480_timer_interrupt(void)
{
int cpu = smp_processor_id();
@@ -119,7 +117,7 @@ void bcm1480_timer_interrupt(void)
}
}
-static unsigned int bcm1480_hpt_read(void)
+static cycle_t bcm1480_hpt_read(void)
{
/* We assume this function is called with xtime_lock held. */
unsigned long count =
@@ -129,6 +127,6 @@ static unsigned int bcm1480_hpt_read(void)
void __init bcm1480_hpt_setup(void)
{
- mips_hpt_read = bcm1480_hpt_read;
+ clocksource_mips.read = bcm1480_hpt_read;
mips_hpt_frequency = BCM1480_HPT_VALUE;
}
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index d5d26770daf..82ce7533053 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -44,11 +44,9 @@
*/
-#define shutdown_sb1250_irq disable_sb1250_irq
static void end_sb1250_irq(unsigned int irq);
static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
-static unsigned int startup_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
@@ -70,11 +68,10 @@ extern char sb1250_duart_present[];
static struct irq_chip sb1250_irq_type = {
.typename = "SB1250-IMR",
- .startup = startup_sb1250_irq,
- .shutdown = shutdown_sb1250_irq,
- .enable = enable_sb1250_irq,
- .disable = disable_sb1250_irq,
.ack = ack_sb1250_irq,
+ .mask = disable_sb1250_irq,
+ .mask_ack = ack_sb1250_irq,
+ .unmask = enable_sb1250_irq,
.end = end_sb1250_irq,
#ifdef CONFIG_SMP
.set_affinity = sb1250_set_affinity
@@ -163,14 +160,6 @@ static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
/*****************************************************************************/
-static unsigned int startup_sb1250_irq(unsigned int irq)
-{
- sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
-
- return 0; /* never anything pending */
-}
-
-
static void disable_sb1250_irq(unsigned int irq)
{
sb1250_mask_irq(sb1250_irq_owner[irq], irq);
@@ -239,16 +228,9 @@ void __init init_sb1250_irqs(void)
{
int i;
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- if (i < SB1250_NR_IRQS) {
- irq_desc[i].chip = &sb1250_irq_type;
- sb1250_irq_owner[i] = 0;
- } else {
- irq_desc[i].chip = &no_irq_chip;
- }
+ for (i = 0; i < SB1250_NR_IRQS; i++) {
+ set_irq_chip(i, &sb1250_irq_type);
+ sb1250_irq_owner[i] = 0;
}
}
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index bcb74f2c194..2efffe15ff2 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -51,7 +51,7 @@
extern int sb1250_steal_irq(int irq);
-static unsigned int sb1250_hpt_read(void);
+static cycle_t sb1250_hpt_read(void);
void __init sb1250_hpt_setup(void)
{
@@ -66,8 +66,8 @@ void __init sb1250_hpt_setup(void)
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
mips_hpt_frequency = V_SCD_TIMER_FREQ;
- mips_hpt_read = sb1250_hpt_read;
- mips_hpt_mask = M_SCD_TIMER_INIT;
+ clocksource_mips.read = sb1250_hpt_read;
+ clocksource_mips.mask = M_SCD_TIMER_INIT;
}
}
@@ -143,7 +143,7 @@ void sb1250_timer_interrupt(void)
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
-static unsigned int sb1250_hpt_read(void)
+static cycle_t sb1250_hpt_read(void)
{
unsigned int count;
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index ac342f5643c..defa1f1452a 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -43,7 +43,7 @@
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
#endif
#include <asm/sibyte/sb1250_genbus.h>
#include <asm/sibyte/board.h>
@@ -53,7 +53,7 @@ extern void bcm1480_setup(void);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
extern void sb1250_setup(void);
#else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
#endif
extern int xicor_probe(void);
@@ -90,7 +90,7 @@ void __init plat_timer_setup(struct irqaction *irq)
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
sb1250_time_init();
#else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
#endif
}
@@ -111,7 +111,7 @@ void __init plat_mem_setup(void)
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
sb1250_setup();
#else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
#endif
panic_timeout = 5; /* For debug. */
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 48fb74a7aae..8511bcc6d99 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -11,44 +11,25 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/spinlock.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/sni.h>
-DEFINE_SPINLOCK(pciasic_lock);
-
static void enable_pciasic_irq(unsigned int irq)
{
unsigned int mask = 1 << (irq - PCIMT_IRQ_INT2);
- unsigned long flags;
- spin_lock_irqsave(&pciasic_lock, flags);
*(volatile u8 *) PCIMT_IRQSEL |= mask;
- spin_unlock_irqrestore(&pciasic_lock, flags);
-}
-
-static unsigned int startup_pciasic_irq(unsigned int irq)
-{
- enable_pciasic_irq(irq);
- return 0; /* never anything pending */
}
-#define shutdown_pciasic_irq disable_pciasic_irq
-
void disable_pciasic_irq(unsigned int irq)
{
unsigned int mask = ~(1 << (irq - PCIMT_IRQ_INT2));
- unsigned long flags;
- spin_lock_irqsave(&pciasic_lock, flags);
*(volatile u8 *) PCIMT_IRQSEL &= mask;
- spin_unlock_irqrestore(&pciasic_lock, flags);
}
-#define mask_and_ack_pciasic_irq disable_pciasic_irq
-
static void end_pciasic_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -57,11 +38,10 @@ static void end_pciasic_irq(unsigned int irq)
static struct irq_chip pciasic_irq_type = {
.typename = "ASIC-PCI",
- .startup = startup_pciasic_irq,
- .shutdown = shutdown_pciasic_irq,
- .enable = enable_pciasic_irq,
- .disable = disable_pciasic_irq,
- .ack = mask_and_ack_pciasic_irq,
+ .ack = disable_pciasic_irq,
+ .mask = disable_pciasic_irq,
+ .mask_ack = disable_pciasic_irq,
+ .unmask = enable_pciasic_irq,
.end = end_pciasic_irq,
};
@@ -178,12 +158,8 @@ asmlinkage void plat_irq_dispatch(void)
void __init init_pciasic(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pciasic_lock, flags);
* (volatile u8 *) PCIMT_IRQSEL =
IT_EISA | IT_INTA | IT_INTB | IT_INTC | IT_INTD;
- spin_unlock_irqrestore(&pciasic_lock, flags);
}
/*
@@ -199,12 +175,8 @@ void __init arch_init_irq(void)
init_pciasic();
/* Actually we've got more interrupts to handle ... */
- for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &pciasic_irq_type;
- }
+ for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++)
+ set_irq_chip(i, &pciasic_irq_type);
change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ2|IE_IRQ3|IE_IRQ4);
}
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index 8266a88a3f8..ed4a19adf36 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -64,20 +64,12 @@
#define TX4927_IRQ_NEST4 ( 1 << 9 )
#define TX4927_IRQ_CP0_INIT ( 1 << 10 )
-#define TX4927_IRQ_CP0_STARTUP ( 1 << 11 )
-#define TX4927_IRQ_CP0_SHUTDOWN ( 1 << 12 )
#define TX4927_IRQ_CP0_ENABLE ( 1 << 13 )
#define TX4927_IRQ_CP0_DISABLE ( 1 << 14 )
-#define TX4927_IRQ_CP0_MASK ( 1 << 15 )
-#define TX4927_IRQ_CP0_ENDIRQ ( 1 << 16 )
#define TX4927_IRQ_PIC_INIT ( 1 << 20 )
-#define TX4927_IRQ_PIC_STARTUP ( 1 << 21 )
-#define TX4927_IRQ_PIC_SHUTDOWN ( 1 << 22 )
#define TX4927_IRQ_PIC_ENABLE ( 1 << 23 )
#define TX4927_IRQ_PIC_DISABLE ( 1 << 24 )
-#define TX4927_IRQ_PIC_MASK ( 1 << 25 )
-#define TX4927_IRQ_PIC_ENDIRQ ( 1 << 26 )
#define TX4927_IRQ_ALL 0xffffffff
#endif
@@ -87,19 +79,11 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
| TX4927_IRQ_INFO
| TX4927_IRQ_WARN | TX4927_IRQ_EROR
// | TX4927_IRQ_CP0_INIT
-// | TX4927_IRQ_CP0_STARTUP
-// | TX4927_IRQ_CP0_SHUTDOWN
// | TX4927_IRQ_CP0_ENABLE
-// | TX4927_IRQ_CP0_DISABLE
-// | TX4927_IRQ_CP0_MASK
// | TX4927_IRQ_CP0_ENDIRQ
// | TX4927_IRQ_PIC_INIT
-// | TX4927_IRQ_PIC_STARTUP
-// | TX4927_IRQ_PIC_SHUTDOWN
// | TX4927_IRQ_PIC_ENABLE
// | TX4927_IRQ_PIC_DISABLE
-// | TX4927_IRQ_PIC_MASK
-// | TX4927_IRQ_PIC_ENDIRQ
// | TX4927_IRQ_INIT
// | TX4927_IRQ_NEST1
// | TX4927_IRQ_NEST2
@@ -124,49 +108,32 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
* Forward definitions for all pic's
*/
-static unsigned int tx4927_irq_cp0_startup(unsigned int irq);
-static void tx4927_irq_cp0_shutdown(unsigned int irq);
static void tx4927_irq_cp0_enable(unsigned int irq);
static void tx4927_irq_cp0_disable(unsigned int irq);
-static void tx4927_irq_cp0_mask_and_ack(unsigned int irq);
-static void tx4927_irq_cp0_end(unsigned int irq);
-static unsigned int tx4927_irq_pic_startup(unsigned int irq);
-static void tx4927_irq_pic_shutdown(unsigned int irq);
static void tx4927_irq_pic_enable(unsigned int irq);
static void tx4927_irq_pic_disable(unsigned int irq);
-static void tx4927_irq_pic_mask_and_ack(unsigned int irq);
-static void tx4927_irq_pic_end(unsigned int irq);
/*
* Kernel structs for all pic's
*/
-static DEFINE_SPINLOCK(tx4927_cp0_lock);
-static DEFINE_SPINLOCK(tx4927_pic_lock);
-
#define TX4927_CP0_NAME "TX4927-CP0"
static struct irq_chip tx4927_irq_cp0_type = {
.typename = TX4927_CP0_NAME,
- .startup = tx4927_irq_cp0_startup,
- .shutdown = tx4927_irq_cp0_shutdown,
- .enable = tx4927_irq_cp0_enable,
- .disable = tx4927_irq_cp0_disable,
- .ack = tx4927_irq_cp0_mask_and_ack,
- .end = tx4927_irq_cp0_end,
- .set_affinity = NULL
+ .ack = tx4927_irq_cp0_disable,
+ .mask = tx4927_irq_cp0_disable,
+ .mask_ack = tx4927_irq_cp0_disable,
+ .unmask = tx4927_irq_cp0_enable,
};
#define TX4927_PIC_NAME "TX4927-PIC"
static struct irq_chip tx4927_irq_pic_type = {
.typename = TX4927_PIC_NAME,
- .startup = tx4927_irq_pic_startup,
- .shutdown = tx4927_irq_pic_shutdown,
- .enable = tx4927_irq_pic_enable,
- .disable = tx4927_irq_pic_disable,
- .ack = tx4927_irq_pic_mask_and_ack,
- .end = tx4927_irq_pic_end,
- .set_affinity = NULL
+ .ack = tx4927_irq_pic_disable,
+ .mask = tx4927_irq_pic_disable,
+ .mask_ack = tx4927_irq_pic_disable,
+ .unmask = tx4927_irq_pic_enable,
};
#define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL }
@@ -211,8 +178,6 @@ tx4927_irq_cp0_modify(unsigned cp0_reg, unsigned clr_bits, unsigned set_bits)
break;
}
}
-
- return;
}
static void __init tx4927_irq_cp0_init(void)
@@ -222,82 +187,23 @@ static void __init tx4927_irq_cp0_init(void)
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_INIT, "beg=%d end=%d\n",
TX4927_IRQ_CP0_BEG, TX4927_IRQ_CP0_END);
- for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &tx4927_irq_cp0_type;
- }
-
- return;
-}
-
-static unsigned int tx4927_irq_cp0_startup(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_STARTUP, "irq=%d \n", irq);
-
- tx4927_irq_cp0_enable(irq);
-
- return (0);
-}
-
-static void tx4927_irq_cp0_shutdown(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_SHUTDOWN, "irq=%d \n", irq);
-
- tx4927_irq_cp0_disable(irq);
-
- return;
+ for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++)
+ set_irq_chip_and_handler(i, &tx4927_irq_cp0_type,
+ handle_level_irq);
}
static void tx4927_irq_cp0_enable(unsigned int irq)
{
- unsigned long flags;
-
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENABLE, "irq=%d \n", irq);
- spin_lock_irqsave(&tx4927_cp0_lock, flags);
-
tx4927_irq_cp0_modify(CCP0_STATUS, 0, tx4927_irq_cp0_mask(irq));
-
- spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
-
- return;
}
static void tx4927_irq_cp0_disable(unsigned int irq)
{
- unsigned long flags;
-
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_DISABLE, "irq=%d \n", irq);
- spin_lock_irqsave(&tx4927_cp0_lock, flags);
-
tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0);
-
- spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
-
- return;
-}
-
-static void tx4927_irq_cp0_mask_and_ack(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_MASK, "irq=%d \n", irq);
-
- tx4927_irq_cp0_disable(irq);
-
- return;
-}
-
-static void tx4927_irq_cp0_end(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq);
-
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- tx4927_irq_cp0_enable(irq);
- }
-
- return;
}
/*
@@ -418,105 +324,39 @@ static void tx4927_irq_pic_modify(unsigned pic_reg, unsigned clr_bits,
val &= (~clr_bits);
val |= (set_bits);
TX4927_WR(pic_reg, val);
-
- return;
}
static void __init tx4927_irq_pic_init(void)
{
- unsigned long flags;
int i;
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_INIT, "beg=%d end=%d\n",
TX4927_IRQ_PIC_BEG, TX4927_IRQ_PIC_END);
- for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &tx4927_irq_pic_type;
- }
+ for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++)
+ set_irq_chip_and_handler(i, &tx4927_irq_pic_type,
+ handle_level_irq);
setup_irq(TX4927_IRQ_NEST_PIC_ON_CP0, &tx4927_irq_pic_action);
- spin_lock_irqsave(&tx4927_pic_lock, flags);
-
TX4927_WR(0xff1ff640, 0x6); /* irq level mask -- only accept highest */
TX4927_WR(0xff1ff600, TX4927_RD(0xff1ff600) | 0x1); /* irq enable */
-
- spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
- return;
-}
-
-static unsigned int tx4927_irq_pic_startup(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_STARTUP, "irq=%d\n", irq);
-
- tx4927_irq_pic_enable(irq);
-
- return (0);
-}
-
-static void tx4927_irq_pic_shutdown(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_SHUTDOWN, "irq=%d\n", irq);
-
- tx4927_irq_pic_disable(irq);
-
- return;
}
static void tx4927_irq_pic_enable(unsigned int irq)
{
- unsigned long flags;
-
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENABLE, "irq=%d\n", irq);
- spin_lock_irqsave(&tx4927_pic_lock, flags);
-
tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq), 0,
tx4927_irq_pic_mask(irq));
-
- spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
- return;
}
static void tx4927_irq_pic_disable(unsigned int irq)
{
- unsigned long flags;
-
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_DISABLE, "irq=%d\n", irq);
- spin_lock_irqsave(&tx4927_pic_lock, flags);
-
tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq),
tx4927_irq_pic_mask(irq), 0);
-
- spin_unlock_irqrestore(&tx4927_pic_lock, flags);
-
- return;
-}
-
-static void tx4927_irq_pic_mask_and_ack(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_MASK, "irq=%d\n", irq);
-
- tx4927_irq_pic_disable(irq);
-
- return;
-}
-
-static void tx4927_irq_pic_end(unsigned int irq)
-{
- TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq);
-
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- tx4927_irq_pic_enable(irq);
- }
-
- return;
}
/*
@@ -533,8 +373,6 @@ void __init tx4927_irq_init(void)
tx4927_irq_pic_init();
TX4927_IRQ_DPRINTK(TX4927_IRQ_INIT, "+\n");
-
- return;
}
static int tx4927_irq_nested(void)
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index 0c3c3f66823..5a5ea6c0b9f 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -151,16 +151,10 @@ JP7 is not bus master -- do NOT use -- only 4 pci bus master's allowed -- SouthB
#define TOSHIBA_RBTX4927_IRQ_EROR ( 1 << 2 )
#define TOSHIBA_RBTX4927_IRQ_IOC_INIT ( 1 << 10 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_STARTUP ( 1 << 11 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN ( 1 << 12 )
#define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE ( 1 << 13 )
#define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE ( 1 << 14 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_MASK ( 1 << 15 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ ( 1 << 16 )
#define TOSHIBA_RBTX4927_IRQ_ISA_INIT ( 1 << 20 )
-#define TOSHIBA_RBTX4927_IRQ_ISA_STARTUP ( 1 << 21 )
-#define TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN ( 1 << 22 )
#define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE ( 1 << 23 )
#define TOSHIBA_RBTX4927_IRQ_ISA_DISABLE ( 1 << 24 )
#define TOSHIBA_RBTX4927_IRQ_ISA_MASK ( 1 << 25 )
@@ -175,15 +169,9 @@ static const u32 toshiba_rbtx4927_irq_debug_flag =
(TOSHIBA_RBTX4927_IRQ_NONE | TOSHIBA_RBTX4927_IRQ_INFO |
TOSHIBA_RBTX4927_IRQ_WARN | TOSHIBA_RBTX4927_IRQ_EROR
// | TOSHIBA_RBTX4927_IRQ_IOC_INIT
-// | TOSHIBA_RBTX4927_IRQ_IOC_STARTUP
-// | TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN
// | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE
// | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE
-// | TOSHIBA_RBTX4927_IRQ_IOC_MASK
-// | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ
// | TOSHIBA_RBTX4927_IRQ_ISA_INIT
-// | TOSHIBA_RBTX4927_IRQ_ISA_STARTUP
-// | TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN
// | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE
// | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE
// | TOSHIBA_RBTX4927_IRQ_ISA_MASK
@@ -231,35 +219,23 @@ extern void disable_8259A_irq(unsigned int irq);
extern void mask_and_ack_8259A(unsigned int irq);
#endif
-static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq);
#ifdef CONFIG_TOSHIBA_FPCIB0
-static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq);
-static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_disable(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_end(unsigned int irq);
#endif
-static DEFINE_SPINLOCK(toshiba_rbtx4927_ioc_lock);
-
-
#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
.typename = TOSHIBA_RBTX4927_IOC_NAME,
- .startup = toshiba_rbtx4927_irq_ioc_startup,
- .shutdown = toshiba_rbtx4927_irq_ioc_shutdown,
- .enable = toshiba_rbtx4927_irq_ioc_enable,
- .disable = toshiba_rbtx4927_irq_ioc_disable,
- .ack = toshiba_rbtx4927_irq_ioc_mask_and_ack,
- .end = toshiba_rbtx4927_irq_ioc_end,
- .set_affinity = NULL
+ .ack = toshiba_rbtx4927_irq_ioc_disable,
+ .mask = toshiba_rbtx4927_irq_ioc_disable,
+ .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
+ .unmask = toshiba_rbtx4927_irq_ioc_enable,
};
#define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000
#define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006
@@ -269,13 +245,11 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
.typename = TOSHIBA_RBTX4927_ISA_NAME,
- .startup = toshiba_rbtx4927_irq_isa_startup,
- .shutdown = toshiba_rbtx4927_irq_isa_shutdown,
- .enable = toshiba_rbtx4927_irq_isa_enable,
- .disable = toshiba_rbtx4927_irq_isa_disable,
.ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
+ .mask = toshiba_rbtx4927_irq_isa_disable,
+ .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
+ .unmask = toshiba_rbtx4927_irq_isa_enable,
.end = toshiba_rbtx4927_irq_isa_end,
- .set_affinity = NULL
};
#endif
@@ -363,58 +337,16 @@ static void __init toshiba_rbtx4927_irq_ioc_init(void)
TOSHIBA_RBTX4927_IRQ_IOC_END);
for (i = TOSHIBA_RBTX4927_IRQ_IOC_BEG;
- i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 3;
- irq_desc[i].chip = &toshiba_rbtx4927_irq_ioc_type;
- }
+ i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++)
+ set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
+ handle_level_irq);
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_IOC_ON_PIC,
&toshiba_rbtx4927_irq_ioc_action);
-
- return;
-}
-
-static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_STARTUP,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- toshiba_rbtx4927_irq_ioc_enable(irq);
-
- return (0);
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- toshiba_rbtx4927_irq_ioc_disable(irq);
-
- return;
}
-
static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
{
- unsigned long flags;
volatile unsigned char v;
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENABLE,
@@ -427,21 +359,14 @@ static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
panic("\n");
}
- spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
-
v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
v |= (1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
-
- spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
-
- return;
}
static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
{
- unsigned long flags;
volatile unsigned char v;
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_DISABLE,
@@ -454,53 +379,9 @@ static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
panic("\n");
}
- spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
-
v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
v &= ~(1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
-
- spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
-
- return;
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_MASK,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- toshiba_rbtx4927_irq_ioc_disable(irq);
-
- return;
-}
-
-
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- toshiba_rbtx4927_irq_ioc_enable(irq);
- }
-
- return;
}
@@ -520,13 +401,8 @@ static void __init toshiba_rbtx4927_irq_isa_init(void)
TOSHIBA_RBTX4927_IRQ_ISA_END);
for (i = TOSHIBA_RBTX4927_IRQ_ISA_BEG;
- i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth =
- ((i < TOSHIBA_RBTX4927_IRQ_ISA_MID) ? (4) : (5));
- irq_desc[i].chip = &toshiba_rbtx4927_irq_isa_type;
- }
+ i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++)
+ set_irq_chip(i, &toshiba_rbtx4927_irq_isa_type);
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_ISA_ON_IOC,
&toshiba_rbtx4927_irq_isa_master);
@@ -536,48 +412,6 @@ static void __init toshiba_rbtx4927_irq_isa_init(void)
/* make sure we are looking at IRR (not ISR) */
outb(0x0A, 0x20);
outb(0x0A, 0xA0);
-
- return;
-}
-#endif
-
-
-#ifdef CONFIG_TOSHIBA_FPCIB0
-static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_STARTUP,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- toshiba_rbtx4927_irq_isa_enable(irq);
-
- return (0);
-}
-#endif
-
-
-#ifdef CONFIG_TOSHIBA_FPCIB0
-static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq)
-{
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN,
- "irq=%d\n", irq);
-
- if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
- || irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
- TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
- "bad irq=%d\n", irq);
- panic("\n");
- }
-
- toshiba_rbtx4927_irq_isa_disable(irq);
-
- return;
}
#endif
@@ -596,8 +430,6 @@ static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq)
}
enable_8259A_irq(irq);
-
- return;
}
#endif
@@ -616,8 +448,6 @@ static void toshiba_rbtx4927_irq_isa_disable(unsigned int irq)
}
disable_8259A_irq(irq);
-
- return;
}
#endif
@@ -636,8 +466,6 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq)
}
mask_and_ack_8259A(irq);
-
- return;
}
#endif
@@ -658,8 +486,6 @@ static void toshiba_rbtx4927_irq_isa_end(unsigned int irq)
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
toshiba_rbtx4927_irq_isa_enable(irq);
}
-
- return;
}
#endif
@@ -668,8 +494,6 @@ void __init arch_init_irq(void)
{
extern void tx4927_irq_init(void);
- local_irq_disable();
-
tx4927_irq_init();
toshiba_rbtx4927_irq_ioc_init();
#ifdef CONFIG_TOSHIBA_FPCIB0
@@ -681,8 +505,6 @@ void __init arch_init_irq(void)
#endif
wbflush();
-
- return;
}
void toshiba_rbtx4927_irq_dump(char *key)
@@ -715,7 +537,6 @@ void toshiba_rbtx4927_irq_dump(char *key)
}
}
#endif
- return;
}
void toshiba_rbtx4927_irq_dump_pics(char *s)
@@ -780,6 +601,4 @@ void toshiba_rbtx4927_irq_dump_pics(char *s)
level5_s);
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_INFO, "[%s]\n",
s);
-
- return;
}
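
The hunks above (and the similar ones that follow for the TX4938, RBTX4938 and VR41xx code) all apply the same migration: the old .startup/.shutdown/.enable/.disable/.end hooks are dropped, the chip exposes only mask/unmask/ack callbacks, and the generic level-IRQ flow handler installed with set_irq_chip_and_handler() supplies the rest. A minimal sketch of the resulting pattern, with illustrative names that do not appear in this patch:

/*
 * Illustrative only -- "example_*" names are hypothetical.  For a simple
 * level-triggered controller, acking is the same as masking, so one
 * function serves .ack, .mask and .mask_ack.
 */
static void example_irq_mask(unsigned int irq)
{
	/* clear the enable bit for this source in the controller register */
}

static void example_irq_unmask(unsigned int irq)
{
	/* set the enable bit for this source in the controller register */
}

static struct irq_chip example_irq_chip = {
	.typename = "EXAMPLE",
	.ack      = example_irq_mask,
	.mask     = example_irq_mask,
	.mask_ack = example_irq_mask,
	.unmask   = example_irq_unmask,
};

static void __init example_irq_init(unsigned int first, unsigned int last)
{
	unsigned int i;

	/* handle_level_irq() now provides the startup/shutdown/end behaviour */
	for (i = first; i <= last; i++)
		set_irq_chip_and_handler(i, &example_irq_chip,
					 handle_level_irq);
}
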
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index 77fe2454f5b..a347b424d91 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -37,48 +37,32 @@
/* Forward definitions for all pic's */
/**********************************************************************************/
-static unsigned int tx4938_irq_cp0_startup(unsigned int irq);
-static void tx4938_irq_cp0_shutdown(unsigned int irq);
static void tx4938_irq_cp0_enable(unsigned int irq);
static void tx4938_irq_cp0_disable(unsigned int irq);
-static void tx4938_irq_cp0_mask_and_ack(unsigned int irq);
-static void tx4938_irq_cp0_end(unsigned int irq);
-static unsigned int tx4938_irq_pic_startup(unsigned int irq);
-static void tx4938_irq_pic_shutdown(unsigned int irq);
static void tx4938_irq_pic_enable(unsigned int irq);
static void tx4938_irq_pic_disable(unsigned int irq);
-static void tx4938_irq_pic_mask_and_ack(unsigned int irq);
-static void tx4938_irq_pic_end(unsigned int irq);
/**********************************************************************************/
/* Kernel structs for all pic's */
/**********************************************************************************/
-DEFINE_SPINLOCK(tx4938_cp0_lock);
-DEFINE_SPINLOCK(tx4938_pic_lock);
#define TX4938_CP0_NAME "TX4938-CP0"
static struct irq_chip tx4938_irq_cp0_type = {
.typename = TX4938_CP0_NAME,
- .startup = tx4938_irq_cp0_startup,
- .shutdown = tx4938_irq_cp0_shutdown,
- .enable = tx4938_irq_cp0_enable,
- .disable = tx4938_irq_cp0_disable,
- .ack = tx4938_irq_cp0_mask_and_ack,
- .end = tx4938_irq_cp0_end,
- .set_affinity = NULL
+ .ack = tx4938_irq_cp0_disable,
+ .mask = tx4938_irq_cp0_disable,
+ .mask_ack = tx4938_irq_cp0_disable,
+ .unmask = tx4938_irq_cp0_enable,
};
#define TX4938_PIC_NAME "TX4938-PIC"
static struct irq_chip tx4938_irq_pic_type = {
.typename = TX4938_PIC_NAME,
- .startup = tx4938_irq_pic_startup,
- .shutdown = tx4938_irq_pic_shutdown,
- .enable = tx4938_irq_pic_enable,
- .disable = tx4938_irq_pic_disable,
- .ack = tx4938_irq_pic_mask_and_ack,
- .end = tx4938_irq_pic_end,
- .set_affinity = NULL
+ .ack = tx4938_irq_pic_disable,
+ .mask = tx4938_irq_pic_disable,
+ .mask_ack = tx4938_irq_pic_disable,
+ .unmask = tx4938_irq_pic_enable,
};
static struct irqaction tx4938_irq_pic_action = {
@@ -99,64 +83,21 @@ tx4938_irq_cp0_init(void)
{
int i;
- for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &tx4938_irq_cp0_type;
- }
-}
-
-static unsigned int
-tx4938_irq_cp0_startup(unsigned int irq)
-{
- tx4938_irq_cp0_enable(irq);
-
- return 0;
-}
-
-static void
-tx4938_irq_cp0_shutdown(unsigned int irq)
-{
- tx4938_irq_cp0_disable(irq);
+ for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++)
+ set_irq_chip_and_handler(i, &tx4938_irq_cp0_type,
+ handle_level_irq);
}
static void
tx4938_irq_cp0_enable(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tx4938_cp0_lock, flags);
-
set_c0_status(tx4938_irq_cp0_mask(irq));
-
- spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
}
static void
tx4938_irq_cp0_disable(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tx4938_cp0_lock, flags);
-
clear_c0_status(tx4938_irq_cp0_mask(irq));
-
- spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
-}
-
-static void
-tx4938_irq_cp0_mask_and_ack(unsigned int irq)
-{
- tx4938_irq_cp0_disable(irq);
-}
-
-static void
-tx4938_irq_cp0_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- tx4938_irq_cp0_enable(irq);
- }
}
/**********************************************************************************/
@@ -290,78 +231,30 @@ tx4938_irq_pic_modify(unsigned pic_reg, unsigned clr_bits, unsigned set_bits)
static void __init
tx4938_irq_pic_init(void)
{
- unsigned long flags;
int i;
- for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 2;
- irq_desc[i].chip = &tx4938_irq_pic_type;
- }
+ for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++)
+ set_irq_chip_and_handler(i, &tx4938_irq_pic_type,
+ handle_level_irq);
setup_irq(TX4938_IRQ_NEST_PIC_ON_CP0, &tx4938_irq_pic_action);
- spin_lock_irqsave(&tx4938_pic_lock, flags);
-
TX4938_WR(0xff1ff640, 0x6); /* irq level mask -- only accept highest */
TX4938_WR(0xff1ff600, TX4938_RD(0xff1ff600) | 0x1); /* irq enable */
-
- spin_unlock_irqrestore(&tx4938_pic_lock, flags);
-}
-
-static unsigned int
-tx4938_irq_pic_startup(unsigned int irq)
-{
- tx4938_irq_pic_enable(irq);
-
- return 0;
-}
-
-static void
-tx4938_irq_pic_shutdown(unsigned int irq)
-{
- tx4938_irq_pic_disable(irq);
}
static void
tx4938_irq_pic_enable(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tx4938_pic_lock, flags);
-
tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq), 0,
tx4938_irq_pic_mask(irq));
-
- spin_unlock_irqrestore(&tx4938_pic_lock, flags);
}
static void
tx4938_irq_pic_disable(unsigned int irq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tx4938_pic_lock, flags);
-
tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq),
tx4938_irq_pic_mask(irq), 0);
-
- spin_unlock_irqrestore(&tx4938_pic_lock, flags);
-}
-
-static void
-tx4938_irq_pic_mask_and_ack(unsigned int irq)
-{
- tx4938_irq_pic_disable(irq);
-}
-
-static void
-tx4938_irq_pic_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- tx4938_irq_pic_enable(irq);
- }
}
/**********************************************************************************/
diff --git a/arch/mips/tx4938/common/setup.c b/arch/mips/tx4938/common/setup.c
index f415a1f18fb..dc87d92bb08 100644
--- a/arch/mips/tx4938/common/setup.c
+++ b/arch/mips/tx4938/common/setup.c
@@ -31,7 +31,6 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <asm/time.h>
-#include <asm/time.h>
#include <asm/tx4938/rbtx4938.h>
extern void toshiba_rbtx4938_setup(void);
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index 102e473c10a..b6f363d0801 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -87,25 +87,16 @@ IRQ Device
#include <linux/bootmem.h>
#include <asm/tx4938/rbtx4938.h>
-static unsigned int toshiba_rbtx4938_irq_ioc_startup(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq);
-
-DEFINE_SPINLOCK(toshiba_rbtx4938_ioc_lock);
#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
.typename = TOSHIBA_RBTX4938_IOC_NAME,
- .startup = toshiba_rbtx4938_irq_ioc_startup,
- .shutdown = toshiba_rbtx4938_irq_ioc_shutdown,
- .enable = toshiba_rbtx4938_irq_ioc_enable,
- .disable = toshiba_rbtx4938_irq_ioc_disable,
- .ack = toshiba_rbtx4938_irq_ioc_mask_and_ack,
- .end = toshiba_rbtx4938_irq_ioc_end,
- .set_affinity = NULL
+ .ack = toshiba_rbtx4938_irq_ioc_disable,
+ .mask = toshiba_rbtx4938_irq_ioc_disable,
+ .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
+ .unmask = toshiba_rbtx4938_irq_ioc_enable,
};
#define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
@@ -142,77 +133,36 @@ toshiba_rbtx4938_irq_ioc_init(void)
int i;
for (i = TOSHIBA_RBTX4938_IRQ_IOC_BEG;
- i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
- irq_desc[i].depth = 3;
- irq_desc[i].chip = &toshiba_rbtx4938_irq_ioc_type;
- }
+ i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++)
+ set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
+ handle_level_irq);
setup_irq(RBTX4938_IRQ_IOCINT,
&toshiba_rbtx4938_irq_ioc_action);
}
-static unsigned int
-toshiba_rbtx4938_irq_ioc_startup(unsigned int irq)
-{
- toshiba_rbtx4938_irq_ioc_enable(irq);
-
- return 0;
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq)
-{
- toshiba_rbtx4938_irq_ioc_disable(irq);
-}
-
static void
toshiba_rbtx4938_irq_ioc_enable(unsigned int irq)
{
- unsigned long flags;
volatile unsigned char v;
- spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
-
v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
v |= (1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
mmiowb();
TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
-
- spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
}
static void
toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
{
- unsigned long flags;
volatile unsigned char v;
- spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
-
v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
v &= ~(1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
mmiowb();
TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
-
- spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq)
-{
- toshiba_rbtx4938_irq_ioc_disable(irq);
-}
-
-static void
-toshiba_rbtx4938_irq_ioc_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- toshiba_rbtx4938_irq_ioc_enable(irq);
- }
}
extern void __init txx9_spi_irqinit(int irc_irq);
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig
index 92f41f6f934..c8dfd8092ca 100644
--- a/arch/mips/vr41xx/Kconfig
+++ b/arch/mips/vr41xx/Kconfig
@@ -6,6 +6,7 @@ config CASIO_E55
select ISA
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config IBM_WORKPAD
bool "Support for IBM WorkPad z50"
@@ -15,6 +16,7 @@ config IBM_WORKPAD
select ISA
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config NEC_CMBVR4133
bool "Support for NEC CMB-VR4133"
@@ -39,6 +41,7 @@ config TANBAC_TB022X
select IRQ_CPU
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
help
The TANBAC VR4131 multichip module(TB0225) and
the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms
@@ -71,6 +74,7 @@ config VICTOR_MPC30X
select IRQ_CPU
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config ZAO_CAPCELLA
bool "Support for ZAO Networks Capcella"
@@ -80,6 +84,7 @@ config ZAO_CAPCELLA
select IRQ_CPU
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select GENERIC_HARDIRQS_NO__DO_IRQ
config PCI_VR41XX
bool "Add PCI control unit support of NEC VR4100 series"
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c215c0d39fa..c075261976c 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -417,14 +417,7 @@ void vr41xx_disable_bcuint(void)
EXPORT_SYMBOL(vr41xx_disable_bcuint);
-static unsigned int startup_sysint1_irq(unsigned int irq)
-{
- icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-
- return 0; /* never anything pending */
-}
-
-static void shutdown_sysint1_irq(unsigned int irq)
+static void disable_sysint1_irq(unsigned int irq)
{
icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
}
@@ -434,33 +427,15 @@ static void enable_sysint1_irq(unsigned int irq)
icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
}
-#define disable_sysint1_irq shutdown_sysint1_irq
-#define ack_sysint1_irq shutdown_sysint1_irq
-
-static void end_sysint1_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-}
-
static struct irq_chip sysint1_irq_type = {
.typename = "SYSINT1",
- .startup = startup_sysint1_irq,
- .shutdown = shutdown_sysint1_irq,
- .enable = enable_sysint1_irq,
- .disable = disable_sysint1_irq,
- .ack = ack_sysint1_irq,
- .end = end_sysint1_irq,
+ .ack = disable_sysint1_irq,
+ .mask = disable_sysint1_irq,
+ .mask_ack = disable_sysint1_irq,
+ .unmask = enable_sysint1_irq,
};
-static unsigned int startup_sysint2_irq(unsigned int irq)
-{
- icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-
- return 0; /* never anything pending */
-}
-
-static void shutdown_sysint2_irq(unsigned int irq)
+static void disable_sysint2_irq(unsigned int irq)
{
icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
}
@@ -470,23 +445,12 @@ static void enable_sysint2_irq(unsigned int irq)
icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
}
-#define disable_sysint2_irq shutdown_sysint2_irq
-#define ack_sysint2_irq shutdown_sysint2_irq
-
-static void end_sysint2_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-}
-
static struct irq_chip sysint2_irq_type = {
.typename = "SYSINT2",
- .startup = startup_sysint2_irq,
- .shutdown = shutdown_sysint2_irq,
- .enable = enable_sysint2_irq,
- .disable = disable_sysint2_irq,
- .ack = ack_sysint2_irq,
- .end = end_sysint2_irq,
+ .ack = disable_sysint2_irq,
+ .mask = disable_sysint2_irq,
+ .mask_ack = disable_sysint2_irq,
+ .unmask = enable_sysint2_irq,
};
static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
@@ -723,10 +687,12 @@ static int __init vr41xx_icu_init(void)
icu2_write(MGIUINTHREG, 0xffff);
for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
- irq_desc[i].chip = &sysint1_irq_type;
+ set_irq_chip_and_handler(i, &sysint1_irq_type,
+ handle_level_irq);
for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
- irq_desc[i].chip = &sysint2_irq_type;
+ set_irq_chip_and_handler(i, &sysint2_irq_type,
+ handle_level_irq);
cascade_irq(INT0_IRQ, icu_get_irq);
cascade_irq(INT1_IRQ, icu_get_irq);
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 2483487344c..a039bb7251f 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -30,17 +30,6 @@ extern void init_8259A(int hoge);
extern int vr4133_rockhopper;
-static unsigned int startup_i8259_irq(unsigned int irq)
-{
- enable_8259A_irq(irq - I8259_IRQ_BASE);
- return 0;
-}
-
-static void shutdown_i8259_irq(unsigned int irq)
-{
- disable_8259A_irq(irq - I8259_IRQ_BASE);
-}
-
static void enable_i8259_irq(unsigned int irq)
{
enable_8259A_irq(irq - I8259_IRQ_BASE);
@@ -64,11 +53,10 @@ static void end_i8259_irq(unsigned int irq)
static struct irq_chip i8259_irq_type = {
.typename = "XT-PIC",
- .startup = startup_i8259_irq,
- .shutdown = shutdown_i8259_irq,
- .enable = enable_i8259_irq,
- .disable = disable_i8259_irq,
.ack = ack_i8259_irq,
+ .mask = disable_i8259_irq,
+ .mask_ack = ack_i8259_irq,
+ .unmask = enable_i8259_irq,
.end = end_i8259_irq,
};
@@ -104,7 +92,7 @@ void __init rockhopper_init_irq(void)
}
for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
- irq_desc[i].chip = &i8259_irq_type;
+ set_irq_chip(i, &i8259_irq_type);
setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index 1e64e7b8811..ecb10a4f63c 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -75,7 +75,6 @@ struct elf_prpsinfo32
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
-#define elf_addr_t unsigned int
#define init_elf_binfmt init_elf32_binfmt
#define ELF_PLATFORM ("PARISC32\0")
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 8a1e08068e7..462696d30d3 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -101,11 +101,14 @@ out:
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
addc(result, sum);
- return from32to16(result);
+ return (__force __wsum)from32to16(result);
}
EXPORT_SYMBOL(csum_partial);
@@ -113,8 +116,8 @@ EXPORT_SYMBOL(csum_partial);
/*
* copy while checksumming, otherwise like csum_partial
*/
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
- int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
/*
* It's 2:30 am and I don't feel like doing it real ...
@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
- unsigned char *dst, int len,
- unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len,
+ __wsum sum, int *err_ptr)
{
int missing;
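
The parisc checksum hunks above follow the kernel-wide sparse annotation scheme: partial checksums travel as the bitwise types __wsum/__sum16, and any conversion to or from plain integers is made explicit with __force casts. A minimal sketch of that convention, using a hypothetical fold helper rather than anything taken from this patch:

#include <linux/types.h>

/* Hypothetical helper, not from the patch: fold a 32-bit partial sum. */
static inline __sum16 example_csum_fold(__wsum sum)
{
	u32 tmp = (__force u32)sum;

	/* add the carries back in twice so the result fits in 16 bits */
	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);
	return (__force __sum16)~tmp;
}
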
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 64785e46f93..641f9c920ee 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -152,7 +152,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
const struct exception_table_entry *fix;
unsigned long acc_type;
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/powerpc/.gitignore b/arch/powerpc/.gitignore
new file mode 100644
index 00000000000..a1a869c8c84
--- /dev/null
+++ b/arch/powerpc/.gitignore
@@ -0,0 +1 @@
+include
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0673dbedb24..291c95ac4b3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -112,7 +112,7 @@ choice
default 6xx
config CLASSIC32
- bool "6xx/7xx/74xx"
+ bool "52xx/6xx/7xx/74xx"
select PPC_FPU
select 6xx
help
@@ -121,16 +121,18 @@ config CLASSIC32
versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
embedded versions (403 and 405) and the high end 64 bit Power
processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
+
+ This option is the catch-all for 6xx types, including some of the
+ embedded versions. Unless you see an option for the specific
+ chip family you are using, you want this option.
+
+ You do not want this option if you are building a kernel for a 64 bit
+ IBM RS/6000 or an Apple G5.
+
+ If unsure, select this option.
- Unless you are building a kernel for one of the embedded processor
- systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
Note that the kernel runs in 32-bit mode even on 64-bit chips.
-config PPC_52xx
- bool "Freescale 52xx"
- select 6xx
- select PPC_FPU
-
config PPC_82xx
bool "Freescale 82xx"
select 6xx
@@ -160,9 +162,11 @@ config PPC_86xx
config 40x
bool "AMCC 40x"
+ select PPC_DCR_NATIVE
config 44x
bool "AMCC 44x"
+ select PPC_DCR_NATIVE
config 8xx
bool "Freescale 8xx"
@@ -208,6 +212,24 @@ config PPC_FPU
bool
default y if PPC64
+config PPC_DCR_NATIVE
+ bool
+ default n
+
+config PPC_DCR_MMIO
+ bool
+ default n
+
+config PPC_DCR
+ bool
+ depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
+ default y
+
+config PPC_OF_PLATFORM_PCI
+ bool
+ depends on PPC64 # not supported on 32 bits yet
+ default n
+
config BOOKE
bool
depends on E200 || E500
@@ -227,6 +249,7 @@ config PTE_64BIT
config PHYS_64BIT
bool 'Large physical address support' if E500
depends on 44x || E500
+ select RESOURCES_64BIT
default y if 44x
---help---
This option enables kernel support for larger than 32-bit physical
@@ -369,11 +392,13 @@ config PPC_PSERIES
select PPC_RTAS
select RTAS_ERROR_LOGGING
select PPC_UDBG_16550
+ select PPC_NATIVE
default y
config PPC_ISERIES
bool "IBM Legacy iSeries"
depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_INDIRECT_IO
config PPC_CHRP
bool "Common Hardware Reference Platform (CHRP) based machines"
@@ -384,14 +409,35 @@ config PPC_CHRP
select PPC_RTAS
select PPC_MPC106
select PPC_UDBG_16550
+ select PPC_NATIVE
+ default y
+
+config PPC_MPC52xx
+ bool
+ default n
+
+config PPC_EFIKA
+ bool "bPlan Efika 5k2. MPC5200B based computer"
+ depends on PPC_MULTIPLATFORM && PPC32
+ select PPC_RTAS
+ select RTAS_PROC
+ select PPC_MPC52xx
+ select PPC_NATIVE
default y
+config PPC_LITE5200
+ bool "Freescale Lite5200 Eval Board"
+ depends on PPC_MULTIPLATFORM && PPC32
+ select PPC_MPC52xx
+ default n
+
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_MULTIPLATFORM
select MPIC
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
+ select PPC_NATIVE
default y
config PPC_PMAC64
@@ -411,6 +457,7 @@ config PPC_PREP
select PPC_I8259
select PPC_INDIRECT_PCI
select PPC_UDBG_16550
+ select PPC_NATIVE
default y
config PPC_MAPLE
@@ -422,10 +469,11 @@ config PPC_MAPLE
select GENERIC_TBSYNC
select PPC_UDBG_16550
select PPC_970_NAP
+ select PPC_NATIVE
default n
help
This option enables support for the Maple 970FX Evaluation Board.
- For more informations, refer to <http://www.970eval.com>
+ For more information, refer to <http://www.970eval.com>
config PPC_PASEMI
depends on PPC_MULTIPLATFORM && PPC64
@@ -434,6 +482,7 @@ config PPC_PASEMI
select MPIC
select PPC_UDBG_16550
select GENERIC_TBSYNC
+ select PPC_NATIVE
help
This option enables support for PA Semi's PWRficient line
of SoC processors, including PA6T-1682M
@@ -445,6 +494,11 @@ config PPC_CELL
config PPC_CELL_NATIVE
bool
select PPC_CELL
+ select PPC_DCR_MMIO
+ select PPC_OF_PLATFORM_PCI
+ select PPC_INDIRECT_IO
+ select PPC_NATIVE
+ select MPIC
default n
config PPC_IBM_CELL_BLADE
@@ -456,6 +510,22 @@ config PPC_IBM_CELL_BLADE
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
+config PPC_PS3
+ bool "Sony PS3"
+ depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_CELL
+ help
+ This option enables support for the Sony PS3 game console
+ and other platforms using the PS3 hypervisor.
+
+config PPC_NATIVE
+ bool
+ depends on PPC_MULTIPLATFORM
+ help
+ Support for running natively on the hardware, i.e. without
+ a hypervisor. This option is not user-selectable but should
+ be selected by all platforms that need it.
+
config UDBG_RTAS_CONSOLE
bool "RTAS based debug console"
depends on PPC_RTAS
@@ -517,6 +587,15 @@ config PPC_970_NAP
bool
default n
+config PPC_INDIRECT_IO
+ bool
+ select GENERIC_IOMAP
+ default n
+
+config GENERIC_IOMAP
+ bool
+ default n
+
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC
@@ -594,12 +673,6 @@ config TAU_AVERAGE
If in doubt, say N here.
-config PPC_TODC
- depends on EMBEDDED6xx
- bool "Generic Time-of-day Clock (TODC) support"
- ---help---
- This adds support for many TODC/RTC chips.
-
endmenu
source arch/powerpc/platforms/embedded6xx/Kconfig
@@ -610,6 +683,7 @@ source arch/powerpc/platforms/85xx/Kconfig
source arch/powerpc/platforms/86xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig
source arch/powerpc/platforms/cell/Kconfig
+source arch/powerpc/platforms/ps3/Kconfig
menu "Kernel options"
@@ -790,7 +864,6 @@ source "arch/powerpc/platforms/prep/Kconfig"
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
- depends on !PPC_ISERIES
config CMDLINE
string "Initial kernel command string"
@@ -880,7 +953,7 @@ config MCA
config PCI
bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
- || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2
+ || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2 || PPC_PS3
default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5ad149b47e3..f0e51edde02 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -77,7 +77,7 @@ config KGDB_CONSOLE
config XMON
bool "Include xmon kernel debugger"
- depends on DEBUGGER && !PPC_ISERIES
+ depends on DEBUGGER
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
@@ -98,6 +98,15 @@ config XMON_DEFAULT
xmon is normally disabled unless booted with 'xmon=on'.
Use 'xmon=off' to disable xmon init during runtime.
+config XMON_DISASSEMBLY
+ bool "Include disassembly support in xmon"
+ depends on XMON
+ default y
+ help
+ Include support for disassembling in xmon. You probably want
+ to say Y here, unless you're building for a memory-constrained
+ system.
+
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
depends on PPC64
@@ -116,7 +125,7 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
- depends PPC_OF && !PPC_ISERIES
+ depends PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 45c9ad23526..0734b2fc1d9 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,19 +1,32 @@
addnote
+empty.c
+hack-coff
infblock.c
infblock.h
infcodes.c
infcodes.h
inffast.c
inffast.h
+inffixed.h
inflate.c
+inflate.h
inftrees.c
inftrees.h
infutil.c
infutil.h
kernel-vmlinux.strip.c
kernel-vmlinux.strip.gz
+mktree
uImage
zImage
+zImage.chrp
+zImage.coff
+zImage.coff.lds
+zImage.lds
+zImage.miboot
+zImage.pmac
+zImage.pseries
+zImage.sandpoint
zImage.vmode
zconf.h
zlib.h
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 4b2be611f77..343dbcfdf08 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -40,7 +40,8 @@ zliblinuxheader := zlib.h zconf.h zutil.h
$(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) \
$(addprefix $(obj)/,$(zlibheader))
-src-wlib := string.S stdio.c main.c div64.S $(zlib)
+src-wlib := string.S stdio.c main.c flatdevtree.c flatdevtree_misc.c \
+ ns16550.c serial.c simple_alloc.c div64.S util.S $(zlib)
src-plat := of.c
src-boot := crt0.S $(src-wlib) $(src-plat) empty.c
@@ -74,7 +75,7 @@ $(obj)/zImage.lds $(obj)/zImage.coff.lds: $(obj)/%: $(srctree)/$(src)/%.S
@cp $< $@
clean-files := $(zlib) $(zlibheader) $(zliblinuxheader) \
- $(obj)/empty.c
+ empty.c zImage zImage.coff.lds zImage.lds zImage.sandpoint
quiet_cmd_bootcc = BOOTCC $@
cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -93,13 +94,13 @@ $(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S
$(obj)/wrapper.a: $(obj-wlib)
$(call cmd,bootar)
-hostprogs-y := addnote addRamDisk hack-coff
+hostprogs-y := addnote addRamDisk hack-coff mktree
extra-y := $(obj)/crt0.o $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds
wrapper :=$(srctree)/$(src)/wrapper
-wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff)
+wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree)
#############
# Bits for building various flavours of zImage
@@ -148,13 +149,18 @@ $(obj)/zImage.miboot: vmlinux $(wrapperbits)
$(obj)/zImage.initrd.miboot: vmlinux $(wrapperbits)
$(call cmd,wrap_initrd,miboot)
+$(obj)/zImage.ps3: vmlinux
+ $(STRIP) -s -R .comment $< -o $@
+
$(obj)/uImage: vmlinux $(wrapperbits)
$(call cmd,wrap,uboot)
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
image-$(CONFIG_PPC_MAPLE) += zImage.pseries
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
+image-$(CONFIG_PPC_PS3) += zImage.ps3
image-$(CONFIG_PPC_CHRP) += zImage.chrp
+image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
@@ -176,3 +182,4 @@ install: $(CONFIGURE) $(image-y)
clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.strip.gz)
clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.bin.gz)
+clean-files += $(image-)
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
new file mode 100644
index 00000000000..d06b0b01889
--- /dev/null
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -0,0 +1,148 @@
+/*
+ * Device Tree Source for Buffalo KuroboxHG
+ *
+ * Choose CONFIG_LINKSTATION to build a kernel for KuroboxHG, or use
+ * the default configuration linkstation_defconfig.
+ *
+ * Based on sandpoint.dts
+ *
+ * 2006 (c) G. Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+
+XXXX add flash parts, rtc, ??
+
+build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
+
+
+ */
+
+/ {
+ linux,phandle = <1000>;
+ model = "KuroboxHG";
+ compatible = "linkstation";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ linux,phandle = <2000>;
+ #cpus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,603e { /* Really 8241 */
+ linux,phandle = <2100>;
+ linux,boot-cpu;
+ device_type = "cpu";
+ reg = <0>;
+ clock-frequency = <fdad680>; /* Fixed by bootwrapper */
+ timebase-frequency = <1F04000>; /* Fixed by bootwrapper */
+ bus-frequency = <0>; /* From bootloader */
+ /* Following required by dtc but not used */
+ i-cache-line-size = <0>;
+ d-cache-line-size = <0>;
+ i-cache-size = <4000>;
+ d-cache-size = <4000>;
+ };
+ };
+
+ memory {
+ linux,phandle = <3000>;
+ device_type = "memory";
+ reg = <00000000 08000000>;
+ };
+
+ soc10x { /* AFAICT need to make soc for 8245's uarts to be defined */
+ linux,phandle = <4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ device_type = "soc";
+ compatible = "mpc10x";
+ store-gathering = <0>; /* 0 == off, !0 == on */
+ reg = <80000000 00100000>;
+ ranges = <80000000 80000000 70000000 /* pci mem space */
+ fc000000 fc000000 00100000 /* EUMB */
+ fe000000 fe000000 00c00000 /* pci i/o space */
+ fec00000 fec00000 00300000 /* pci cfg regs */
+ fef00000 fef00000 00100000>; /* pci iack */
+
+ i2c@80003000 {
+ linux,phandle = <4300>;
+ device_type = "i2c";
+ compatible = "fsl-i2c";
+ reg = <80003000 1000>;
+ interrupts = <5 2>;
+ interrupt-parent = <4400>;
+ };
+
+ serial@80004500 {
+ linux,phandle = <4511>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <80004500 8>;
+ clock-frequency = <7c044a8>;
+ current-speed = <2580>;
+ interrupts = <9 2>;
+ interrupt-parent = <4400>;
+ };
+
+ serial@80004600 {
+ linux,phandle = <4512>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <80004600 8>;
+ clock-frequency = <7c044a8>;
+ current-speed = <e100>;
+ interrupts = <a 0>;
+ interrupt-parent = <4400>;
+ };
+
+ pic@80040000 {
+ linux,phandle = <4400>;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ device_type = "open-pic";
+ compatible = "chrp,open-pic";
+ interrupt-controller;
+ reg = <80040000 40000>;
+ built-in;
+ };
+
+ pci@fec00000 {
+ linux,phandle = <4500>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ compatible = "mpc10x-pci";
+ reg = <fec00000 400000>;
+ ranges = <01000000 0 0 fe000000 0 00c00000
+ 02000000 0 80000000 80000000 0 70000000>;
+ bus-range = <0 ff>;
+ clock-frequency = <7f28155>;
+ interrupt-parent = <4400>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x11 - IRQ0 ETH */
+ 5800 0 0 1 4400 0 1
+ 5800 0 0 2 4400 1 1
+ 5800 0 0 3 4400 2 1
+ 5800 0 0 4 4400 3 1
+ /* IDSEL 0x12 - IRQ1 IDE0 */
+ 6000 0 0 1 4400 1 1
+ 6000 0 0 2 4400 2 1
+ 6000 0 0 3 4400 3 1
+ 6000 0 0 4 4400 0 1
+ /* IDSEL 0x14 - IRQ3 USB2.0 */
+ 7000 0 0 1 4400 3 1
+ 7000 0 0 2 4400 3 1
+ 7000 0 0 3 4400 3 1
+ 7000 0 0 4 4400 3 1
+ >;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
new file mode 100644
index 00000000000..8bc0d259796
--- /dev/null
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -0,0 +1,313 @@
+/*
+ * Lite5200 board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/ {
+ model = "Lite5200";
+ compatible = "lite5200\0lite52xx\0mpc5200\0mpc52xx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #cpus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5200@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <20>;
+ i-cache-line-size = <20>;
+ d-cache-size = <4000>; // L1, 16K
+ i-cache-size = <4000>; // L1, 16K
+ timebase-frequency = <0>; // from bootloader
+ bus-frequency = <0>; // from bootloader
+ clock-frequency = <0>; // from bootloader
+ 32-bit;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <00000000 04000000>; // 64MB
+ };
+
+ soc5200@f0000000 {
+ #interrupt-cells = <3>;
+ device_type = "soc";
+ ranges = <0 f0000000 f0010000>;
+ reg = <f0000000 00010000>;
+ bus-frequency = <0>; // from bootloader
+
+ cdm@200 {
+ compatible = "mpc5200-cdm\0mpc52xx-cdm";
+ reg = <200 38>;
+ };
+
+ pic@500 {
+ // 5200 interrupts are encoded into two levels;
+ linux,phandle = <500>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ device_type = "interrupt-controller";
+ compatible = "mpc5200-pic\0mpc52xx-pic";
+ reg = <500 80>;
+ built-in;
+ };
+
+ gpt@600 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <600 10>;
+ interrupts = <1 9 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@610 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <610 10>;
+ interrupts = <1 a 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@620 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <620 10>;
+ interrupts = <1 b 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@630 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <630 10>;
+ interrupts = <1 c 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@640 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <640 10>;
+ interrupts = <1 d 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@650 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <650 10>;
+ interrupts = <1 e 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@660 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <660 10>;
+ interrupts = <1 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@670 { // General Purpose Timer
+ compatible = "mpc5200-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <670 10>;
+ interrupts = <1 10 0>;
+ interrupt-parent = <500>;
+ };
+
+ rtc@800 { // Real time clock
+ compatible = "mpc5200-rtc\0mpc52xx-rtc";
+ device_type = "rtc";
+ reg = <800 100>;
+ interrupts = <1 5 0 1 6 0>;
+ interrupt-parent = <500>;
+ };
+
+ mscan@900 {
+ device_type = "mscan";
+ compatible = "mpc5200-mscan\0mpc52xx-mscan";
+ interrupts = <2 11 0>;
+ interrupt-parent = <500>;
+ reg = <900 80>;
+ };
+
+ mscan@980 {
+ device_type = "mscan";
+ compatible = "mpc5200-mscan\0mpc52xx-mscan";
+ interrupts = <1 12 0>;
+ interrupt-parent = <500>;
+ reg = <980 80>;
+ };
+
+ gpio@b00 {
+ compatible = "mpc5200-gpio\0mpc52xx-gpio";
+ reg = <b00 40>;
+ interrupts = <1 7 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpio-wkup@b00 {
+ compatible = "mpc5200-gpio-wkup\0mpc52xx-gpio-wkup";
+ reg = <c00 40>;
+ interrupts = <1 8 0 0 3 0>;
+ interrupt-parent = <500>;
+ };
+
+ pci@0d00 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ compatible = "mpc5200-pci\0mpc52xx-pci";
+ reg = <d00 100>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <c000 0 0 1 500 0 0 3
+ c000 0 0 2 500 0 0 3
+ c000 0 0 3 500 0 0 3
+ c000 0 0 4 500 0 0 3>;
+ clock-frequency = <0>; // From boot loader
+ interrupts = <2 8 0 2 9 0 2 a 0>;
+ interrupt-parent = <500>;
+ bus-range = <0 0>;
+ ranges = <42000000 0 80000000 80000000 0 20000000
+ 02000000 0 a0000000 a0000000 0 10000000
+ 01000000 0 00000000 b0000000 0 01000000>;
+ };
+
+ spi@f00 {
+ device_type = "spi";
+ compatible = "mpc5200-spi\0mpc52xx-spi";
+ reg = <f00 20>;
+ interrupts = <2 d 0 2 e 0>;
+ interrupt-parent = <500>;
+ };
+
+ usb@1000 {
+ device_type = "usb-ohci-be";
+ compatible = "mpc5200-ohci\0mpc52xx-ohci\0ohci-be";
+ reg = <1000 ff>;
+ interrupts = <2 6 0>;
+ interrupt-parent = <500>;
+ };
+
+ bestcomm@1200 {
+ device_type = "dma-controller";
+ compatible = "mpc5200-bestcomm\0mpc52xx-bestcomm";
+ reg = <1200 80>;
+ interrupts = <3 0 0 3 1 0 3 2 0 3 3 0
+ 3 4 0 3 5 0 3 6 0 3 7 0
+ 3 8 0 3 9 0 3 a 0 3 b 0
+ 3 c 0 3 d 0 3 e 0 3 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ xlb@1f00 {
+ compatible = "mpc5200-xlb\0mpc52xx-xlb";
+ reg = <1f00 100>;
+ };
+
+ serial@2000 { // PSC1
+ device_type = "serial";
+ compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+ port-number = <0>; // Logical port assignment
+ reg = <2000 100>;
+ interrupts = <2 1 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC2 in spi mode example
+ spi@2200 { // PSC2
+ device_type = "spi";
+ compatible = "mpc5200-psc-spi\0mpc52xx-psc-spi";
+ reg = <2200 100>;
+ interrupts = <2 2 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC3 in CODEC mode example
+ i2s@2400 { // PSC3
+ device_type = "i2s";
+ compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s";
+ reg = <2400 100>;
+ interrupts = <2 3 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC4 unconfigured
+ //serial@2600 { // PSC4
+ // device_type = "serial";
+ // compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+ // reg = <2600 100>;
+ // interrupts = <2 b 0>;
+ // interrupt-parent = <500>;
+ //};
+
+ // PSC5 unconfigured
+ //serial@2800 { // PSC5
+ // device_type = "serial";
+ // compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+ // reg = <2800 100>;
+ // interrupts = <2 c 0>;
+ // interrupt-parent = <500>;
+ //};
+
+ // PSC6 in AC97 mode example
+ ac97@2c00 { // PSC6
+ device_type = "ac97";
+ compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97";
+ reg = <2c00 100>;
+ interrupts = <2 4 0>;
+ interrupt-parent = <500>;
+ };
+
+ ethernet@3000 {
+ device_type = "network";
+ compatible = "mpc5200-fec\0mpc52xx-fec";
+ reg = <3000 800>;
+ mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+ interrupts = <2 5 0>;
+ interrupt-parent = <500>;
+ };
+
+ ata@3a00 {
+ device_type = "ata";
+ compatible = "mpc5200-ata\0mpc52xx-ata";
+ reg = <3a00 100>;
+ interrupts = <2 7 0>;
+ interrupt-parent = <500>;
+ };
+
+ i2c@3d00 {
+ device_type = "i2c";
+ compatible = "mpc5200-i2c\0mpc52xx-i2c";
+ reg = <3d00 40>;
+ interrupts = <2 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ i2c@3d40 {
+ device_type = "i2c";
+ compatible = "mpc5200-i2c\0mpc52xx-i2c";
+ reg = <3d40 40>;
+ interrupts = <2 10 0>;
+ interrupt-parent = <500>;
+ };
+ sram@8000 {
+ device_type = "sram";
+ compatible = "mpc5200-sram\0mpc52xx-sram\0sram";
+ reg = <8000 4000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
new file mode 100644
index 00000000000..81cb76418a7
--- /dev/null
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -0,0 +1,318 @@
+/*
+ * Lite5200B board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/ {
+ model = "Lite5200b";
+ compatible = "lite5200b\0lite52xx\0mpc5200b\0mpc52xx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #cpus = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5200@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <20>;
+ i-cache-line-size = <20>;
+ d-cache-size = <4000>; // L1, 16K
+ i-cache-size = <4000>; // L1, 16K
+ timebase-frequency = <0>; // from bootloader
+ bus-frequency = <0>; // from bootloader
+ clock-frequency = <0>; // from bootloader
+ 32-bit;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <00000000 10000000>; // 256MB
+ };
+
+ soc5200@f0000000 {
+ #interrupt-cells = <3>;
+ device_type = "soc";
+ ranges = <0 f0000000 f0010000>;
+ reg = <f0000000 00010000>;
+ bus-frequency = <0>; // from bootloader
+
+ cdm@200 {
+ compatible = "mpc5200b-cdm\0mpc52xx-cdm";
+ reg = <200 38>;
+ };
+
+ pic@500 {
+ // 5200 interrupts are encoded into two levels;
+ linux,phandle = <500>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ device_type = "interrupt-controller";
+ compatible = "mpc5200b-pic\0mpc52xx-pic";
+ reg = <500 80>;
+ built-in;
+ };
+
+ gpt@600 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <600 10>;
+ interrupts = <1 9 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@610 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <610 10>;
+ interrupts = <1 a 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@620 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <620 10>;
+ interrupts = <1 b 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@630 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <630 10>;
+ interrupts = <1 c 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@640 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <640 10>;
+ interrupts = <1 d 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@650 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <650 10>;
+ interrupts = <1 e 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@660 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <660 10>;
+ interrupts = <1 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpt@670 { // General Purpose Timer
+ compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+ device_type = "gpt";
+ reg = <670 10>;
+ interrupts = <1 10 0>;
+ interrupt-parent = <500>;
+ };
+
+ rtc@800 { // Real time clock
+ compatible = "mpc5200b-rtc\0mpc52xx-rtc";
+ device_type = "rtc";
+ reg = <800 100>;
+ interrupts = <1 5 0 1 6 0>;
+ interrupt-parent = <500>;
+ };
+
+ mscan@900 {
+ device_type = "mscan";
+ compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+ interrupts = <2 11 0>;
+ interrupt-parent = <500>;
+ reg = <900 80>;
+ };
+
+ mscan@980 {
+ device_type = "mscan";
+ compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+ interrupts = <1 12 0>;
+ interrupt-parent = <500>;
+ reg = <980 80>;
+ };
+
+ gpio@b00 {
+ compatible = "mpc5200b-gpio\0mpc52xx-gpio";
+ reg = <b00 40>;
+ interrupts = <1 7 0>;
+ interrupt-parent = <500>;
+ };
+
+ gpio-wkup@b00 {
+ compatible = "mpc5200b-gpio-wkup\0mpc52xx-gpio-wkup";
+ reg = <c00 40>;
+ interrupts = <1 8 0 0 3 0>;
+ interrupt-parent = <500>;
+ };
+
+ pci@0d00 {
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ compatible = "mpc5200b-pci\0mpc52xx-pci";
+ reg = <d00 100>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <c000 0 0 1 500 0 0 3 // 1st slot
+ c000 0 0 2 500 1 1 3
+ c000 0 0 3 500 1 2 3
+ c000 0 0 4 500 1 3 3
+
+ c800 0 0 1 500 1 1 3 // 2nd slot
+ c800 0 0 2 500 1 2 3
+ c800 0 0 3 500 1 3 3
+ c800 0 0 4 500 0 0 3>;
+ clock-frequency = <0>; // From boot loader
+ interrupts = <2 8 0 2 9 0 2 a 0>;
+ interrupt-parent = <500>;
+ bus-range = <0 0>;
+ ranges = <42000000 0 80000000 80000000 0 20000000
+ 02000000 0 a0000000 a0000000 0 10000000
+ 01000000 0 00000000 b0000000 0 01000000>;
+ };
+
+ spi@f00 {
+ device_type = "spi";
+ compatible = "mpc5200b-spi\0mpc52xx-spi";
+ reg = <f00 20>;
+ interrupts = <2 d 0 2 e 0>;
+ interrupt-parent = <500>;
+ };
+
+ usb@1000 {
+ device_type = "usb-ohci-be";
+ compatible = "mpc5200b-ohci\0mpc52xx-ohci\0ohci-be";
+ reg = <1000 ff>;
+ interrupts = <2 6 0>;
+ interrupt-parent = <500>;
+ };
+
+ bestcomm@1200 {
+ device_type = "dma-controller";
+ compatible = "mpc5200b-bestcomm\0mpc52xx-bestcomm";
+ reg = <1200 80>;
+ interrupts = <3 0 0 3 1 0 3 2 0 3 3 0
+ 3 4 0 3 5 0 3 6 0 3 7 0
+ 3 8 0 3 9 0 3 a 0 3 b 0
+ 3 c 0 3 d 0 3 e 0 3 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ xlb@1f00 {
+ compatible = "mpc5200b-xlb\0mpc52xx-xlb";
+ reg = <1f00 100>;
+ };
+
+ serial@2000 { // PSC1
+ device_type = "serial";
+ compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+ port-number = <0>; // Logical port assignment
+ reg = <2000 100>;
+ interrupts = <2 1 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC2 in spi mode example
+ spi@2200 { // PSC2
+ device_type = "spi";
+ compatible = "mpc5200b-psc-spi\0mpc52xx-psc-spi";
+ reg = <2200 100>;
+ interrupts = <2 2 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC3 in CODEC mode example
+ i2s@2400 { // PSC3
+ device_type = "i2s";
+ compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s";
+ reg = <2400 100>;
+ interrupts = <2 3 0>;
+ interrupt-parent = <500>;
+ };
+
+ // PSC4 unconfigured
+ //serial@2600 { // PSC4
+ // device_type = "serial";
+ // compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+ // reg = <2600 100>;
+ // interrupts = <2 b 0>;
+ // interrupt-parent = <500>;
+ //};
+
+ // PSC5 unconfigured
+ //serial@2800 { // PSC5
+ // device_type = "serial";
+ // compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+ // reg = <2800 100>;
+ // interrupts = <2 c 0>;
+ // interrupt-parent = <500>;
+ //};
+
+ // PSC6 in AC97 mode example
+ ac97@2c00 { // PSC6
+ device_type = "ac97";
+ compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97";
+ reg = <2c00 100>;
+ interrupts = <2 4 0>;
+ interrupt-parent = <500>;
+ };
+
+ ethernet@3000 {
+ device_type = "network";
+ compatible = "mpc5200b-fec\0mpc52xx-fec";
+ reg = <3000 800>;
+ mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+ interrupts = <2 5 0>;
+ interrupt-parent = <500>;
+ };
+
+ ata@3a00 {
+ device_type = "ata";
+ compatible = "mpc5200b-ata\0mpc52xx-ata";
+ reg = <3a00 100>;
+ interrupts = <2 7 0>;
+ interrupt-parent = <500>;
+ };
+
+ i2c@3d00 {
+ device_type = "i2c";
+ compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+ reg = <3d00 40>;
+ interrupts = <2 f 0>;
+ interrupt-parent = <500>;
+ };
+
+ i2c@3d40 {
+ device_type = "i2c";
+ compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+ reg = <3d40 40>;
+ interrupts = <2 10 0>;
+ interrupt-parent = <500>;
+ };
+ sram@8000 {
+ device_type = "sram";
+ compatible = "mpc5200b-sram\0mpc52xx-sram\0sram";
+ reg = <8000 4000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
index d7b985e6bd2..c4d9562cbaa 100644
--- a/arch/powerpc/boot/dts/mpc7448hpc2.dts
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -161,29 +161,41 @@
interrupt-map = <
/* IDSEL 0x11 */
- 0800 0 0 1 7400 24 0
- 0800 0 0 2 7400 25 0
- 0800 0 0 3 7400 26 0
- 0800 0 0 4 7400 27 0
+ 0800 0 0 1 1180 24 0
+ 0800 0 0 2 1180 25 0
+ 0800 0 0 3 1180 26 0
+ 0800 0 0 4 1180 27 0
/* IDSEL 0x12 */
- 1000 0 0 1 7400 25 0
- 1000 0 0 2 7400 26 0
- 1000 0 0 3 7400 27 0
- 1000 0 0 4 7400 24 0
+ 1000 0 0 1 1180 25 0
+ 1000 0 0 2 1180 26 0
+ 1000 0 0 3 1180 27 0
+ 1000 0 0 4 1180 24 0
/* IDSEL 0x13 */
- 1800 0 0 1 7400 26 0
- 1800 0 0 2 7400 27 0
- 1800 0 0 3 7400 24 0
- 1800 0 0 4 7400 25 0
+ 1800 0 0 1 1180 26 0
+ 1800 0 0 2 1180 27 0
+ 1800 0 0 3 1180 24 0
+ 1800 0 0 4 1180 25 0
/* IDSEL 0x14 */
- 2000 0 0 1 7400 27 0
- 2000 0 0 2 7400 24 0
- 2000 0 0 3 7400 25 0
- 2000 0 0 4 7400 26 0
+ 2000 0 0 1 1180 27 0
+ 2000 0 0 2 1180 24 0
+ 2000 0 0 3 1180 25 0
+ 2000 0 0 4 1180 26 0
>;
+ router@1180 {
+ linux,phandle = <1180>;
+ clock-frequency = <0>;
+ interrupt-controller;
+ device_type = "pic-router";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ built-in;
+ big-endian;
+ interrupts = <17 2>;
+ interrupt-parent = <7400>;
+ };
};
};
diff --git a/arch/powerpc/boot/flatdevtree.c b/arch/powerpc/boot/flatdevtree.c
new file mode 100644
index 00000000000..c76c194715b
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree.c
@@ -0,0 +1,880 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright Pantelis Antoniou 2006
+ * Copyright (C) IBM Corporation 2006
+ *
+ * Authors: Pantelis Antoniou <pantelis@embeddedalley.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ * Mark A. Greer <mgreer@mvista.com>
+ * Paul Mackerras <paulus@samba.org>
+ */
+
+#include <string.h>
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "flatdevtree_env.h"
+
+#define _ALIGN(x, al) (((x) + (al) - 1) & ~((al) - 1))
+
+/* Routines for keeping node ptrs returned by ft_find_device current */
+/* First entry not used b/c it would return 0 and be taken as NULL/error */
+static void *ft_node_add(struct ft_cxt *cxt, char *node)
+{
+ unsigned int i;
+
+ for (i = 1; i < cxt->nodes_used; i++) /* already there? */
+ if (cxt->node_tbl[i] == node)
+ return (void *)i;
+
+ if (cxt->nodes_used < cxt->node_max) {
+ cxt->node_tbl[cxt->nodes_used] = node;
+ return (void *)cxt->nodes_used++;
+ }
+
+ return NULL;
+}
+
+static char *ft_node_ph2node(struct ft_cxt *cxt, const void *phandle)
+{
+ unsigned int i = (unsigned int)phandle;
+
+ if (i < cxt->nodes_used)
+ return cxt->node_tbl[i];
+ return NULL;
+}
+
+static void ft_node_update_before(struct ft_cxt *cxt, char *addr, int shift)
+{
+ unsigned int i;
+
+ if (shift == 0)
+ return;
+
+ for (i = 1; i < cxt->nodes_used; i++)
+ if (cxt->node_tbl[i] < addr)
+ cxt->node_tbl[i] += shift;
+}
+
+static void ft_node_update_after(struct ft_cxt *cxt, char *addr, int shift)
+{
+ unsigned int i;
+
+ if (shift == 0)
+ return;
+
+ for (i = 1; i < cxt->nodes_used; i++)
+ if (cxt->node_tbl[i] >= addr)
+ cxt->node_tbl[i] += shift;
+}
+
+/* Struct used to return info from ft_next() */
+struct ft_atom {
+ u32 tag;
+ const char *name;
+ void *data;
+ u32 size;
+};
+
+/* Set ptrs to current one's info; return addr of next one */
+static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
+{
+ u32 sz;
+
+ if (p >= cxt->rgn[FT_STRUCT].start + cxt->rgn[FT_STRUCT].size)
+ return NULL;
+
+ ret->tag = be32_to_cpu(*(u32 *) p);
+ p += 4;
+
+ switch (ret->tag) { /* Tag */
+ case OF_DT_BEGIN_NODE:
+ ret->name = p;
+ ret->data = (void *)(p - 4); /* start of node */
+ p += _ALIGN(strlen(p) + 1, 4);
+ break;
+ case OF_DT_PROP:
+ ret->size = sz = be32_to_cpu(*(u32 *) p);
+ ret->name = cxt->str_anchor + be32_to_cpu(*(u32 *) (p + 4));
+ ret->data = (void *)(p + 8);
+ p += 8 + _ALIGN(sz, 4);
+ break;
+ case OF_DT_END_NODE:
+ case OF_DT_NOP:
+ break;
+ case OF_DT_END:
+ default:
+ p = NULL;
+ break;
+ }
+
+ return p;
+}
+
+#define HDR_SIZE _ALIGN(sizeof(struct boot_param_header), 8)
+#define EXPAND_INCR 1024 /* alloc this much extra when expanding */
+
+/* See if the regions are in the standard order and non-overlapping */
+static int ft_ordered(struct ft_cxt *cxt)
+{
+ char *p = (char *)cxt->bph + HDR_SIZE;
+ enum ft_rgn_id r;
+
+ for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+ if (p > cxt->rgn[r].start)
+ return 0;
+ p = cxt->rgn[r].start + cxt->rgn[r].size;
+ }
+ return p <= (char *)cxt->bph + cxt->max_size;
+}
+
+/* Copy the tree to a newly-allocated region and put things in order */
+static int ft_reorder(struct ft_cxt *cxt, int nextra)
+{
+ unsigned long tot;
+ enum ft_rgn_id r;
+ char *p, *pend;
+ int stroff;
+
+ tot = HDR_SIZE + EXPAND_INCR;
+ for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
+ tot += cxt->rgn[r].size;
+ if (nextra > 0)
+ tot += nextra;
+ tot = _ALIGN(tot, 8);
+
+ if (!cxt->realloc)
+ return 0;
+ p = cxt->realloc(NULL, tot);
+ if (!p)
+ return 0;
+
+ memcpy(p, cxt->bph, sizeof(struct boot_param_header));
+ /* offsets get fixed up later */
+
+ cxt->bph = (struct boot_param_header *)p;
+ cxt->max_size = tot;
+ pend = p + tot;
+ p += HDR_SIZE;
+
+ memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
+ cxt->rgn[FT_RSVMAP].start = p;
+ p += cxt->rgn[FT_RSVMAP].size;
+
+ memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
+ ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+ p - cxt->rgn[FT_STRUCT].start);
+ cxt->p += p - cxt->rgn[FT_STRUCT].start;
+ cxt->rgn[FT_STRUCT].start = p;
+
+ p = pend - cxt->rgn[FT_STRINGS].size;
+ memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
+ stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
+ cxt->rgn[FT_STRINGS].start = p;
+ cxt->str_anchor = p + stroff;
+
+ cxt->isordered = 1;
+ return 1;
+}
+
+static inline char *prev_end(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+ if (r > FT_RSVMAP)
+ return cxt->rgn[r - 1].start + cxt->rgn[r - 1].size;
+ return (char *)cxt->bph + HDR_SIZE;
+}
+
+static inline char *next_start(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+ if (r < FT_STRINGS)
+ return cxt->rgn[r + 1].start;
+ return (char *)cxt->bph + cxt->max_size;
+}
+
+/*
+ * See if we can expand region rgn by nextra bytes by using up
+ * free space after or before the region.
+ */
+static int ft_shuffle(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+ int nextra)
+{
+ char *p = *pp;
+ char *rgn_start, *rgn_end;
+
+ rgn_start = cxt->rgn[rgn].start;
+ rgn_end = rgn_start + cxt->rgn[rgn].size;
+ if (nextra <= 0 || rgn_end + nextra <= next_start(cxt, rgn)) {
+ /* move following stuff */
+ if (p < rgn_end) {
+ if (nextra < 0)
+ memmove(p, p - nextra, rgn_end - p + nextra);
+ else
+ memmove(p + nextra, p, rgn_end - p);
+ if (rgn == FT_STRUCT)
+ ft_node_update_after(cxt, p, nextra);
+ }
+ cxt->rgn[rgn].size += nextra;
+ if (rgn == FT_STRINGS)
+ /* assumes strings only added at beginning */
+ cxt->str_anchor += nextra;
+ return 1;
+ }
+ if (prev_end(cxt, rgn) <= rgn_start - nextra) {
+ /* move preceding stuff */
+ if (p > rgn_start) {
+ memmove(rgn_start - nextra, rgn_start, p - rgn_start);
+ if (rgn == FT_STRUCT)
+ ft_node_update_before(cxt, p, -nextra);
+ }
+ *pp -= nextra; /* caller's insertion point moves down with the region */
+ cxt->rgn[rgn].start -= nextra;
+ cxt->rgn[rgn].size += nextra;
+ return 1;
+ }
+ return 0;
+}
+
+static int ft_make_space(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+ int nextra)
+{
+ unsigned long size, ssize, tot;
+ char *str, *next;
+ enum ft_rgn_id r;
+
+ if (!cxt->isordered && !ft_reorder(cxt, nextra))
+ return 0;
+ if (ft_shuffle(cxt, pp, rgn, nextra))
+ return 1;
+
+ /* See if there is space after the strings section */
+ ssize = cxt->rgn[FT_STRINGS].size;
+ if (cxt->rgn[FT_STRINGS].start + ssize
+ < (char *)cxt->bph + cxt->max_size) {
+ /* move strings up as far as possible */
+ str = (char *)cxt->bph + cxt->max_size - ssize;
+ cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+ memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+ cxt->rgn[FT_STRINGS].start = str;
+ /* enough space now? */
+ if (rgn >= FT_STRUCT && ft_shuffle(cxt, pp, rgn, nextra))
+ return 1;
+ }
+
+ /* how much total free space is there following this region? */
+ tot = 0;
+ for (r = rgn; r < FT_STRINGS; ++r) {
+ char *r_end = cxt->rgn[r].start + cxt->rgn[r].size;
+ tot += next_start(cxt, r) - r_end;
+ }
+
+ /* cast is to shut gcc up; we know nextra >= 0 */
+ if (tot < (unsigned int)nextra) {
+ /* have to reallocate */
+ char *newp, *new_start;
+ int shift;
+
+ if (!cxt->realloc)
+ return 0;
+ size = _ALIGN(cxt->max_size + (nextra - tot) + EXPAND_INCR, 8);
+ newp = cxt->realloc(cxt->bph, size);
+ if (!newp)
+ return 0;
+ cxt->max_size = size;
+ shift = newp - (char *)cxt->bph;
+
+ if (shift) { /* realloc can return same addr */
+ cxt->bph = (struct boot_param_header *)newp;
+ ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+ shift);
+ for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+ new_start = cxt->rgn[r].start + shift;
+ cxt->rgn[r].start = new_start;
+ }
+ *pp += shift;
+ cxt->str_anchor += shift;
+ }
+
+ /* move strings up to the end */
+ str = newp + size - ssize;
+ cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+ memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+ cxt->rgn[FT_STRINGS].start = str;
+
+ if (ft_shuffle(cxt, pp, rgn, nextra))
+ return 1;
+ }
+
+ /* must be FT_RSVMAP and we need to move FT_STRUCT up */
+ if (rgn == FT_RSVMAP) {
+ next = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+ + nextra;
+ ssize = cxt->rgn[FT_STRUCT].size;
+ if (next + ssize >= cxt->rgn[FT_STRINGS].start)
+ return 0; /* "can't happen" */
+ memmove(next, cxt->rgn[FT_STRUCT].start, ssize);
+ ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start, nextra);
+ cxt->rgn[FT_STRUCT].start = next;
+
+ if (ft_shuffle(cxt, pp, rgn, nextra))
+ return 1;
+ }
+
+ return 0; /* "can't happen" */
+}
+
+static void ft_put_word(struct ft_cxt *cxt, u32 v)
+{
+ *(u32 *) cxt->p = cpu_to_be32(v);
+ cxt->p += 4;
+}
+
+static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
+{
+ unsigned long sza = _ALIGN(sz, 4);
+
+ /* zero out the alignment gap if necessary */
+ if (sz < sza)
+ *(u32 *) (cxt->p + sza - 4) = 0;
+
+ /* copy in the data */
+ memcpy(cxt->p, data, sz);
+
+ cxt->p += sza;
+}
+
+int ft_begin_node(struct ft_cxt *cxt, const char *name)
+{
+ unsigned long nlen = strlen(name) + 1;
+ unsigned long len = 8 + _ALIGN(nlen, 4);
+
+ if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+ return -1;
+ ft_put_word(cxt, OF_DT_BEGIN_NODE);
+ ft_put_bin(cxt, name, nlen);
+ return 0;
+}
+
+void ft_end_node(struct ft_cxt *cxt)
+{
+ ft_put_word(cxt, OF_DT_END_NODE);
+}
+
+void ft_nop(struct ft_cxt *cxt)
+{
+ if (ft_make_space(cxt, &cxt->p, FT_STRUCT, 4))
+ ft_put_word(cxt, OF_DT_NOP);
+}
+
+#define NO_STRING 0x7fffffff
+
+static int lookup_string(struct ft_cxt *cxt, const char *name)
+{
+ char *p, *end;
+
+ p = cxt->rgn[FT_STRINGS].start;
+ end = p + cxt->rgn[FT_STRINGS].size;
+ while (p < end) {
+ if (strcmp(p, (char *)name) == 0)
+ return p - cxt->str_anchor;
+ p += strlen(p) + 1;
+ }
+
+ return NO_STRING;
+}
+
+/* lookup string and insert if not found */
+static int map_string(struct ft_cxt *cxt, const char *name)
+{
+ int off;
+ char *p;
+
+ off = lookup_string(cxt, name);
+ if (off != NO_STRING)
+ return off;
+ p = cxt->rgn[FT_STRINGS].start;
+ if (!ft_make_space(cxt, &p, FT_STRINGS, strlen(name) + 1))
+ return NO_STRING;
+ strcpy(p, name);
+ return p - cxt->str_anchor;
+}
+
+int ft_prop(struct ft_cxt *cxt, const char *name, const void *data,
+ unsigned int sz)
+{
+ int off, len;
+
+ /* add the name to the strings table if it isn't there yet */
+ off = map_string(cxt, name);
+ if (off == NO_STRING)
+ return -1;
+
+ len = 12 + _ALIGN(sz, 4);
+ if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+ return -1;
+
+ ft_put_word(cxt, OF_DT_PROP);
+ ft_put_word(cxt, sz);
+ ft_put_word(cxt, off);
+ ft_put_bin(cxt, data, sz);
+ return 0;
+}
+
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
+{
+ return ft_prop(cxt, name, str, strlen(str) + 1);
+}
+
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val)
+{
+ u32 v = cpu_to_be32((u32) val);
+
+ return ft_prop(cxt, name, &v, 4);
+}
+
+/* Calculate the size of the reserved map */
+static unsigned long rsvmap_size(struct ft_cxt *cxt)
+{
+ struct ft_reserve *res;
+
+ res = (struct ft_reserve *)cxt->rgn[FT_RSVMAP].start;
+ while (res->start || res->len)
+ ++res;
+ return (char *)(res + 1) - cxt->rgn[FT_RSVMAP].start;
+}
+
+/* Calculate the size of the struct region by stepping through it */
+static unsigned long struct_size(struct ft_cxt *cxt)
+{
+ char *p = cxt->rgn[FT_STRUCT].start;
+ char *next;
+ struct ft_atom atom;
+
+ /* make check in ft_next happy */
+ if (cxt->rgn[FT_STRUCT].size == 0)
+ cxt->rgn[FT_STRUCT].size = 0xfffffffful - (unsigned long)p;
+
+ while ((next = ft_next(cxt, p, &atom)) != NULL)
+ p = next;
+ return p + 4 - cxt->rgn[FT_STRUCT].start;
+}
+
+/* add `adj' on to all string offset values in the struct area */
+static void adjust_string_offsets(struct ft_cxt *cxt, int adj)
+{
+ char *p = cxt->rgn[FT_STRUCT].start;
+ char *next;
+ struct ft_atom atom;
+ int off;
+
+ while ((next = ft_next(cxt, p, &atom)) != NULL) {
+ if (atom.tag == OF_DT_PROP) {
+ off = be32_to_cpu(*(u32 *) (p + 8));
+ *(u32 *) (p + 8) = cpu_to_be32(off + adj);
+ }
+ p = next;
+ }
+}
+
+/* start construction of the flat OF tree from scratch */
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+ void *(*realloc_fn) (void *, unsigned long))
+{
+ struct boot_param_header *bph = blob;
+ char *p;
+ struct ft_reserve *pres;
+
+ /* clear the cxt */
+ memset(cxt, 0, sizeof(*cxt));
+
+ cxt->bph = bph;
+ cxt->max_size = max_size;
+ cxt->realloc = realloc_fn;
+ cxt->isordered = 1;
+
+ /* zero everything in the header area */
+ memset(bph, 0, sizeof(*bph));
+
+ bph->magic = cpu_to_be32(OF_DT_HEADER);
+ bph->version = cpu_to_be32(0x10);
+ bph->last_comp_version = cpu_to_be32(0x10);
+
+ /* start pointers */
+ cxt->rgn[FT_RSVMAP].start = p = blob + HDR_SIZE;
+ cxt->rgn[FT_RSVMAP].size = sizeof(struct ft_reserve);
+ pres = (struct ft_reserve *)p;
+ cxt->rgn[FT_STRUCT].start = p += sizeof(struct ft_reserve);
+ cxt->rgn[FT_STRUCT].size = 4;
+ cxt->rgn[FT_STRINGS].start = blob + max_size;
+ cxt->rgn[FT_STRINGS].size = 0;
+
+ /* init rsvmap and struct */
+ pres->start = 0;
+ pres->len = 0;
+ *(u32 *) p = cpu_to_be32(OF_DT_END);
+
+ cxt->str_anchor = blob;
+}
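+
+/*
+ * Usage sketch for building a tree from scratch (illustrative only;
+ * blob, size and my_realloc stand for whatever the caller provides):
+ *
+ *	ft_begin(&cxt, blob, size, my_realloc);
+ *	ft_begin_tree(&cxt);
+ *	ft_begin_node(&cxt, "");
+ *	ft_prop_str(&cxt, "compatible", "some,board");
+ *	ft_end_node(&cxt);
+ *	ft_end_tree(&cxt);
+ */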
+
+/* open up an existing blob to be examined or modified */
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+ unsigned int max_find_device,
+ void *(*realloc_fn) (void *, unsigned long))
+{
+ struct boot_param_header *bph = blob;
+
+ /* can't cope with version < 16 */
+ if (be32_to_cpu(bph->version) < 16)
+ return -1;
+
+ /* clear the cxt */
+ memset(cxt, 0, sizeof(*cxt));
+
+ /* alloc node_tbl to track node ptrs returned by ft_find_device */
+ ++max_find_device;
+ cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
+ if (!cxt->node_tbl)
+ return -1;
+ memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
+ cxt->node_max = max_find_device;
+ cxt->nodes_used = 1; /* don't use idx 0 b/c looks like NULL */
+
+ cxt->bph = bph;
+ cxt->max_size = max_size;
+ cxt->realloc = realloc_fn;
+
+ cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
+ cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
+ cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
+ cxt->rgn[FT_STRUCT].size = struct_size(cxt);
+ cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
+ cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);
+ /* Leave as '0' to force first ft_make_space call to do a ft_reorder
+ * and move dt to an area allocated by realloc.
+ cxt->isordered = ft_ordered(cxt);
+ */
+
+ cxt->p = cxt->rgn[FT_STRUCT].start;
+ cxt->str_anchor = cxt->rgn[FT_STRINGS].start;
+
+ return 0;
+}
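+
+/*
+ * Usage sketch for examining an existing blob (illustrative only;
+ * the node path and property name are just examples):
+ *
+ *	struct ft_cxt cxt;
+ *	char path[256];
+ *	void *ph;
+ *
+ *	if (!ft_open(&cxt, blob, max_size, 16, my_realloc)) {
+ *		ph = ft_find_device(&cxt, "/chosen");
+ *		if (ph)
+ *			ft_get_prop(&cxt, ph, "linux,stdout-path",
+ *				    path, sizeof(path));
+ *	}
+ */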
+
+/* add a reserved physical area to the rsvmap */
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
+{
+ char *p;
+ struct ft_reserve *pres;
+
+ p = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+ - sizeof(struct ft_reserve);
+ if (!ft_make_space(cxt, &p, FT_RSVMAP, sizeof(struct ft_reserve)))
+ return -1;
+
+ pres = (struct ft_reserve *)p;
+ pres->start = cpu_to_be64(physaddr);
+ pres->len = cpu_to_be64(size);
+
+ return 0;
+}
+
+void ft_begin_tree(struct ft_cxt *cxt)
+{
+ cxt->p = cxt->rgn[FT_STRUCT].start;
+}
+
+void ft_end_tree(struct ft_cxt *cxt)
+{
+ struct boot_param_header *bph = cxt->bph;
+ char *p, *oldstr, *str, *endp;
+ unsigned long ssize;
+ int adj;
+
+ if (!cxt->isordered)
+ return; /* we haven't touched anything */
+
+ /* adjust string offsets */
+ oldstr = cxt->rgn[FT_STRINGS].start;
+ adj = cxt->str_anchor - oldstr;
+ if (adj)
+ adjust_string_offsets(cxt, adj);
+
+ /* make strings end on 8-byte boundary */
+ ssize = cxt->rgn[FT_STRINGS].size;
+ endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
+ + cxt->rgn[FT_STRUCT].size + ssize, 8);
+ str = endp - ssize;
+
+ /* move strings down to end of structs */
+ memmove(str, oldstr, ssize);
+ cxt->str_anchor = str;
+ cxt->rgn[FT_STRINGS].start = str;
+
+ /* fill in header fields */
+ p = (char *)bph;
+ bph->totalsize = cpu_to_be32(endp - p);
+ bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
+ bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
+ bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
+ bph->dt_strings_size = cpu_to_be32(ssize);
+}
+
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path)
+{
+ char *node;
+
+ /* require absolute path */
+ if (srch_path[0] != '/')
+ return NULL;
+ node = ft_find_descendent(cxt, cxt->rgn[FT_STRUCT].start, srch_path);
+ return ft_node_add(cxt, node);
+}
+
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path)
+{
+ struct ft_atom atom;
+ char *p;
+ const char *cp, *q;
+ int cl;
+ int depth = -1;
+ int dmatch = 0;
+ const char *path_comp[FT_MAX_DEPTH];
+
+ cp = srch_path;
+ cl = 0;
+ p = top;
+
+ while ((p = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ ++depth;
+ if (depth != dmatch)
+ break;
+ cxt->genealogy[depth] = atom.data;
+ cxt->genealogy[depth + 1] = NULL;
+ if (depth && !(strncmp(atom.name, cp, cl) == 0
+ && (atom.name[cl] == '/'
+ || atom.name[cl] == '\0'
+ || atom.name[cl] == '@')))
+ break;
+ path_comp[dmatch] = cp;
+ /* it matches so far, advance to next path component */
+ cp += cl;
+ /* skip slashes */
+ while (*cp == '/')
+ ++cp;
+ /* we're done if this is the end of the string */
+ if (*cp == 0)
+ return atom.data;
+ /* look for end of this component */
+ q = strchr(cp, '/');
+ if (q)
+ cl = q - cp;
+ else
+ cl = strlen(cp);
+ ++dmatch;
+ break;
+ case OF_DT_END_NODE:
+ if (depth == 0)
+ return NULL;
+ if (dmatch > depth) {
+ --dmatch;
+ cl = cp - path_comp[dmatch] - 1;
+ cp = path_comp[dmatch];
+ while (cl > 0 && cp[cl - 1] == '/')
+ --cl;
+ }
+ --depth;
+ break;
+ }
+ }
+ return NULL;
+}
+
+void *ft_get_parent(struct ft_cxt *cxt, const void *phandle)
+{
+ void *node;
+ int d;
+ struct ft_atom atom;
+ char *p;
+
+ node = ft_node_ph2node(cxt, phandle);
+ if (node == NULL)
+ return NULL;
+
+ for (d = 0; cxt->genealogy[d] != NULL; ++d)
+ if (cxt->genealogy[d] == node)
+ return cxt->genealogy[d > 0 ? d - 1 : 0];
+
+ /* have to do it the hard way... */
+ p = cxt->rgn[FT_STRUCT].start;
+ d = 0;
+ while ((p = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ cxt->genealogy[d] = atom.data;
+ if (node == atom.data) {
+ /* found it */
+ cxt->genealogy[d + 1] = NULL;
+ return d > 0 ? cxt->genealogy[d - 1] : node;
+ }
+ ++d;
+ break;
+ case OF_DT_END_NODE:
+ --d;
+ break;
+ }
+ }
+ return NULL;
+}
+
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+ void *buf, const unsigned int buflen)
+{
+ struct ft_atom atom;
+ void *node;
+ char *p;
+ int depth;
+ unsigned int size;
+
+ node = ft_node_ph2node(cxt, phandle);
+ if (node == NULL)
+ return -1;
+
+ depth = 0;
+ p = (char *)node;
+
+ while ((p = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ ++depth;
+ break;
+ case OF_DT_PROP:
+ if ((depth != 1) || strcmp(atom.name, propname))
+ break;
+ size = min(atom.size, buflen);
+ memcpy(buf, atom.data, size);
+ return atom.size;
+ case OF_DT_END_NODE:
+ if (--depth <= 0)
+ return -1;
+ }
+ }
+ return -1;
+}
+
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+ const void *buf, const unsigned int buflen)
+{
+ struct ft_atom atom;
+ void *node;
+ char *p, *next;
+ int nextra, depth;
+
+ node = ft_node_ph2node(cxt, phandle);
+ if (node == NULL)
+ return -1;
+
+ depth = 0;
+ p = node;
+
+ while ((next = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ ++depth;
+ break;
+ case OF_DT_END_NODE:
+ if (--depth > 0)
+ break;
+ /* haven't found the property, insert here */
+ cxt->p = p;
+ return ft_prop(cxt, propname, buf, buflen);
+ case OF_DT_PROP:
+ if ((depth != 1) || strcmp(atom.name, propname))
+ break;
+ /* found an existing property, overwrite it */
+ nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
+ cxt->p = atom.data;
+ if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
+ nextra))
+ return -1;
+ *(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
+ ft_put_bin(cxt, buf, buflen);
+ return 0;
+ }
+ p = next;
+ }
+ return -1;
+}
+
+int ft_del_prop(struct ft_cxt *cxt, const void *phandle, const char *propname)
+{
+ struct ft_atom atom;
+ void *node;
+ char *p, *next;
+ int size;
+
+ node = ft_node_ph2node(cxt, phandle);
+ if (node == NULL)
+ return -1;
+
+ p = node;
+ while ((next = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ case OF_DT_END_NODE:
+ return -1;
+ case OF_DT_PROP:
+ if (strcmp(atom.name, propname))
+ break;
+ /* found the property, remove it */
+ size = 12 + _ALIGN(atom.size, 4);
+ cxt->p = p;
+ if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, -size))
+ return -1;
+ return 0;
+ }
+ p = next;
+ }
+ return -1;
+}
+
+void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *path)
+{
+ struct ft_atom atom;
+ char *p, *next;
+ int depth = 0;
+
+ p = cxt->rgn[FT_STRUCT].start;
+ while ((next = ft_next(cxt, p, &atom)) != NULL) {
+ switch (atom.tag) {
+ case OF_DT_BEGIN_NODE:
+ ++depth;
+ if (depth == 1 && strcmp(atom.name, path) == 0)
+ /* duplicate node path, return error */
+ return NULL;
+ break;
+ case OF_DT_END_NODE:
+ --depth;
+ if (depth > 0)
+ break;
+ /* end of node, insert here */
+ cxt->p = p;
+ ft_begin_node(cxt, path);
+ ft_end_node(cxt);
+ return p;
+ }
+ p = next;
+ }
+ return NULL;
+}
diff --git a/arch/powerpc/boot/flatdevtree.h b/arch/powerpc/boot/flatdevtree.h
index 761c8dc8400..b9cd9f61f35 100644
--- a/arch/powerpc/boot/flatdevtree.h
+++ b/arch/powerpc/boot/flatdevtree.h
@@ -17,7 +17,7 @@
#ifndef FLATDEVTREE_H
#define FLATDEVTREE_H
-#include "types.h"
+#include "flatdevtree_env.h"
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -43,4 +43,64 @@ struct boot_param_header {
u32 dt_strings_size; /* size of the DT strings block */
};
+struct ft_reserve {
+ u64 start;
+ u64 len;
+};
+
+struct ft_region {
+ char *start;
+ unsigned long size;
+};
+
+enum ft_rgn_id {
+ FT_RSVMAP,
+ FT_STRUCT,
+ FT_STRINGS,
+ FT_N_REGION
+};
+
+#define FT_MAX_DEPTH 50
+
+struct ft_cxt {
+ struct boot_param_header *bph;
+ int max_size; /* maximum size of tree */
+ int isordered; /* everything in standard order */
+ void *(*realloc)(void *, unsigned long);
+ char *str_anchor;
+ char *p; /* current insertion point in structs */
+ struct ft_region rgn[FT_N_REGION];
+ void *genealogy[FT_MAX_DEPTH+1];
+ char **node_tbl;
+ unsigned int node_max;
+ unsigned int nodes_used;
+};
+
+int ft_begin_node(struct ft_cxt *cxt, const char *name);
+void ft_end_node(struct ft_cxt *cxt);
+
+void ft_begin_tree(struct ft_cxt *cxt);
+void ft_end_tree(struct ft_cxt *cxt);
+
+void ft_nop(struct ft_cxt *cxt);
+int ft_prop(struct ft_cxt *cxt, const char *name,
+ const void *data, unsigned int sz);
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str);
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val);
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+ void *(*realloc_fn)(void *, unsigned long));
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+ unsigned int max_find_device,
+ void *(*realloc_fn)(void *, unsigned long));
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size);
+
+void ft_dump_blob(const void *bphp);
+void ft_merge_blob(struct ft_cxt *cxt, void *blob);
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path);
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path);
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+ void *buf, const unsigned int buflen);
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+ const void *buf, const unsigned int buflen);
+
#endif /* FLATDEVTREE_H */
diff --git a/arch/powerpc/boot/flatdevtree_env.h b/arch/powerpc/boot/flatdevtree_env.h
new file mode 100644
index 00000000000..83bc1c71883
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree_env.h
@@ -0,0 +1,47 @@
+/*
+ * This file adds the header file glue so that the shared files
+ * flatdevicetree.[ch] can compile and work in the powerpc bootwrapper.
+ *
+ * strncmp & strchr copied from <file:lib/strings.c>
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Maintained by: Mark A. Greer <mgreer@mvista.com>
+ */
+#ifndef _PPC_BOOT_FLATDEVTREE_ENV_H_
+#define _PPC_BOOT_FLATDEVTREE_ENV_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "ops.h"
+
+#define be16_to_cpu(x) (x)
+#define cpu_to_be16(x) (x)
+#define be32_to_cpu(x) (x)
+#define cpu_to_be32(x) (x)
+#define be64_to_cpu(x) (x)
+#define cpu_to_be64(x) (x)
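+
+/*
+ * The bootwrapper runs the CPU big-endian, so the big-endian
+ * conversions above collapse to the identity.
+ */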
+
+static inline int strncmp(const char *cs, const char *ct, size_t count)
+{
+ signed char __res = 0;
+
+ while (count) {
+ if ((__res = *cs - *ct++) != 0 || !*cs++)
+ break;
+ count--;
+ }
+ return __res;
+}
+
+static inline char *strchr(const char *s, int c)
+{
+ for (; *s != (char)c; ++s)
+ if (*s == '\0')
+ return NULL;
+ return (char *)s;
+}
+
+#endif /* _PPC_BOOT_FLATDEVTREE_ENV_H_ */
diff --git a/arch/powerpc/boot/flatdevtree_misc.c b/arch/powerpc/boot/flatdevtree_misc.c
new file mode 100644
index 00000000000..04da38fa477
--- /dev/null
+++ b/arch/powerpc/boot/flatdevtree_misc.c
@@ -0,0 +1,51 @@
+/*
+ * This file does the necessary interface mapping between the bootwrapper
+ * device tree operations and the interface provided by shared source
+ * files flatdevicetree.[ch].
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "ops.h"
+
+static struct ft_cxt cxt;
+
+static void *ft_finddevice(const char *name)
+{
+ return ft_find_device(&cxt, name);
+}
+
+static int ft_getprop(const void *phandle, const char *propname, void *buf,
+ const int buflen)
+{
+ return ft_get_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static int ft_setprop(const void *phandle, const char *propname,
+ const void *buf, const int buflen)
+{
+ return ft_set_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static unsigned long ft_finalize(void)
+{
+ ft_end_tree(&cxt);
+ return (unsigned long)cxt.bph;
+}
+
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device)
+{
+ dt_ops.finddevice = ft_finddevice;
+ dt_ops.getprop = ft_getprop;
+ dt_ops.setprop = ft_setprop;
+ dt_ops.finalize = ft_finalize;
+
+ return ft_open(&cxt, dt_blob, max_size, max_find_device,
+ platform_ops.realloc);
+}
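+
+/*
+ * A platform that wants to edit the flat tree would typically call
+ * ft_init() once it knows where the blob lives, e.g. (sketch only; the
+ * extra space and table size are made-up numbers):
+ *
+ *	ft_init(_dtb_start, _dtb_end - _dtb_start + 0x1000, 16);
+ */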
diff --git a/arch/powerpc/boot/io.h b/arch/powerpc/boot/io.h
new file mode 100644
index 00000000000..32974ed49e0
--- /dev/null
+++ b/arch/powerpc/boot/io.h
@@ -0,0 +1,53 @@
+#ifndef _IO_H
+#define _IO_H
+/*
+ * Low-level I/O routines.
+ *
+ * Copied from <file:include/asm-powerpc/io.h> (which has no copyright)
+ */
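+
+/*
+ * The _le32 accessors use the byte-reversed load/store instructions
+ * (lwbrx/stwbrx) so little-endian device registers can be read and
+ * written from the big-endian CPU; the trailing twi/isync and sync
+ * keep the accesses ordered with respect to surrounding code.
+ */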
+static inline int in_8(const volatile unsigned char *addr)
+{
+ int ret;
+
+ __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+ : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+static inline void out_8(volatile unsigned char *addr, int val)
+{
+ __asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+ : "=m" (*addr) : "r" (val));
+}
+
+static inline unsigned in_le32(const volatile unsigned *addr)
+{
+ unsigned ret;
+
+ __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+ : "=r" (ret) : "r" (addr), "m" (*addr));
+ return ret;
+}
+
+static inline unsigned in_be32(const volatile unsigned *addr)
+{
+ unsigned ret;
+
+ __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+ : "=r" (ret) : "m" (*addr));
+ return ret;
+}
+
+static inline void out_le32(volatile unsigned *addr, int val)
+{
+ __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+ : "r" (val), "r" (addr));
+}
+
+static inline void out_be32(volatile unsigned *addr, int val)
+{
+ __asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+ : "=m" (*addr) : "r" (val));
+}
+
+#endif /* _IO_H */
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d719bb9333d..6f6b50d238b 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -27,6 +27,8 @@ extern char _vmlinux_start[];
extern char _vmlinux_end[];
extern char _initrd_start[];
extern char _initrd_end[];
+extern char _dtb_start[];
+extern char _dtb_end[];
struct addr_range {
unsigned long addr;
@@ -167,7 +169,7 @@ static int is_elf32(void *hdr)
return 1;
}
-static void prep_kernel(unsigned long *a1, unsigned long *a2)
+static void prep_kernel(unsigned long a1, unsigned long a2)
{
int len;
@@ -203,11 +205,14 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
}
/*
- * Now we try to alloc memory for the initrd (and copy it there)
+ * Now find the initrd
+ *
+ * First see if we have an image attached to us. If so
+ * allocate memory for it and copy it there.
*/
initrd.size = (unsigned long)(_initrd_end - _initrd_start);
initrd.memsize = initrd.size;
- if ( initrd.size > 0 ) {
+ if (initrd.size > 0) {
printf("Allocating 0x%lx bytes for initrd ...\n\r",
initrd.size);
initrd.addr = (unsigned long)malloc((u32)initrd.size);
@@ -216,8 +221,6 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
"ramdisk !\n\r");
exit();
}
- *a1 = initrd.addr;
- *a2 = initrd.size;
printf("initial ramdisk moving 0x%lx <- 0x%lx "
"(0x%lx bytes)\n\r", initrd.addr,
(unsigned long)_initrd_start, initrd.size);
@@ -225,6 +228,12 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
initrd.size);
printf("initrd head: 0x%lx\n\r",
*((unsigned long *)initrd.addr));
+ } else if (a2 != 0) {
+ /* Otherwise, see if yaboot or another loader gave us an initrd */
+ initrd.addr = a1;
+ initrd.memsize = initrd.size = a2;
+ printf("Using loader supplied initrd at 0x%lx (0x%lx bytes)\n\r",
+ initrd.addr, initrd.size);
}
/* Eventually gunzip the kernel */
@@ -250,10 +259,6 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
flush_cache((void *)vmlinux.addr, vmlinux.size);
}
-void __attribute__ ((weak)) ft_init(void *dt_blob)
-{
-}
-
/* A buffer that may be edited by tools operating on a zImage binary so as to
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in its own section so that tools may locate it more easily.
@@ -285,36 +290,22 @@ static void set_cmdline(char *buf)
setprop(devp, "bootargs", buf, strlen(buf) + 1);
}
-/* Section where ft can be tacked on after zImage is built */
-union blobspace {
- struct boot_param_header hdr;
- char space[8*1024];
-} dt_blob __attribute__((__section__("__builtin_ft")));
-
struct platform_ops platform_ops;
struct dt_ops dt_ops;
struct console_ops console_ops;
void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
{
- int have_dt = 0;
kernel_entry_t kentry;
char cmdline[COMMAND_LINE_SIZE];
+ unsigned long ft_addr = 0;
memset(__bss_start, 0, _end - __bss_start);
memset(&platform_ops, 0, sizeof(platform_ops));
memset(&dt_ops, 0, sizeof(dt_ops));
memset(&console_ops, 0, sizeof(console_ops));
- /* Override the dt_ops and device tree if there was an flat dev
- * tree attached to the zImage.
- */
- if (dt_blob.hdr.magic == OF_DT_HEADER) {
- have_dt = 1;
- ft_init(&dt_blob);
- }
-
- if (platform_init(promptr))
+ if (platform_init(promptr, _dtb_start, _dtb_end))
exit();
if (console_ops.open && (console_ops.open() < 0))
exit();
@@ -324,7 +315,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r",
_start, sp);
- prep_kernel(&a1, &a2);
+ prep_kernel(a1, a2);
/* If cmdline came from zimage wrapper or if we can edit the one
* in the dt, print it out and edit it, if possible.
@@ -338,15 +329,23 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
set_cmdline(cmdline);
}
+ printf("Finalizing device tree...");
+ if (dt_ops.finalize)
+ ft_addr = dt_ops.finalize();
+ if (ft_addr)
+ printf(" flat tree at 0x%lx\n\r", ft_addr);
+ else
+ printf(" using OF tree (promptr=%p)\n\r", promptr);
+
if (console_ops.close)
console_ops.close();
kentry = (kernel_entry_t) vmlinux.addr;
- if (have_dt)
- kentry(dt_ops.ft_addr(), 0, NULL);
+ if (ft_addr)
+ kentry(ft_addr, 0, NULL);
else
/* XXX initrd addr/size should be passed in properties */
- kentry(a1, a2, promptr);
+ kentry(initrd.addr, initrd.size, promptr);
/* console closed so printf below may not work */
printf("Error: Linux kernel returned to zImage boot wrapper!\n\r");
diff --git a/arch/powerpc/boot/mktree.c b/arch/powerpc/boot/mktree.c
new file mode 100644
index 00000000000..4cb89299365
--- /dev/null
+++ b/arch/powerpc/boot/mktree.c
@@ -0,0 +1,152 @@
+/*
+ * Makes a tree bootable image for IBM Evaluation boards.
+ * Basically, just take a zImage, skip the ELF header, and stuff
+ * a 32 byte header on the front.
+ *
+ * We use htonl, which is a network macro, to make sure we're doing
+ * The Right Thing on an LE machine. It's non-obvious, but it should
+ * work on anything BSD'ish.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#ifdef __sun__
+#include <inttypes.h>
+#else
+#include <stdint.h>
+#endif
+
+/* This gets tacked on the front of the image. There are also a few
+ * bytes allocated after the _start label used by the boot rom (see
+ * head.S for details).
+ */
+typedef struct boot_block {
+ uint32_t bb_magic; /* 0x0052504F */
+ uint32_t bb_dest; /* Target address of the image */
+ uint32_t bb_num_512blocks; /* Size, rounded-up, in 512 byte blks */
+ uint32_t bb_debug_flag; /* Run debugger or image after load */
+ uint32_t bb_entry_point; /* The image address to start */
+ uint32_t bb_checksum; /* 32 bit checksum including header */
+ uint32_t reserved[2];
+} boot_block_t;
+
+#define IMGBLK 512
+char tmpbuf[IMGBLK];
+
+int main(int argc, char *argv[])
+{
+ int in_fd, out_fd;
+ int nblks, i;
+ uint cksum, *cp;
+ struct stat st;
+ boot_block_t bt;
+
+ if (argc < 3) {
+ fprintf(stderr, "usage: %s <zImage-file> <boot-image> [entry-point]\n",argv[0]);
+ exit(1);
+ }
+
+ if (stat(argv[1], &st) < 0) {
+ perror("stat");
+ exit(2);
+ }
+
+ nblks = (st.st_size + IMGBLK) / IMGBLK;
+
+ bt.bb_magic = htonl(0x0052504F);
+
+ /* If we have the optional entry point parameter, use it */
+ if (argc == 4)
+ bt.bb_dest = bt.bb_entry_point = htonl(strtoul(argv[3], NULL, 0));
+ else
+ bt.bb_dest = bt.bb_entry_point = htonl(0x500000);
+
+ /* We know these from the linker command.
+ * ...and then move it up into memory a little more so the
+ * relocation can happen.
+ */
+ bt.bb_num_512blocks = htonl(nblks);
+ bt.bb_debug_flag = 0;
+
+ bt.bb_checksum = 0;
+
+ /* To be neat and tidy :-).
+ */
+ bt.reserved[0] = 0;
+ bt.reserved[1] = 0;
+
+ if ((in_fd = open(argv[1], O_RDONLY)) < 0) {
+ perror("zImage open");
+ exit(3);
+ }
+
+ if ((out_fd = open(argv[2], (O_RDWR | O_CREAT | O_TRUNC), 0666)) < 0) {
+ perror("bootfile open");
+ exit(3);
+ }
+
+ cksum = 0;
+ cp = (void *)&bt;
+ for (i=0; i<sizeof(bt)/sizeof(uint); i++)
+ cksum += *cp++;
+
+ /* Assume zImage is an ELF file, and skip the 64K header.
+ */
+ if (read(in_fd, tmpbuf, IMGBLK) != IMGBLK) {
+ fprintf(stderr, "%s is too small to be an ELF image\n",
+ argv[1]);
+ exit(4);
+ }
+
+ if ((*(uint *)tmpbuf) != htonl(0x7f454c46)) {
+ fprintf(stderr, "%s is not an ELF image\n", argv[1]);
+ exit(4);
+ }
+
+ if (lseek(in_fd, (64 * 1024), SEEK_SET) < 0) {
+ fprintf(stderr, "%s failed to seek in ELF image\n", argv[1]);
+ exit(4);
+ }
+
+ nblks -= (64 * 1024) / IMGBLK;
+
+ /* And away we go......
+ */
+ if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+ perror("boot-image write");
+ exit(5);
+ }
+
+ while (nblks-- > 0) {
+ if (read(in_fd, tmpbuf, IMGBLK) < 0) {
+ perror("zImage read");
+ exit(5);
+ }
+ cp = (uint *)tmpbuf;
+ for (i=0; i<sizeof(tmpbuf)/sizeof(uint); i++)
+ cksum += *cp++;
+ if (write(out_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) {
+ perror("boot-image write");
+ exit(5);
+ }
+ }
+
+ /* rewrite the header with the computed checksum.
+ */
+ bt.bb_checksum = htonl(cksum);
+ if (lseek(out_fd, 0, SEEK_SET) < 0) {
+ perror("rewrite seek");
+ exit(1);
+ }
+ if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+ perror("boot-image rewrite");
+ exit(1);
+ }
+
+ exit(0);
+}
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
new file mode 100644
index 00000000000..1ffe72e35cd
--- /dev/null
+++ b/arch/powerpc/boot/ns16550.c
@@ -0,0 +1,74 @@
+/*
+ * 16550 serial console support.
+ *
+ * Original copied from <file:arch/ppc/boot/common/ns16550.c>
+ * (which had no copyright)
+ * Modifications: 2006 (c) MontaVista Software, Inc.
+ *
+ * Modified by: Mark A. Greer <mgreer@mvista.com>
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+#define UART_DLL 0 /* Out: Divisor Latch Low */
+#define UART_DLM 1 /* Out: Divisor Latch High */
+#define UART_FCR 2 /* Out: FIFO Control Register */
+#define UART_LCR 3 /* Out: Line Control Register */
+#define UART_MCR 4 /* Out: Modem Control Register */
+#define UART_LSR 5 /* In: Line Status Register */
+#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
+#define UART_LSR_DR 0x01 /* Receiver data ready */
+#define UART_MSR 6 /* In: Modem Status Register */
+#define UART_SCR 7 /* I/O: Scratch Register */
+
+static unsigned char *reg_base;
+static u32 reg_shift;
+
+static int ns16550_open(void)
+{
+ out_8(reg_base + (UART_FCR << reg_shift), 0x06);
+ return 0;
+}
+
+static void ns16550_putc(unsigned char c)
+{
+ while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0);
+ out_8(reg_base, c);
+}
+
+static unsigned char ns16550_getc(void)
+{
+ while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0);
+ return in_8(reg_base);
+}
+
+static u8 ns16550_tstc(void)
+{
+ return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0);
+}
+
+int ns16550_console_init(void *devp, struct serial_console_data *scdp)
+{
+ int n;
+
+ n = getprop(devp, "virtual-reg", &reg_base, sizeof(reg_base));
+ if (n != sizeof(reg_base))
+ return -1;
+
+ n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
+ if (n != sizeof(reg_shift))
+ reg_shift = 0;
+
+ scdp->open = ns16550_open;
+ scdp->putc = ns16550_putc;
+ scdp->getc = ns16550_getc;
+ scdp->tstc = ns16550_tstc;
+ scdp->close = NULL;
+
+ return 0;
+}
diff --git a/arch/powerpc/boot/of.c b/arch/powerpc/boot/of.c
index 3a71845afc6..0182f384f3e 100644
--- a/arch/powerpc/boot/of.c
+++ b/arch/powerpc/boot/of.c
@@ -256,24 +256,18 @@ static void of_console_write(char *buf, int len)
call_prom("write", 3, 1, of_stdout_handle, buf, len);
}
-int platform_init(void *promptr)
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end)
{
- platform_ops.fixups = NULL;
platform_ops.image_hdr = of_image_hdr;
platform_ops.malloc = of_try_claim;
- platform_ops.free = NULL;
platform_ops.exit = of_exit;
dt_ops.finddevice = of_finddevice;
dt_ops.getprop = of_getprop;
dt_ops.setprop = of_setprop;
- dt_ops.translate_addr = NULL;
console_ops.open = of_console_open;
console_ops.write = of_console_write;
- console_ops.edit_cmdline = NULL;
- console_ops.close = NULL;
- console_ops.data = NULL;
prom = (int (*)(void *))promptr;
return 0;
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 135eb4bb03b..8abb6516bb7 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -22,7 +22,8 @@ struct platform_ops {
void (*fixups)(void);
void (*image_hdr)(const void *);
void * (*malloc)(u32 size);
- void (*free)(void *ptr, u32 size);
+ void (*free)(void *ptr);
+ void * (*realloc)(void *ptr, unsigned long size);
void (*exit)(void);
};
extern struct platform_ops platform_ops;
@@ -30,13 +31,11 @@ extern struct platform_ops platform_ops;
/* Device Tree operations */
struct dt_ops {
void * (*finddevice)(const char *name);
- int (*getprop)(const void *node, const char *name, void *buf,
+ int (*getprop)(const void *phandle, const char *name, void *buf,
const int buflen);
- int (*setprop)(const void *node, const char *name,
+ int (*setprop)(const void *phandle, const char *name,
const void *buf, const int buflen);
- u64 (*translate_addr)(const char *path, const u32 *in_addr,
- const u32 addr_len);
- unsigned long (*ft_addr)(void);
+ unsigned long (*finalize)(void);
};
extern struct dt_ops dt_ops;
@@ -59,10 +58,13 @@ struct serial_console_data {
void (*close)(void);
};
-extern int platform_init(void *promptr);
-extern void simple_alloc_init(void);
-extern void ft_init(void *dt_blob);
-extern int serial_console_init(void);
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end);
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device);
+int serial_console_init(void);
+int ns16550_console_init(void *devp, struct serial_console_data *scdp);
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+ u32 max_allocs);
+
static inline void *finddevice(const char *name)
{
@@ -84,10 +86,10 @@ static inline void *malloc(u32 size)
return (platform_ops.malloc) ? platform_ops.malloc(size) : NULL;
}
-static inline void free(void *ptr, u32 size)
+static inline void free(void *ptr)
{
if (platform_ops.free)
- platform_ops.free(ptr, size);
+ platform_ops.free(ptr);
}
static inline void exit(void)
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
new file mode 100644
index 00000000000..e8de4cf59be
--- /dev/null
+++ b/arch/powerpc/boot/serial.c
@@ -0,0 +1,142 @@
+/*
+ * Generic serial console support
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Code in serial_edit_cmdline() copied from <file:arch/ppc/boot/simple/misc.c>
+ * and was written by Matt Porter <mporter@kernel.crashing.org>.
+ *
+ * 2001,2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+extern void udelay(long delay);
+
+static int serial_open(void)
+{
+ struct serial_console_data *scdp = console_ops.data;
+ return scdp->open();
+}
+
+static void serial_write(char *buf, int len)
+{
+ struct serial_console_data *scdp = console_ops.data;
+
+ while (*buf != '\0')
+ scdp->putc(*buf++);
+}
+
+static void serial_edit_cmdline(char *buf, int len)
+{
+ int timer = 0, count;
+ char ch, *cp;
+ struct serial_console_data *scdp = console_ops.data;
+
+ cp = buf;
+ count = strlen(buf);
+ cp = &buf[count];
+ count++;
+
+ while (timer++ < 5*1000) {
+ if (scdp->tstc()) {
+ while (((ch = scdp->getc()) != '\n') && (ch != '\r')) {
+ /* Test for backspace/delete */
+ if ((ch == '\b') || (ch == '\177')) {
+ if (cp != buf) {
+ cp--;
+ count--;
+ printf("\b \b");
+ }
+ /* Test for ^x/^u (and wipe the line) */
+ } else if ((ch == '\030') || (ch == '\025')) {
+ while (cp != buf) {
+ cp--;
+ count--;
+ printf("\b \b");
+ }
+ } else if (count < len) {
+ *cp++ = ch;
+ count++;
+ scdp->putc(ch);
+ }
+ }
+ break; /* Exit 'timer' loop */
+ }
+ udelay(1000); /* 1 msec */
+ }
+ *cp = 0;
+}
+
+static void serial_close(void)
+{
+ struct serial_console_data *scdp = console_ops.data;
+
+ if (scdp->close)
+ scdp->close();
+}
+
+static void *serial_get_stdout_devp(void)
+{
+ void *devp;
+ char devtype[MAX_PROP_LEN];
+ char path[MAX_PATH_LEN];
+
+ devp = finddevice("/chosen");
+ if (devp == NULL)
+ goto err_out;
+
+ if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
+ devp = finddevice(path);
+ if (devp == NULL)
+ goto err_out;
+
+ if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
+ && !strcmp(devtype, "serial"))
+ return devp;
+ }
+err_out:
+ return NULL;
+}
+
+static struct serial_console_data serial_cd;
+
+/* Node's "compatible" property determines which serial driver to use */
+int serial_console_init(void)
+{
+ void *devp;
+ int rc = -1;
+ char compat[MAX_PROP_LEN];
+
+ devp = serial_get_stdout_devp();
+ if (devp == NULL)
+ goto err_out;
+
+ if (getprop(devp, "compatible", compat, sizeof(compat)) < 0)
+ goto err_out;
+
+ if (!strcmp(compat, "ns16550"))
+ rc = ns16550_console_init(devp, &serial_cd);
+
+ /* Add other serial console driver calls here */
+
+ if (!rc) {
+ console_ops.open = serial_open;
+ console_ops.write = serial_write;
+ console_ops.edit_cmdline = serial_edit_cmdline;
+ console_ops.close = serial_close;
+ console_ops.data = &serial_cd;
+
+ return 0;
+ }
+err_out:
+ return -1;
+}
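+
+/*
+ * Example of a stdout node this code can drive (illustrative only;
+ * the unit address and register values are made up):
+ *
+ *	serial@ef600300 {
+ *		device_type = "serial";
+ *		compatible = "ns16550";
+ *		virtual-reg = <0xef600300>;
+ *		reg-shift = <0>;
+ *	};
+ *
+ * with /chosen/linux,stdout-path pointing at that node.
+ */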
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
new file mode 100644
index 00000000000..cfe3a7505ba
--- /dev/null
+++ b/arch/powerpc/boot/simple_alloc.c
@@ -0,0 +1,149 @@
+/*
+ * Implement primitive realloc(3) functionality.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <stddef.h>
+#include "types.h"
+#include "page.h"
+#include "string.h"
+#include "ops.h"
+
+#define ENTRY_BEEN_USED 0x01
+#define ENTRY_IN_USE 0x02
+
+static struct alloc_info {
+ u32 flags;
+ u32 base;
+ u32 size;
+} *alloc_tbl;
+
+static u32 tbl_entries;
+static u32 alloc_min;
+static u32 next_base;
+static u32 space_left;
+
+/*
+ * First time an entry is used, its base and size are set.
+ * An entry can be freed and re-malloc'd but its base & size don't change.
+ * Should be smart enough for needs of bootwrapper.
+ */
+static void *simple_malloc(u32 size)
+{
+ u32 i;
+ struct alloc_info *p = alloc_tbl;
+
+ if (size == 0)
+ goto err_out;
+
+ size = _ALIGN_UP(size, alloc_min);
+
+ for (i=0; i<tbl_entries; i++, p++)
+ if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */
+ if (size <= space_left) {
+ p->base = next_base;
+ p->size = size;
+ p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
+ next_base += size;
+ space_left -= size;
+ return (void *)p->base;
+ }
+ goto err_out; /* not enough space left */
+ }
+ /* reuse an entry keeping same base & size */
+ else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) {
+ p->flags |= ENTRY_IN_USE;
+ return (void *)p->base;
+ }
+err_out:
+ return NULL;
+}
+
+static struct alloc_info *simple_find_entry(void *ptr)
+{
+ u32 i;
+ struct alloc_info *p = alloc_tbl;
+
+ for (i=0; i<tbl_entries; i++,p++) {
+ if (!(p->flags & ENTRY_BEEN_USED))
+ break;
+ if ((p->flags & ENTRY_IN_USE) && (p->base == (u32)ptr))
+ return p;
+ }
+ return NULL;
+}
+
+static void simple_free(void *ptr)
+{
+ struct alloc_info *p = simple_find_entry(ptr);
+
+ if (p != NULL)
+ p->flags &= ~ENTRY_IN_USE;
+}
+
+/*
+ * Change size of area pointed to by 'ptr' to 'size'.
+ * If 'ptr' is NULL, then it's a malloc(). If 'size' is 0, then it's a free().
+ * 'ptr' must be NULL or a pointer to a non-freed area previously returned by
+ * simple_realloc() or simple_malloc().
+ */
+static void *simple_realloc(void *ptr, unsigned long size)
+{
+ struct alloc_info *p;
+ void *new;
+
+ if (size == 0) {
+ simple_free(ptr);
+ return NULL;
+ }
+
+ if (ptr == NULL)
+ return simple_malloc(size);
+
+ p = simple_find_entry(ptr);
+ if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
+ return NULL;
+ if (size <= p->size) /* fits in current block */
+ return ptr;
+
+ new = simple_malloc(size);
+ if (new == NULL) /* out of space; leave the original block alone */
+ return NULL;
+ memcpy(new, ptr, p->size);
+ simple_free(ptr);
+ return new;
+}
+
+/*
+ * Returns addr of first byte after heap so caller can see if it took
+ * too much space. If so, change args & try again.
+ */
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+ u32 max_allocs)
+{
+ u32 heap_base, tbl_size;
+
+ heap_size = _ALIGN_UP(heap_size, granularity);
+ alloc_min = granularity;
+ tbl_entries = max_allocs;
+
+ tbl_size = tbl_entries * sizeof(struct alloc_info);
+
+ alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
+ memset(alloc_tbl, 0, tbl_size);
+
+ heap_base = _ALIGN_UP((u32)alloc_tbl + tbl_size, alloc_min);
+
+ next_base = heap_base;
+ space_left = heap_size;
+
+ platform_ops.malloc = simple_malloc;
+ platform_ops.free = simple_free;
+ platform_ops.realloc = simple_realloc;
+
+ return (void *)(heap_base + heap_size);
+}
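+
+/*
+ * Usage sketch (illustrative; the scratch area and sizes are examples):
+ *
+ *	static char scratch[4 * 1024 * 1024];
+ *	...
+ *	simple_alloc_init(scratch, sizeof(scratch), 32, 64);
+ *
+ * after which the wrapper's malloc()/free()/realloc() are serviced
+ * from 'scratch'.
+ */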
diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c
index 6d5f6382e1c..0a9feeb9834 100644
--- a/arch/powerpc/boot/stdio.c
+++ b/arch/powerpc/boot/stdio.c
@@ -320,6 +320,7 @@ printf(const char *fmt, ...)
va_start(args, fmt);
n = vsprintf(sprint_buf, fmt, args);
va_end(args);
- console_ops.write(sprint_buf, n);
+ if (console_ops.write)
+ console_ops.write(sprint_buf, n);
return n;
}
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
new file mode 100644
index 00000000000..427ddfc1199
--- /dev/null
+++ b/arch/powerpc/boot/util.S
@@ -0,0 +1,88 @@
+/*
+ * Copied from <file:arch/powerpc/kernel/misc_32.S>
+ *
+ * This file contains miscellaneous low-level functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * kexec bits:
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include "ppc_asm.h"
+
+#define SPRN_PVR 0x11F /* Processor Version Register */
+
+ .text
+
+/* udelay (on non-601 processors) needs to know the period of the
+ * timebase in nanoseconds. This used to be hardcoded to be 60ns
+ * (period of 66MHz/4). Now a variable is used that is initialized to
+ * 60 for backward compatibility, but it can be overridden as necessary
+ * with code something like this:
+ * extern unsigned long timebase_period_ns;
+ * timebase_period_ns = 1000000000 / bd->bi_tbfreq;
+ */
+ .data
+ .globl timebase_period_ns
+timebase_period_ns:
+ .long 60
+
+ .text
+/*
+ * Delay for a number of microseconds
+ */
+ .globl udelay
+udelay:
+ mfspr r4,SPRN_PVR
+ srwi r4,r4,16
+ cmpwi 0,r4,1 /* 601 ? */
+ bne .udelay_not_601
+00: li r0,86 /* Instructions / microsecond? */
+ mtctr r0
+10: addi r0,r0,0 /* NOP */
+ bdnz 10b
+ subic. r3,r3,1
+ bne 00b
+ blr
+
+.udelay_not_601:
+ mulli r4,r3,1000 /* nanoseconds */
+ /* Change r4 to be the number of ticks using:
+ * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
+ * timebase_period_ns defaults to 60 (16.6MHz) */
+ mflr r5
+ bl 0f
+0: mflr r6
+ mtlr r5
+ lis r5,0b@ha
+ addi r5,r5,0b@l
+ subf r5,r5,r6 /* In case we're relocated */
+ addis r5,r5,timebase_period_ns@ha
+ lwz r5,timebase_period_ns@l(r5)
+ add r4,r4,r5
+ addi r4,r4,-1
+ divw r4,r4,r5 /* BUS ticks */
+1: mftbu r5
+ mftb r6
+ mftbu r7
+ cmpw 0,r5,r7
+ bne 1b /* Get [synced] base time */
+ addc r9,r6,r4 /* Compute end time */
+ addze r8,r5
+2: mftbu r5
+ cmpw 0,r5,r8
+ blt 2b
+ bgt 3f
+ mftb r6
+ cmpw 0,r6,r9
+ blt 2b
+3: blr
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index b5fb1fee76f..024e4d425c5 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -184,6 +184,9 @@ fi
if [ -n "$dtb" ]; then
addsec $tmp "$dtb" .kernel:dtb
+ if [ -n "$dts" ]; then
+ rm $dtb
+ fi
fi
if [ "$platform" != "miboot" ]; then
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S
index 05f32388b95..a360905e542 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -21,6 +21,10 @@ SECTIONS
*(.got2)
__got2_end = .;
+ _dtb_start = .;
+ *(.kernel:dtb)
+ _dtb_end = .;
+
_vmlinux_start = .;
*(.kernel:vmlinux.strip)
_vmlinux_end = .;
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 0aba06d7d2e..a98c982c73a 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18
-# Wed Oct 4 15:30:50 2006
+# Linux kernel version: 2.6.19-rc6
+# Wed Nov 22 15:33:04 2006
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -32,6 +32,10 @@ CONFIG_AUDIT_ARCH=y
CONFIG_POWER3=y
CONFIG_POWER4=y
CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+CONFIG_PPC_DCR_MMIO=y
+CONFIG_PPC_DCR=y
+CONFIG_PPC_OF_PLATFORM_PCI=y
CONFIG_ALTIVEC=y
CONFIG_PPC_STD_MMU=y
CONFIG_VIRT_CPU_ACCOUNTING=y
@@ -67,7 +71,7 @@ CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -131,6 +135,7 @@ CONFIG_PPC_CELL=y
CONFIG_PPC_CELL_NATIVE=y
CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_UDBG_RTAS_CONSOLE=y
+CONFIG_PPC_PS3=y
# CONFIG_U3_DART is not set
CONFIG_PPC_RTAS=y
# CONFIG_RTAS_ERROR_LOGGING is not set
@@ -139,9 +144,23 @@ CONFIG_RTAS_FLASH=y
CONFIG_MMIO_NVRAM=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
-# CONFIG_CPU_FREQ is not set
+CONFIG_PPC_INDIRECT_IO=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_DEBUG=y
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_FREQ_PMAC64 is not set
# CONFIG_WANT_EARLY_SERIAL is not set
-# CONFIG_MPIC is not set
+CONFIG_MPIC=y
#
# Cell Broadband Engine options
@@ -149,6 +168,15 @@ CONFIG_MMIO_NVRAM=y
CONFIG_SPU_FS=m
CONFIG_SPU_BASE=y
CONFIG_CBE_RAS=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_USE_LPAR_ADDR=y
#
# Kernel options
@@ -166,13 +194,14 @@ CONFIG_BINFMT_MISC=m
CONFIG_FORCE_MAX_ZONEORDER=9
# CONFIG_IOMMU_VMERGE is not set
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_KEXEC=y
+# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
CONFIG_NODES_SHIFT=4
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
@@ -189,6 +218,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_RESOURCES_64BIT=y
CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
@@ -207,7 +237,6 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
-# CONFIG_PCI_MULTITHREAD_PROBE is not set
# CONFIG_PCI_DEBUG is not set
#
@@ -280,7 +309,6 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
# CONFIG_IPV6_SIT is not set
CONFIG_IPV6_TUNNEL=m
-# CONFIG_IPV6_SUBTREES is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
@@ -1107,7 +1135,8 @@ CONFIG_PLIST=y
#
# Instrumentation Support
#
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set
#
@@ -1142,6 +1171,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
+CONFIG_XMON_DISASSEMBLY=y
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
# CONFIG_PPC_EARLY_DEBUG is not set
@@ -1159,7 +1189,7 @@ CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_BLKCIPHER=m
CONFIG_CRYPTO_HASH=y
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
new file mode 100644
index 00000000000..23fd210eb56
--- /dev/null
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -0,0 +1,1583 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Wed Nov 15 20:36:30 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-kuroboxHG"
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+# CONFIG_PPC_MULTIPLATFORM is not set
+CONFIG_EMBEDDED6xx=y
+# CONFIG_APUS is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_KATANA is not set
+# CONFIG_WILLOW is not set
+# CONFIG_CPCI690 is not set
+# CONFIG_POWERPMC250 is not set
+# CONFIG_CHESTNUT is not set
+# CONFIG_SPRUCE is not set
+# CONFIG_HDPU is not set
+# CONFIG_EV64260 is not set
+# CONFIG_LOPEC is not set
+# CONFIG_MVME5100 is not set
+# CONFIG_PPLUS is not set
+# CONFIG_PRPMC750 is not set
+# CONFIG_PRPMC800 is not set
+# CONFIG_SANDPOINT is not set
+CONFIG_LINKSTATION=y
+# CONFIG_MPC7448HPC2 is not set
+# CONFIG_RADSTONE_PPC7D is not set
+# CONFIG_PAL4 is not set
+# CONFIG_GEMINI is not set
+# CONFIG_EST8260 is not set
+# CONFIG_SBC82xx is not set
+# CONFIG_SBS8260 is not set
+# CONFIG_RPX8260 is not set
+# CONFIG_TQM8260 is not set
+# CONFIG_ADS8272 is not set
+# CONFIG_PQ2FADS is not set
+# CONFIG_LITE5200 is not set
+# CONFIG_EV64360 is not set
+CONFIG_PPC_GEN550=y
+CONFIG_MPC10X_BRIDGE=y
+CONFIG_MPC10X_OPENPIC=y
+# CONFIG_MPC10X_STORE_GATHERING is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_MPIC=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_PPTP is not set
+# CONFIG_IP_NF_H323 is not set
+# CONFIG_IP_NF_SIP is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_IEEE80211=m
+CONFIG_IEEE80211_DEBUG=y
+CONFIG_IEEE80211_CRYPT_WEP=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_IEEE80211_SOFTMAC=m
+CONFIG_IEEE80211_SOFTMAC_DEBUG=y
+CONFIG_WIRELESS_EXT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=m
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0xffc00000
+CONFIG_MTD_PHYSMAP_LEN=0x400000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=1
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+CONFIG_PATA_SIL680=y
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_R8169=y
+# CONFIG_R8169_NAPI is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_AIRO is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_BCM43XX is not set
+# CONFIG_ZD1211RW is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+
+#
+# RTC drivers
+#
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_RS5C372=y
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=m
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/lite5200_defconfig b/arch/powerpc/configs/lite5200_defconfig
new file mode 100644
index 00000000000..ee7655776d4
--- /dev/null
+++ b/arch/powerpc/configs/lite5200_defconfig
@@ -0,0 +1,931 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Mon Nov 27 11:08:20 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_CHRP is not set
+CONFIG_PPC_MPC52xx=y
+# CONFIG_PPC_EFIKA is not set
+CONFIG_PPC_LITE5200=y
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_SYSFS_DEPRECATED is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_INDIRECT_PCI is not set
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+CONFIG_PATA_MPC52xx=y
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index be11df7c11a..1c009651f92 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1386,8 +1386,8 @@ CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
# CONFIG_FUSE_FS is not set
#
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
new file mode 100644
index 00000000000..f2d888e014a
--- /dev/null
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -0,0 +1,837 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Tue Nov 21 19:38:53 2006
+#
+CONFIG_PPC64=y
+CONFIG_64BIT=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+# CONFIG_POWER4_ONLY is not set
+CONFIG_POWER3=y
+CONFIG_POWER4=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_PPC_OF_PLATFORM_PCI is not set
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_ISERIES is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_MAPLE is not set
+# CONFIG_PPC_PASEMI is not set
+CONFIG_PPC_CELL=y
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_IBM_CELL_BLADE is not set
+CONFIG_PPC_PS3=y
+# CONFIG_U3_DART is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Cell Broadband Engine options
+#
+CONFIG_SPU_FS=y
+CONFIG_SPU_BASE=y
+# CONFIG_CBE_RAS is not set
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+CONFIG_PS3_DYNAMIC_DMA=y
+CONFIG_PS3_USE_LPAR_ADDR=y
+
+#
+# Kernel options
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_BKL is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_FORCE_MAX_ZONEORDER=9
+# CONFIG_IOMMU_VMERGE is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_IRQ_ALL_CPUS is not set
+# CONFIG_NUMA is not set
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_PPC_64K_PAGES=y
+# CONFIG_SCHED_SMT is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/nfs rw ip=dhcp"
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+CONFIG_KERNEL_START=0xc000000000000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_DEBUG_LIST=y
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUGGER is not set
+CONFIG_IRQSTACKS=y
+# CONFIG_BOOTX_TEXT is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 7af23c43fd4..4fe53d08ab8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,11 +17,11 @@ obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o cpu_setup_ppc970.o \
- firmware.o sysfs.o
+ firmware.o sysfs.o nvram_64.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
-obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
+obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
@@ -32,7 +32,6 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
-obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
@@ -59,11 +58,11 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
+
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)
-pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
- pci_direct_iommu.o iomap.o
+pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o
pci32-$(CONFIG_PPC32) := pci_32.o
obj-$(CONFIG_PCI) += $(pci64-y) $(pci32-y)
kexec-$(CONFIG_PPC64) := machine_kexec_64.o
@@ -72,8 +71,12 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y)
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+obj-y += iomap.o
+endif
+
ifeq ($(CONFIG_PPC_ISERIES),y)
-$(obj)/head_64.o: $(obj)/lparmap.s
+extra-y += lparmap.s
AFLAGS_head_64.o += -I$(obj)
endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index d06f378597b..e96521530d2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -118,7 +118,8 @@ int main(void)
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
- DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+ DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
+ DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 652594891d5..bf118c38575 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -83,6 +83,22 @@ _GLOBAL(__setup_cpu_ppc970)
rldimi r0,r11,52,8 /* set NAP and DPM */
li r11,0
rldimi r0,r11,32,31 /* clear EN_ATTN */
+ b load_hids /* Jump to shared code */
+
+
+_GLOBAL(__setup_cpu_ppc970MP)
+ /* Do nothing if not running in HV mode */
+ mfmsr r0
+ rldicl. r0,r0,4,63
+ beqlr
+
+ mfspr r0,SPRN_HID0
+ li r11,0x15 /* clear DOZE and SLEEP */
+ rldimi r0,r11,52,6 /* set DEEPNAP, NAP and DPM */
+ li r11,0
+ rldimi r0,r11,32,31 /* clear EN_ATTN */
+
+load_hids:
mtspr SPRN_HID0,r0
mfspr r0,SPRN_HID0
mfspr r0,SPRN_HID0
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bfd499ee375..9d1614c3ce6 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -42,6 +42,7 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_ppc970(void);
#endif /* CONFIG_PPC64 */
@@ -222,9 +223,9 @@ static struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
- .cpu_setup = __setup_cpu_ppc970,
+ .cpu_setup = __setup_cpu_ppc970MP,
.cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
+ .oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
@@ -276,10 +277,45 @@ static struct cpu_spec cpu_specs[] = {
.oprofile_mmcra_sipr = MMCRA_SIPR,
.platform = "power5+",
},
+ { /* POWER6 in P5+ mode; 2.04-compliant processor */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000001,
+ .cpu_name = "POWER5+",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .oprofile_cpu_type = "ppc64/power6",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
+ .platform = "power5+",
+ },
{ /* Power6 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003e0000,
- .cpu_name = "POWER6",
+ .cpu_name = "POWER6 (raw)",
+ .cpu_features = CPU_FTRS_POWER6,
+ .cpu_user_features = COMMON_USER_POWER6 |
+ PPC_FEATURE_POWER6_EXT,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .oprofile_cpu_type = "ppc64/power6",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
+ .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
+ .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
+ POWER6_MMCRA_OTHER,
+ .platform = "power6x",
+ },
+ { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000002,
+ .cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
@@ -303,6 +339,9 @@ static struct cpu_spec cpu_specs[] = {
PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc64/cell-be",
+ .oprofile_type = PPC_OPROFILE_CELL,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@@ -801,6 +840,17 @@ static struct cpu_spec cpu_specs[] = {
.cpu_setup = __setup_cpu_603,
.platform = "ppc603",
},
+ { /* e300c3 on 83xx */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00850000,
+ .cpu_name = "e300c3",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
+ },
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@@ -1169,19 +1219,15 @@ static struct cpu_spec cpu_specs[] = {
#endif /* CONFIG_PPC32 */
};
-struct cpu_spec *identify_cpu(unsigned long offset)
+struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr)
{
struct cpu_spec *s = cpu_specs;
struct cpu_spec **cur = &cur_cpu_spec;
- unsigned int pvr = mfspr(SPRN_PVR);
int i;
s = PTRRELOC(s);
cur = PTRRELOC(cur);
- if (*cur != NULL)
- return PTRRELOC(*cur);
-
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
if ((pvr & s->pvr_mask) == s->pvr_value) {
*cur = cpu_specs + i;
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 1af41f7616d..d3f2080d2ee 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,61 +46,6 @@ int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
- size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) +3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
- struct elf_prstatus prstatus;
- u32 *buf;
-
- if ((cpu < 0) || (cpu >= NR_CPUS))
- return;
-
- /* Using ELF notes here is opportunistic.
- * I need a well defined structure format
- * for the data I pass, and I need tags
- * on the data to indicate what information I have
- * squirrelled away. ELF notes happen to provide
- * all of that that no need to invent something new.
- */
- buf = (u32*)per_cpu_ptr(crash_notes, cpu);
- if (!buf)
- return;
-
- memset(&prstatus, 0, sizeof(prstatus));
- prstatus.pr_pid = current->pid;
- elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
- sizeof(prstatus));
- final_note(buf);
-}
-
#ifdef CONFIG_SMP
static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
@@ -111,9 +56,9 @@ void crash_ipi_callback(struct pt_regs *regs)
if (!cpu_online(cpu))
return;
- local_irq_disable();
+ hard_irq_disable();
if (!cpu_isset(cpu, cpus_in_crash))
- crash_save_this_cpu(regs, cpu);
+ crash_save_cpu(regs, cpu);
cpu_set(cpu, cpus_in_crash);
/*
@@ -289,7 +234,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* an SMP system.
* The kernel is broken so disable interrupts.
*/
- local_irq_disable();
+ hard_irq_disable();
for_each_irq(irq) {
struct irq_desc *desc = irq_desc + irq;
@@ -306,7 +251,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* such that another IPI will not be sent.
*/
crashing_cpu = smp_processor_id();
- crash_save_this_cpu(regs, crashing_cpu);
+ crash_save_cpu(regs, crashing_cpu);
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
if (ppc_md.kexec_cpu_down)
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 6c168f6ea14..7b0e754383c 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -1,151 +1,194 @@
/*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
#include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
- if (dev->bus == &vio_bus_type)
- return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
- if (dev->bus == &ibmebus_bus_type)
- return &ibmebus_dma_ops;
-#endif
- return NULL;
-}
+/*
+ * Generic iommu implementation
+ */
-int dma_supported(struct device *dev, u64 mask)
+static inline unsigned long device_to_mask(struct device *dev)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ if (dev->dma_mask && *dev->dma_mask)
+ return *dev->dma_mask;
+ /* Assume devices without mask can take 32 bit addresses */
+ return 0xfffffffful;
+}
- BUG_ON(!dma_ops);
- return dma_ops->dma_supported(dev, mask);
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+ device_to_mask(dev), flag,
+ dev->archdata.numa_node);
}
-EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
- if (dev->bus == &vio_bus_type)
- return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
- if (dev->bus == &ibmebus_bus_type)
- return -EIO;
-#endif
- BUG();
- return 0;
+ iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}
-EXPORT_SYMBOL(dma_set_mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer. The user buffer must be
+ * contiguous real kernel storage (not vmalloc). The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer. The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+ size_t size,
+ enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+ return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+ device_to_mask(dev), direction);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
+}
- BUG_ON(!dma_ops);
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
+{
+ return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+ device_to_mask(dev), direction);
}
-EXPORT_SYMBOL(dma_free_coherent);
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- return dma_ops->map_single(dev, cpu_addr, size, direction);
+ iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}
-EXPORT_SYMBOL(dma_map_single);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- dma_ops->unmap_single(dev, dma_addr, size, direction);
+ struct iommu_table *tbl = dev->archdata.dma_data;
+
+ if (!tbl || tbl->it_offset > mask) {
+ printk(KERN_INFO
+ "Warning: IOMMU offset too big for device mask\n");
+ if (tbl)
+ printk(KERN_INFO
+ "mask: 0x%08lx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset);
+ else
+ printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+ mask);
+ return 0;
+ } else
+ return 1;
}
-EXPORT_SYMBOL(dma_unmap_single);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+struct dma_mapping_ops dma_iommu_ops = {
+ .alloc_coherent = dma_iommu_alloc_coherent,
+ .free_coherent = dma_iommu_free_coherent,
+ .map_single = dma_iommu_map_single,
+ .unmap_single = dma_iommu_unmap_single,
+ .map_sg = dma_iommu_map_sg,
+ .unmap_sg = dma_iommu_unmap_sg,
+ .dma_supported = dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
- BUG_ON(!dma_ops);
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a global offset that can be applied if
+ * the address at which memory is visible to devices is not 0.
+ */
+unsigned long dma_direct_offset;
- return dma_ops->map_single(dev, page_address(page) + offset, size,
- direction);
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct page *page;
+ void *ret;
+ int node = dev->archdata.numa_node;
+
+ /* TODO: Maybe use the numa node here too ? */
+ page = alloc_pages_node(node, flag, get_order(size));
+ if (page == NULL)
+ return NULL;
+ ret = page_address(page);
+ memset(ret, 0, size);
+ *dma_handle = virt_to_abs(ret) | dma_direct_offset;
+
+ return ret;
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ free_pages((unsigned long)vaddr, get_order(size));
+}
- BUG_ON(!dma_ops);
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return virt_to_abs(ptr) | dma_direct_offset;
+}
- dma_ops->unmap_single(dev, dma_address, size, direction);
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
}
-EXPORT_SYMBOL(dma_unmap_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+ int i;
- BUG_ON(!dma_ops);
+ for (i = 0; i < nents; i++, sg++) {
+ sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
+ dma_direct_offset;
+ sg->dma_length = sg->length;
+ }
- return dma_ops->map_sg(dev, sg, nents, direction);
+ return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
- struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
+}
- dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+ /* Could be improved to check for memory though it better be
+ * done via some global so platforms can set the limit in case
+ * they have limited DMA windows
+ */
+ return mask >= DMA_32BIT_MASK;
}
-EXPORT_SYMBOL(dma_unmap_sg);
+
+struct dma_mapping_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_single = dma_direct_map_single,
+ .unmap_single = dma_direct_unmap_single,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
+ .dma_supported = dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 748e74fcf54..1a3d4de197d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -87,15 +87,19 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
+ li r10,1
+ stb r10,PACASOFTIRQEN(r13)
+ stb r10,PACAHARDIRQEN(r13)
+ std r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
/* Hack for handling interrupts when soft-enabling on iSeries */
cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
andi. r10,r12,MSR_PR /* from kernel */
crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
- beq hardware_interrupt_entry
- lbz r10,PACAPROCENABLED(r13)
- std r10,SOFTE(r1)
+ bne 2f
+ b hardware_interrupt_entry
+2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
mfmsr r11
@@ -460,9 +464,9 @@ _GLOBAL(ret_from_except_lite)
#endif
restore:
+ ld r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
- ld r5,SOFTE(r1)
cmpdi 0,r5,0
beq 4f
/* Check for pending interrupts (iSeries) */
@@ -472,21 +476,25 @@ BEGIN_FW_FTR_SECTION
beq+ 4f /* skip do_IRQ if no interrupts */
li r3,0
- stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+ stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
ori r10,r10,MSR_EE
mtmsrd r10 /* hard-enable again */
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except_lite /* loop back and handle more */
-
-4: stb r5,PACAPROCENABLED(r13)
+4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+ stb r5,PACASOFTIRQEN(r13)
ld r3,_MSR(r1)
andi. r0,r3,MSR_RI
beq- unrecov_restore
+ /* extract EE bit and use it to restore paca->hard_enabled */
+ rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
+ stb r4,PACAHARDIRQEN(r13)
+
andi. r0,r3,MSR_PR
/*
@@ -538,25 +546,15 @@ do_work:
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
ld r0,SOFTE(r1)
cmpdi r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
- andi. r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
crandc eq,cr1*4+eq,eq
bne restore
/* here we are preempting the current task */
1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
li r0,1
- stb r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
ori r10,r10,MSR_EE
mtmsrd r10,1 /* reenable interrupts */
bl .preempt_schedule
@@ -639,8 +637,7 @@ _GLOBAL(enter_rtas)
/* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON
*/
- mfmsr r6
- andi. r0,r6,MSR_EE
+ lbz r0,PACASOFTIRQEN(r13)
1: tdnei r0,0
.section __bug_table,"a"
.llong 1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +646,13 @@ _GLOBAL(enter_rtas)
1: .asciz __FILE__
2: .asciz "enter_rtas"
.previous
-
+
+ /* Hard-disable interrupts */
+ mfmsr r6
+ rldicl r7,r6,48,1
+ rotldi r7,r7,16
+ mtmsrd r7,1
+
/* Unfortunately, the stack pointer and the MSR are also clobbered,
* so they are saved in the PACA which allows us to restore
* our original state after RTAS returns.
@@ -735,8 +738,6 @@ _STATIC(rtas_restore_regs)
#endif /* CONFIG_PPC_RTAS */
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
@@ -821,5 +822,3 @@ _GLOBAL(enter_prom)
ld r0,16(r1)
mtlr r0
blr
-
-#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index e720729f3e5..71b1fe58e9e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -35,9 +35,7 @@
#include <asm/thread_info.h>
#include <asm/firmware.h>
-#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
-#endif
/*
* We layout physical memory as follows:
@@ -74,13 +72,11 @@
.text
.globl _stext
_stext:
-#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
/* NOP this out unconditionally */
BEGIN_FTR_SECTION
b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
-#endif /* CONFIG_PPC_MULTIPLATFORM */
/* Catch branch to 0 in real mode */
trap
@@ -308,7 +304,9 @@ exception_marker:
std r9,_LINK(r1); \
mfctr r10; /* save CTR in stackframe */ \
std r10,_CTR(r1); \
+ lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
+ std r10,SOFTE(r1); \
std r11,_XER(r1); \
li r9,(n)+1; \
std r9,_TRAP(r1); /* set trap number */ \
@@ -343,6 +341,34 @@ label##_pSeries: \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
+#define MASKABLE_EXCEPTION_PSERIES(n, label) \
+ . = n; \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ HMT_MEDIUM; \
+ mtspr SPRN_SPRG1,r13; /* save r13 */ \
+ mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
+ std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
+ std r10,PACA_EXGEN+EX_R10(r13); \
+ lbz r10,PACASOFTIRQEN(r13); \
+ mfcr r9; \
+ cmpwi r10,0; \
+ beq masked_interrupt; \
+ mfspr r10,SPRN_SPRG1; \
+ std r10,PACA_EXGEN+EX_R13(r13); \
+ std r11,PACA_EXGEN+EX_R11(r13); \
+ std r12,PACA_EXGEN+EX_R12(r13); \
+ clrrdi r12,r13,32; /* get high part of &label */ \
+ mfmsr r10; \
+ mfspr r11,SPRN_SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label##_common) \
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
+ mtspr SPRN_SRR0,r12; \
+ mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+ mtspr SPRN_SRR1,r10; \
+ rfid; \
+ b . /* prevent speculative execution */
+
#define STD_EXCEPTION_ISERIES(n, label, area) \
.globl label##_iSeries; \
label##_iSeries: \
@@ -358,40 +384,32 @@ label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
- lbz r10,PACAPROCENABLED(r13); \
+ lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common; \
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
-BEGIN_FW_FTR_SECTION; \
- lbz r10,PACAPROCENABLED(r13); \
li r11,0; \
- std r10,SOFTE(r1); \
+ stb r11,PACASOFTIRQEN(r13); \
+BEGIN_FW_FTR_SECTION; \
+ stb r11,PACAHARDIRQEN(r13); \
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
+BEGIN_FW_FTR_SECTION; \
mfmsr r10; \
- stb r11,PACAPROCENABLED(r13); \
ori r10,r10,MSR_EE; \
mtmsrd r10,1; \
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#define ENABLE_INTS \
-BEGIN_FW_FTR_SECTION; \
- lbz r10,PACAPROCENABLED(r13); \
- mfmsr r11; \
- std r10,SOFTE(r1); \
- ori r11,r11,MSR_EE; \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
-BEGIN_FW_FTR_SECTION; \
- ld r12,_MSR(r1); \
- mfmsr r11; \
- rlwimi r11,r12,0,MSR_EE; \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
- mtmsrd r11,1
+#else
+#define DISABLE_INTS \
+ li r11,0; \
+ stb r11,PACASOFTIRQEN(r13); \
+ stb r11,PACAHARDIRQEN(r13)
-#else /* hard enable/disable interrupts */
-#define DISABLE_INTS
+#endif /* CONFIG_PPC_ISERIES */
#define ENABLE_INTS \
ld r12,_MSR(r1); \
@@ -399,8 +417,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
rlwimi r11,r12,0,MSR_EE; \
mtmsrd r11,1
-#endif
-
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
.align 7; \
.globl label##_common; \
@@ -541,11 +557,11 @@ instruction_access_slb_pSeries:
mfspr r12,SPRN_SRR1 /* and SRR1 */
b .slb_miss_realmode /* Rel. branch works in real mode */
- STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+ MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
STD_EXCEPTION_PSERIES(0x700, program_check)
STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- STD_EXCEPTION_PSERIES(0x900, decrementer)
+ MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
STD_EXCEPTION_PSERIES(0xa00, trap_0a)
STD_EXCEPTION_PSERIES(0xb00, trap_0b)
@@ -597,7 +613,24 @@ system_call_pSeries:
/*** pSeries interrupt support ***/
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
+ MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_SRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_SRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ mfspr r13,SPRN_SPRG1
+ rfid
+ b .
.align 7
do_stab_bolted_pSeries:
@@ -792,7 +825,7 @@ system_reset_iSeries:
cmpwi 0,r23,0
beq iSeries_secondary_smp_loop /* Loop until told to go */
- bne .__secondary_start /* Loop until told to go */
+ bne __secondary_start /* Loop until told to go */
iSeries_secondary_smp_loop:
/* Let the Hypervisor know we are alive */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -813,7 +846,6 @@ iSeries_secondary_smp_loop:
b 1b /* If SMP not configured, secondaries
* loop forever */
- .globl decrementer_iSeries_masked
decrementer_iSeries_masked:
/* We may not have a valid TOC pointer in here. */
li r11,1
@@ -824,7 +856,6 @@ decrementer_iSeries_masked:
mtspr SPRN_DEC,r12
/* fall through */
- .globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
mtcrf 0x80,r9 /* Restore regs */
ld r12,PACALPPACAPTR(r13)
@@ -926,10 +957,18 @@ bad_stack:
* any task or sent any task a signal, you should use
* ret_from_except or ret_from_except_lite instead of this.
*/
+fast_exc_return_irq: /* restores irq state too */
+ ld r3,SOFTE(r1)
+ ld r12,_MSR(r1)
+ stb r3,PACASOFTIRQEN(r13) /* restore paca->soft_enabled */
+ rldicl r4,r12,49,63 /* get MSR_EE to LSB */
+ stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
+ b 1f
+
.globl fast_exception_return
fast_exception_return:
ld r12,_MSR(r1)
- ld r11,_NIP(r1)
+1: ld r11,_NIP(r1)
andi. r3,r12,MSR_RI /* check if RI is set */
beq- unrecov_fer
@@ -952,7 +991,8 @@ fast_exception_return:
REST_8GPRS(2, r1)
mfmsr r10
- clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
+ rldicl r10,r10,48,1 /* clear EE */
+ rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
mtmsrd r10,1
mtspr SPRN_SRR1,r12
@@ -1326,6 +1366,16 @@ BEGIN_FW_FTR_SECTION
* interrupts if necessary.
*/
beq 13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif
+BEGIN_FW_FTR_SECTION
+ /*
+ * Here we have interrupts hard-disabled, so it is sufficient
+ * to restore paca->{soft,hard}_enable and get out.
+ */
+ beq fast_exc_return_irq /* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f
@@ -1337,14 +1387,6 @@ BEGIN_FW_FTR_SECTION
ld r3,SOFTE(r1)
bl .local_irq_restore
b 11f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
- beq fast_exception_return /* Return from exception on success */
- ble- 12f /* Failure return from hash_page */
-
- /* fall through */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
@@ -1362,6 +1404,8 @@ handle_page_fault:
bl .bad_page_fault
b .ret_from_except
+13: b .ret_from_except_lite
+
/* We have a page fault that hash_page could handle but HV refused
* the PTE insertion
*/
@@ -1371,8 +1415,6 @@ handle_page_fault:
bl .low_hash_fault
b .ret_from_except
-13: b .ret_from_except_lite
-
/* here we have a segment miss */
do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
@@ -1560,7 +1602,7 @@ _GLOBAL(generic_secondary_smp_init)
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
- b .__secondary_start
+ b __secondary_start
#endif
#ifdef CONFIG_PPC_ISERIES
@@ -1595,7 +1637,6 @@ _STATIC(__start_initialization_iSeries)
b .start_here_common
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__mmu_off)
mfmsr r3
@@ -1621,13 +1662,11 @@ _STATIC(__mmu_off)
*
*/
_GLOBAL(__start_initialization_multiplatform)
-#ifdef CONFIG_PPC_MULTIPLATFORM
/*
* Are we booted from a PROM Of-type client-interface ?
*/
cmpldi cr0,r5,0
bne .__boot_from_prom /* yes -> prom */
-#endif
/* Save parameters */
mr r31,r3
@@ -1656,7 +1695,6 @@ _GLOBAL(__start_initialization_multiplatform)
bl .__mmu_off
b .__after_prom_start
-#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__boot_from_prom)
/* Save parameters */
mr r31,r3
@@ -1696,7 +1734,6 @@ _STATIC(__boot_from_prom)
bl .prom_init
/* We never return */
trap
-#endif
/*
* At this point, r3 contains the physical address we are running at,
@@ -1752,8 +1789,6 @@ _STATIC(__after_prom_start)
bl .copy_and_flush /* copy the rest */
b .start_here_multiplatform
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
/*
* Copy routine used to copy the kernel to start at physical address 0
* and flush and invalidate the caches as needed.
@@ -1836,7 +1871,7 @@ _GLOBAL(pmac_secondary_start)
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
- b .__secondary_start
+ b __secondary_start
#endif /* CONFIG_PPC_PMAC */
@@ -1853,7 +1888,7 @@ _GLOBAL(pmac_secondary_start)
* r13 = paca virtual address
* SPRG3 = paca virtual address
*/
-_GLOBAL(__secondary_start)
+__secondary_start:
/* Set thread priority to MEDIUM */
HMT_MEDIUM
@@ -1877,11 +1912,16 @@ _GLOBAL(__secondary_start)
/* enable MMU and jump to start_secondary */
LOAD_REG_ADDR(r3, .start_secondary_prolog)
LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
ori r4,r4,MSR_EE
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+BEGIN_FW_FTR_SECTION
+ stb r7,PACASOFTIRQEN(r13)
+ stb r7,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfid
@@ -1913,7 +1953,6 @@ _GLOBAL(enable_64b_mode)
isync
blr
-#ifdef CONFIG_PPC_MULTIPLATFORM
/*
* This is where the main kernel code starts.
*/
@@ -1977,7 +2016,6 @@ _STATIC(start_here_multiplatform)
mtspr SPRN_SRR1,r4
rfid
b . /* prevent speculative execution */
-#endif /* CONFIG_PPC_MULTIPLATFORM */
/* This is where all platforms converge execution */
_STATIC(start_here_common)
@@ -2005,15 +2043,18 @@ _STATIC(start_here_common)
/* Load up the kernel context */
5:
-#ifdef DO_SOFT_DISABLE
-BEGIN_FW_FTR_SECTION
li r5,0
- stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
+ stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
mfmsr r5
ori r5,r5,MSR_EE /* Hard Enabled */
mtmsrd r5
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
+BEGIN_FW_FTR_SECTION
+ stb r5,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
bl .start_kernel
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 39db7a3affe..82bd2f10770 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
return 1;
}
-struct dma_mapping_ops ibmebus_dma_ops = {
+static struct dma_mapping_ops ibmebus_dma_ops = {
.alloc_coherent = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent,
.map_single = ibmebus_map_single,
@@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common(
dev->ofdev.dev.bus = &ibmebus_bus_type;
dev->ofdev.dev.release = ibmebus_dev_release;
+ dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
+ dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
+ dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);
+
/* An ibmebusdev is based on a of_device. We have to change the
* bus type to use our own DMA mapping operations.
*/
@@ -210,11 +214,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node(
return NULL;
}
- dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
if (!dev) {
return NULL;
}
- memset(dev, 0, sizeof(struct ibmebus_dev));
dev->ofdev.node = of_node_get(dn);
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 4180c3998b3..8994af327b4 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -39,6 +39,13 @@
#define cpu_should_die() 0
#endif
+static int __init powersave_off(char *arg)
+{
+ ppc_md.power_save = NULL;
+ return 0;
+}
+__setup("powersave=off", powersave_off);
+
/*
* The body of the idle task.
*/
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 30de81da7b4..ba319547860 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -30,6 +30,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
beqlr
/* Go to NAP now */
+ mfmsr r7
+ rldicl r0,r7,48,1
+ rotldi r0,r0,16
+ mtmsrd r0,1 /* hard-disable interrupts */
+ li r0,1
+ stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
+ stb r0,PACAHARDIRQEN(r13)
BEGIN_FTR_SECTION
DSSALL
sync
@@ -38,7 +45,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
- mfmsr r7
ori r7,r7,MSR_EE
oris r7,r7,MSR_POW@h
1: sync
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e98180686b3..34ae11494dd 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,13 +25,11 @@
#include <asm/firmware.h>
#include <asm/bug.h>
-void _insb(volatile u8 __iomem *port, void *buf, long count)
+void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
u8 *tbuf = buf;
u8 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
const u8 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
}
EXPORT_SYMBOL(_outsb);
-void _insw_ns(volatile u16 __iomem *port, void *buf, long count)
+void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
}
EXPORT_SYMBOL(_outsw_ns);
-void _insl_ns(volatile u32 __iomem *port, void *buf, long count)
+void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
if (unlikely(count <= 0))
return;
asm volatile("sync");
@@ -129,3 +117,90 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
+
+#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
+void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+{
+ void *p = (void __force *)addr;
+ u32 lc = c;
+ lc |= lc << 8;
+ lc |= lc << 16;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && !IO_CHECK_ALIGN(p, 4)) {
+ *((volatile u8 *)p) = c;
+ p++;
+ n--;
+ }
+ while(n >= 4) {
+ *((volatile u32 *)p) = lc;
+ p += 4;
+ n -= 4;
+ }
+ while(n) {
+ *((volatile u8 *)p) = c;
+ p++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memset_io);
+
+void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n)
+{
+ void *vsrc = (void __force *) src;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc++;
+ dest++;
+ n--;
+ }
+ while(n > 4) {
+ *((u32 *)dest) = *((volatile u32 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc += 4;
+ dest += 4;
+ n -= 4;
+ }
+ while(n) {
+ *((u8 *)dest) = *((volatile u8 *)vsrc);
+ __asm__ __volatile__ ("eieio" : : : "memory");
+ vsrc++;
+ dest++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_fromio);
+
+void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
+{
+ void *vdest = (void __force *) dest;
+
+ __asm__ __volatile__ ("sync" : : : "memory");
+ while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src++;
+ vdest++;
+ n--;
+ }
+ while(n > 4) {
+ *((volatile u32 *)vdest) = *((volatile u32 *)src);
+ src += 4;
+ vdest += 4;
+ n-=4;
+ }
+ while(n) {
+ *((volatile u8 *)vdest) = *((u8 *)src);
+ src++;
+ vdest++;
+ n--;
+ }
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_toio);
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index a13a93dfc65..c6811337105 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(iowrite32_rep);
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
- return (void __iomem *) (port+pci_io_base);
+ return (void __iomem *) (port + _IO_BASE);
}
void ioport_unmap(void __iomem *addr)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ba6b7256084..95edad4faf2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -258,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
-int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
- struct scatterlist *sglist, int nelems,
- unsigned long mask, enum dma_data_direction direction)
+int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+ int nelems, unsigned long mask,
+ enum dma_data_direction direction)
{
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5e37bf14ef2..0bd8c766583 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,8 +64,9 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
#include <asm/paca.h>
+#include <asm/firmware.h>
#endif
int __irq_offset_value;
@@ -95,6 +96,74 @@ extern atomic_t ipi_sent;
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
+
+static inline unsigned long get_hard_enabled(void)
+{
+ unsigned long enabled;
+
+ __asm__ __volatile__("lbz %0,%1(13)"
+ : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+ return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+ __asm__ __volatile__("stb %0,%1(13)"
+ : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
+void local_irq_restore(unsigned long en)
+{
+ /*
+ * get_paca()->soft_enabled = en;
+ * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+ * That was allowed before, and in such a case we do need to take care
+ * that gcc will set soft_enabled directly via r13, not choose to use
+ * an intermediate register, lest we're preempted to a different cpu.
+ */
+ set_soft_enabled(en);
+ if (!en)
+ return;
+
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ /*
+ * Do we need to disable preemption here? Not really: in the
+ * unlikely event that we're preempted to a different cpu in
+ * between getting r13, loading its lppaca_ptr, and loading
+ * its any_int, we might call iseries_handle_interrupts without
+ * an interrupt pending on the new cpu, but that's no disaster,
+ * is it? And the business of preempting us off the old cpu
+ * would itself involve a local_irq_restore which handles the
+ * interrupt to that cpu.
+ *
+ * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+ * to avoid any preemption checking added into get_paca().
+ */
+ if (local_paca->lppaca_ptr->int_dword.any_int)
+ iseries_handle_interrupts();
+ return;
+ }
+
+ /*
+ * if (get_paca()->hard_enabled) return;
+ * But again we need to take care that gcc gets hard_enabled directly
+ * via r13, not choose to use an intermediate register, lest we're
+ * preempted to a different cpu in between the two instructions.
+ */
+ if (get_hard_enabled())
+ return;
+
+ /*
+ * Need to hard-enable interrupts here. Since currently disabled,
+ * no need to take further asm precautions against preemption; but
+ * use local_paca instead of get_paca() to avoid preemption checking.
+ */
+ local_paca->hard_enabled = en;
+ if ((int)mfspr(SPRN_DEC) < 0)
+ mtspr(SPRN_DEC, 1);
+ hard_irq_enable();
+}
#endif /* CONFIG_PPC64 */
int show_interrupts(struct seq_file *p, void *v)
@@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs)
set_irq_regs(old_regs);
#ifdef CONFIG_PPC_ISERIES
- if (get_lppaca()->int_dword.fields.decr_int) {
+ if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+ get_lppaca()->int_dword.fields.decr_int) {
get_lppaca()->int_dword.fields.decr_int = 0;
/* Signal a fake decrementer interrupt */
timer_interrupt(regs);
@@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
void irq_dispose_mapping(unsigned int virq)
{
- struct irq_host *host = irq_map[virq].host;
+ struct irq_host *host;
irq_hw_number_t hwirq;
unsigned long flags;
+ if (virq == NO_IRQ)
+ return;
+
+ host = irq_map[virq].host;
WARN_ON (host == NULL);
if (host == NULL)
return;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7b8d12b9026..4657563f881 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -85,7 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, 0);
mutex_unlock(&kprobe_mutex);
}
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 397c83eda20..8a06724e029 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -9,30 +9,26 @@
#include <asm/of_device.h>
/**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
+ * of_match_node - Tell if a device_node has a matching of_match structure
* @ids: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @node: the device_node structure to match against
*
- * Used by a driver to check whether an of_device present in the
- * system is in its list of supported devices.
+ * Low level utility function used by device matching.
*/
-const struct of_device_id *of_match_device(const struct of_device_id *matches,
- const struct of_device *dev)
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
{
- if (!dev->node)
- return NULL;
while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
int match = 1;
if (matches->name[0])
- match &= dev->node->name
- && !strcmp(matches->name, dev->node->name);
+ match &= node->name
+ && !strcmp(matches->name, node->name);
if (matches->type[0])
- match &= dev->node->type
- && !strcmp(matches->type, dev->node->type);
+ match &= node->type
+ && !strcmp(matches->type, node->type);
if (matches->compatible[0])
- match &= device_is_compatible(dev->node,
- matches->compatible);
+ match &= device_is_compatible(node,
+ matches->compatible);
if (match)
return matches;
matches++;
@@ -40,16 +36,21 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches,
return NULL;
}
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+/**
+ * of_match_device - Tell if an of_device structure has a matching
+ * of_match structure
+ * @ids: array of of device match structures to search in
+ * @dev: the of device structure to match against
+ *
+ * Used by a driver to check whether an of_device present in the
+ * system is in its list of supported devices.
+ */
+const struct of_device_id *of_match_device(const struct of_device_id *matches,
+ const struct of_device *dev)
{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * of_drv = to_of_platform_driver(drv);
- const struct of_device_id * matches = of_drv->match_table;
-
- if (!matches)
- return 0;
-
- return of_match_device(matches, of_dev) != NULL;
+ if (!dev->node)
+ return NULL;
+ return of_match_node(matches, dev->node);
}
struct of_device *of_dev_get(struct of_device *dev)
@@ -71,96 +72,8 @@ void of_dev_put(struct of_device *dev)
put_device(&dev->dev);
}
-
-static int of_device_probe(struct device *dev)
-{
- int error = -ENODEV;
- struct of_platform_driver *drv;
- struct of_device *of_dev;
- const struct of_device_id *match;
-
- drv = to_of_platform_driver(dev->driver);
- of_dev = to_of_device(dev);
-
- if (!drv->probe)
- return error;
-
- of_dev_get(of_dev);
-
- match = of_match_device(drv->match_table, of_dev);
- if (match)
- error = drv->probe(of_dev, match);
- if (error)
- of_dev_put(of_dev);
-
- return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
- if (dev->driver && drv->remove)
- drv->remove(of_dev);
- return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->suspend)
- error = drv->suspend(of_dev, state);
- return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->resume)
- error = drv->resume(of_dev);
- return error;
-}
-
-struct bus_type of_platform_bus_type = {
- .name = "of_platform",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
-
-static int __init of_bus_driver_init(void)
-{
- return bus_register(&of_platform_bus_type);
-}
-
-postcore_initcall(of_bus_driver_init);
-
-int of_register_driver(struct of_platform_driver *drv)
-{
- /* initialize common driver fields */
- drv->driver.name = drv->name;
- drv->driver.bus = &of_platform_bus_type;
-
- /* register with core */
- return driver_register(&drv->driver);
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
- driver_unregister(&drv->driver);
-}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dev_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct of_device *ofdev;
@@ -208,41 +121,11 @@ void of_device_unregister(struct of_device *ofdev)
device_unregister(&ofdev->dev);
}
-struct of_device* of_platform_device_create(struct device_node *np,
- const char *bus_id,
- struct device *parent)
-{
- struct of_device *dev;
-
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
- memset(dev, 0, sizeof(*dev));
-
- dev->node = of_node_get(np);
- dev->dma_mask = 0xffffffffUL;
- dev->dev.dma_mask = &dev->dma_mask;
- dev->dev.parent = parent;
- dev->dev.bus = &of_platform_bus_type;
- dev->dev.release = of_release_dev;
-
- strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
-
- if (of_device_register(dev) != 0) {
- kfree(dev);
- return NULL;
- }
-
- return dev;
-}
+EXPORT_SYMBOL(of_match_node);
EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_platform_bus_type);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
EXPORT_SYMBOL(of_device_register);
EXPORT_SYMBOL(of_device_unregister);
EXPORT_SYMBOL(of_dev_get);
EXPORT_SYMBOL(of_dev_put);
-EXPORT_SYMBOL(of_platform_device_create);
EXPORT_SYMBOL(of_release_dev);
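A brief sketch (not from the patch) of how the newly split-out of_match_node() can be used directly on a device_node, for instance while walking the device tree; the table contents and the "example,widget" compatible string are made up, and the headers are the assumed locations of the declarations.

#include <linux/mod_devicetable.h>
#include <asm/prom.h>
#include <asm/of_device.h>

static const struct of_device_id example_ids[] = {
	{ .type = "soc", },
	{ .compatible = "example,widget", },	/* hypothetical */
	{},
};

static int example_node_is_supported(struct device_node *np)
{
	return of_match_node(example_ids, np) != NULL;
}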
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
new file mode 100644
index 00000000000..b3189d0161b
--- /dev/null
+++ b/arch/powerpc/kernel/of_platform.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * and Arnd Bergmann, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include <asm/errno.h>
+#include <asm/dcr.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/topology.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+#include <asm/atomic.h>
+
+
+/*
+ * The list of OF IDs below is used for matching bus types in the
+ * system whose devices are to be exposed as of_platform_devices.
+ *
+ * This is the default list valid for most platforms. This file provides
+ * functions that can take an explicit list if necessary, though.
+ *
+ * The search is always performed recursively, looking for children of
+ * the provided device_node, and recursing whenever such a child matches
+ * a bus type in the list.
+ */
+
+static struct of_device_id of_default_bus_ids[] = {
+ { .type = "soc", },
+ { .compatible = "soc", },
+ { .type = "spider", },
+ { .type = "axon", },
+ { .type = "plb5", },
+ { .type = "plb4", },
+ { .type = "opb", },
+ {},
+};
+
+static atomic_t bus_no_reg_magic;
+
+/*
+ *
+ * OF platform device type definition & base infrastructure
+ *
+ */
+
+static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * of_drv = to_of_platform_driver(drv);
+ const struct of_device_id * matches = of_drv->match_table;
+
+ if (!matches)
+ return 0;
+
+ return of_match_device(matches, of_dev) != NULL;
+}
+
+static int of_platform_device_probe(struct device *dev)
+{
+ int error = -ENODEV;
+ struct of_platform_driver *drv;
+ struct of_device *of_dev;
+ const struct of_device_id *match;
+
+ drv = to_of_platform_driver(dev->driver);
+ of_dev = to_of_device(dev);
+
+ if (!drv->probe)
+ return error;
+
+ of_dev_get(of_dev);
+
+ match = of_match_device(drv->match_table, of_dev);
+ if (match)
+ error = drv->probe(of_dev, match);
+ if (error)
+ of_dev_put(of_dev);
+
+ return error;
+}
+
+static int of_platform_device_remove(struct device *dev)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->remove)
+ drv->remove(of_dev);
+ return 0;
+}
+
+static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->suspend)
+ error = drv->suspend(of_dev, state);
+ return error;
+}
+
+static int of_platform_device_resume(struct device * dev)
+{
+ struct of_device * of_dev = to_of_device(dev);
+ struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->resume)
+ error = drv->resume(of_dev);
+ return error;
+}
+
+struct bus_type of_platform_bus_type = {
+ .name = "of_platform",
+ .match = of_platform_bus_match,
+ .probe = of_platform_device_probe,
+ .remove = of_platform_device_remove,
+ .suspend = of_platform_device_suspend,
+ .resume = of_platform_device_resume,
+};
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static int __init of_bus_driver_init(void)
+{
+ return bus_register(&of_platform_bus_type);
+}
+
+postcore_initcall(of_bus_driver_init);
+
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &of_platform_bus_type;
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
+
+static void of_platform_make_bus_id(struct of_device *dev)
+{
+ struct device_node *node = dev->node;
+ char *name = dev->dev.bus_id;
+ const u32 *reg;
+ u64 addr;
+ int magic;
+
+ /*
+ * If it's a DCR based device, use 'd' for native DCRs
+ * and 'D' for MMIO DCRs.
+ */
+#ifdef CONFIG_PPC_DCR
+ reg = get_property(node, "dcr-reg", NULL);
+ if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+ snprintf(name, BUS_ID_SIZE, "d%x.%s",
+ *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+ addr = of_translate_dcr_address(node, *reg, NULL);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "D%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+ }
+#endif /* CONFIG_PPC_DCR */
+
+ /*
+ * For MMIO, get the physical address
+ */
+ reg = get_property(node, "reg", NULL);
+ if (reg) {
+ addr = of_translate_address(node, reg);
+ if (addr != OF_BAD_ADDR) {
+ snprintf(name, BUS_ID_SIZE,
+ "%llx.%s", (unsigned long long)addr,
+ node->name);
+ return;
+ }
+ }
+
+ /*
+ * No BusID, use the node name and add a globally incremented
+ * counter (and pray...)
+ */
+ magic = atomic_add_return(1, &bus_no_reg_magic);
+ snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+}
+
+struct of_device* of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ struct of_device *dev;
+
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof(*dev));
+
+ dev->node = of_node_get(np);
+ dev->dma_mask = 0xffffffffUL;
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.parent = parent;
+ dev->dev.bus = &of_platform_bus_type;
+ dev->dev.release = of_release_dev;
+ dev->dev.archdata.of_node = np;
+ dev->dev.archdata.numa_node = of_node_to_nid(np);
+
+ /* We do not fill the DMA ops for platform devices by default.
+ * This is currently the responsibility of the platform code
+ * to do such, possibly using a device notifier
+ */
+
+ if (bus_id)
+ strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+ else
+ of_platform_make_bus_id(dev);
+
+ if (of_device_register(dev) != 0) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
+
+
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally recursively instantiate matching busses.
+ * @bus: device node of the bus to instantiate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child busses
+ */
+static int of_platform_bus_create(struct device_node *bus,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+ pr_debug(" create child: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else if (!of_match_node(matches, child))
+ continue;
+ if (rc == 0) {
+ pr_debug(" and sub busses\n");
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ } if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform busses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+
+int of_platform_bus_probe(struct device_node *root,
+ struct of_device_id *matches,
+ struct device *parent)
+{
+ struct device_node *child;
+ struct of_device *dev;
+ int rc = 0;
+
+ if (matches == NULL)
+ matches = of_default_bus_ids;
+ if (matches == OF_NO_DEEP_PROBE)
+ return -EINVAL;
+ if (root == NULL)
+ root = of_find_node_by_path("/");
+ else
+ of_node_get(root);
+
+ pr_debug("of_platform_bus_probe()\n");
+ pr_debug(" starting at: %s\n", root->full_name);
+
+ /* Do a self-check of bus type; if there's a match, create
+ * children
+ */
+ if (of_match_node(matches, root)) {
+ pr_debug(" root match, create all sub devices\n");
+ dev = of_platform_device_create(root, NULL, parent);
+ if (dev == NULL) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+ pr_debug(" create all sub busses\n");
+ rc = of_platform_bus_create(root, matches, &dev->dev);
+ goto bail;
+ }
+ for (child = NULL; (child = of_get_next_child(root, child)); ) {
+ if (!of_match_node(matches, child))
+ continue;
+
+ pr_debug(" match: %s\n", child->full_name);
+ dev = of_platform_device_create(child, NULL, parent);
+ if (dev == NULL)
+ rc = -ENOMEM;
+ else
+ rc = of_platform_bus_create(child, matches, &dev->dev);
+ if (rc) {
+ of_node_put(child);
+ break;
+ }
+ }
+ bail:
+ of_node_put(root);
+ return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return to_of_device(dev)->node == data;
+}
+
+struct of_device *of_find_device_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, np, of_dev_node_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+static int of_dev_phandle_match(struct device *dev, void *data)
+{
+ phandle *ph = data;
+ return to_of_device(dev)->node->linux_phandle == *ph;
+}
+
+struct of_device *of_find_device_by_phandle(phandle ph)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&of_platform_bus_type,
+ NULL, &ph, of_dev_phandle_match);
+ if (dev)
+ return to_of_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_phandle);
+
+
+#ifdef CONFIG_PPC_OF_PLATFORM_PCI
+
+/* The probing of PCI controllers from of_platform is currently
+ * 64-bit only, mostly due to gratuitous differences between
+ * the 32-bit and 64-bit PCI code on PowerPC, and to the 32-bit one
+ * lacking some bits needed here.
+ */
+
+static int __devinit of_pci_phb_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct pci_controller *phb;
+
+ /* Check if we can do that ... */
+ if (ppc_md.pci_setup_phb == NULL)
+ return -ENODEV;
+
+ printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+
+ /* Alloc and setup PHB data structure */
+ phb = pcibios_alloc_controller(dev->node);
+ if (!phb)
+ return -ENODEV;
+
+ /* Setup parent in sysfs */
+ phb->parent = &dev->dev;
+
+ /* Setup the PHB using arch provided callback */
+ if (ppc_md.pci_setup_phb(phb)) {
+ pcibios_free_controller(phb);
+ return -ENODEV;
+ }
+
+ /* Process "ranges" property */
+ pci_process_bridge_OF_ranges(phb, dev->node, 0);
+
+ /* Setup IO space.
+ * This will not work properly for ISA IOs; something needs to be done
+ * about it if we ever generalize that way of probing PCI bridges
+ */
+ pci_setup_phb_io_dynamic(phb, 0);
+
+ /* Init pci_dn data structures */
+ pci_devs_phb_init_dynamic(phb);
+
+ /* Register devices with EEH */
+#ifdef CONFIG_EEH
+ if (dev->node->child)
+ eeh_add_device_tree_early(dev->node);
+#endif /* CONFIG_EEH */
+
+ /* Scan the bus */
+ scan_phb(phb);
+
+ /* Claim resources. This might need some rework as well, depending on
+ * whether we are doing probe-only or not, e.g. assigning unassigned
+ * resources etc...
+ */
+ pcibios_claim_one_bus(phb->bus);
+
+ /* Finish EEH setup */
+#ifdef CONFIG_EEH
+ eeh_add_device_tree_late(phb->bus);
+#endif
+
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
+ return 0;
+}
+
+static struct of_device_id of_pci_phb_ids[] = {
+ { .type = "pci", },
+ { .type = "pcix", },
+ { .type = "pcie", },
+ { .type = "pciex", },
+ { .type = "ht", },
+ {}
+};
+
+static struct of_platform_driver of_pci_phb_driver = {
+ .name = "of-pci",
+ .match_table = of_pci_phb_ids,
+ .probe = of_pci_phb_probe,
+ .driver = {
+ .multithread_probe = 1,
+ },
+};
+
+static __init int of_pci_phb_init(void)
+{
+ return of_register_platform_driver(&of_pci_phb_driver);
+}
+
+device_initcall(of_pci_phb_init);
+
+#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
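As a usage illustration only (every example_* identifier and the "example,engine" compatible string are invented), a bus driver built on the renamed registration API above would look roughly like this; a platform's setup code would similarly call of_platform_bus_probe(NULL, NULL, NULL) during boot to create devices for the default bus list.

#include <linux/init.h>
#include <asm/of_device.h>
#include <asm/of_platform.h>

static struct of_device_id example_engine_ids[] = {
	{ .compatible = "example,engine", },
	{},
};

static int example_engine_probe(struct of_device *dev,
				const struct of_device_id *match)
{
	/* map registers, request the interrupt, etc. */
	return 0;
}

static struct of_platform_driver example_engine_driver = {
	.name		= "example-engine",
	.match_table	= example_engine_ids,
	.probe		= example_engine_probe,
};

static int __init example_engine_init(void)
{
	return of_register_platform_driver(&example_engine_driver);
}
device_initcall(example_engine_init);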
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0d9ff72e285..2f54cd81dea 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/list.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -99,7 +100,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
continue;
if (res->end == 0xffffffff) {
DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
- pci_name(dev), i, res->start, res->end);
+ pci_name(dev), i, (u64)res->start, (u64)res->end);
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
@@ -115,11 +116,9 @@ pcibios_fixup_resources(struct pci_dev *dev)
if (offset != 0) {
res->start += offset;
res->end += offset;
-#ifdef DEBUG
- printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
- i, res->flags, pci_name(dev),
- res->start - offset, res->start);
-#endif
+ DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
+ i, res->flags, pci_name(dev),
+ (u64)res->start - offset, (u64)res->start);
}
}
@@ -255,7 +254,7 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
}
DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
- res->start, res->end, res->flags, pr);
+ (u64)res->start, (u64)res->end, res->flags, pr);
if (pr) {
if (request_resource(pr, res) == 0)
continue;
@@ -306,7 +305,7 @@ reparent_resources(struct resource *parent, struct resource *res)
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
- p->name, p->start, p->end, res->name);
+ p->name, (u64)p->start, (u64)p->end, res->name);
}
return 0;
}
@@ -362,7 +361,7 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
}
if (request_resource(pr, res)) {
DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
- res->start, res->end);
+ (u64)res->start, (u64)res->end);
return -1; /* "can't happen" */
}
update_bridge_base(bus, i);
@@ -480,14 +479,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
struct resource *pr, *r = &dev->resource[idx];
DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
- pci_name(dev), idx, r->start, r->end, r->flags);
+ pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d"
" of device %s\n", idx, pci_name(dev));
if (pr)
DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
- pr, pr->start, pr->end, pr->flags);
+ pr, (u64)pr->start, (u64)pr->end, pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
@@ -960,7 +959,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->flags = IORESOURCE_IO;
res->start = ranges[2];
DBG("PCI: IO 0x%llx -> 0x%llx\n",
- res->start, res->start + size - 1);
+ (u64)res->start, (u64)res->start + size - 1);
break;
case 2: /* memory space */
memno = 0;
@@ -982,7 +981,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->flags |= IORESOURCE_PREFETCH;
res->start = ranges[na+2];
DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
- res->start, res->start + size - 1);
+ (u64)res->start, (u64)res->start + size - 1);
}
break;
}
@@ -1268,7 +1267,10 @@ pcibios_init(void)
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
- bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
+ bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
+ hose->ops, hose);
+ if (bus)
+ pci_bus_add_devices(bus);
hose->last_busno = bus->subordinate;
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
@@ -1282,10 +1284,6 @@ pcibios_init(void)
if (pci_assign_all_buses && have_of)
pcibios_make_OF_bus_map();
- /* Do machine dependent PCI interrupt routing */
- if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
- pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
-
/* Call machine dependent fixup */
if (ppc_md.pcibios_fixup)
ppc_md.pcibios_fixup();
@@ -1308,25 +1306,6 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-unsigned char __init
-common_swizzle(struct pci_dev *dev, unsigned char *pinp)
-{
- struct pci_controller *hose = dev->sysdata;
-
- if (dev->bus->number != hose->first_busno) {
- u8 pin = *pinp;
- do {
- pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
- /* Move up the chain of bridges. */
- dev = dev->bus->self;
- } while (dev->bus->self);
- *pinp = pin;
-
- /* The slot is the idsel of the last bridge. */
- }
- return PCI_SLOT(dev->devfn);
-}
-
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
unsigned long start, unsigned long size)
{
@@ -1338,6 +1317,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
unsigned long io_offset;
struct resource *res;
+ struct pci_dev *dev;
int i;
io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -1390,8 +1370,16 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
}
}
+ /* Platform specific bus fixups */
if (ppc_md.pcibios_fixup_bus)
ppc_md.pcibios_fixup_bus(bus);
+
+ /* Read default IRQs and fixup if necessary */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+ }
}
char __init *pcibios_setup(char *str)
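To show what the new per-device hook is for, a hedged sketch of a board port's pci_irq_fixup implementation; the vendor/device IDs and the interrupt number are purely illustrative, and the board's machdep definition would point ppc_md.pci_irq_fixup at this function.

#include <linux/pci.h>

static void example_board_pci_irq_fixup(struct pci_dev *dev)
{
	/* firmware on this hypothetical board misreports one device's IRQ */
	if (dev->vendor == 0x1234 && dev->device == 0x5678)
		dev->irq = 9;
}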
@@ -1571,7 +1559,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
*offset += hose->pci_mem_offset;
res_bit = IORESOURCE_MEM;
} else {
- io_offset = hose->io_base_virt - ___IO_BASE;
+ io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
*offset += io_offset;
res_bit = IORESOURCE_IO;
}
@@ -1826,7 +1814,8 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
return;
if (rsrc->flags & IORESOURCE_IO)
- offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
+ offset = (void __iomem *)_IO_BASE - hose->io_base_virt
+ + hose->io_base_phys;
*start = rsrc->start + offset;
*end = rsrc->end + offset;
@@ -1845,35 +1834,6 @@ pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
res->child = NULL;
}
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len)
- return NULL;
- if (max && len > max)
- len = max;
- if (flags & IORESOURCE_IO)
- return ioport_map(start, len);
- if (flags & IORESOURCE_MEM)
- /* Not checking IORESOURCE_CACHEABLE because PPC does
- * not currently distinguish between ioremap and
- * ioremap_nocache.
- */
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
- /* Nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller* hose = hose_head;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9bae8a5bf67..6fa9a0a5c8d 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -42,11 +42,9 @@
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;
-#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
static void phbs_remap_io(void);
-#endif
/* pci_io_base -- the base address from which io bars are offsets.
* This is the lowest I/O base address (so bar values are always positive),
@@ -63,7 +61,7 @@ void iSeries_pcibios_init(void);
LIST_HEAD(hose_list);
-struct dma_mapping_ops pci_dma_ops;
+struct dma_mapping_ops *pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);
int global_phb_number; /* Global phb counter */
@@ -212,6 +210,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
void pcibios_free_controller(struct pci_controller *phb)
{
+ spin_lock(&hose_spinlock);
+ list_del(&phb->list_node);
+ spin_unlock(&hose_spinlock);
+
if (phb->is_dynamic)
kfree(phb);
}
@@ -251,7 +253,6 @@ static void __init pcibios_claim_of_setup(void)
pcibios_claim_one_bus(b);
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
const u32 *prop;
@@ -329,7 +330,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_dev *dev;
const char *type;
- dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
if (!dev)
return NULL;
type = get_property(node, "device_type", NULL);
@@ -338,7 +339,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
DBG(" create device, devfn: %x, type: %s\n", devfn, type);
- memset(dev, 0, sizeof(struct pci_dev));
dev->bus = bus;
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -506,7 +506,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
void __devinit scan_phb(struct pci_controller *hose)
{
@@ -517,7 +516,7 @@ void __devinit scan_phb(struct pci_controller *hose)
DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
- bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
if (bus == NULL) {
printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
hose->global_number);
@@ -540,7 +539,7 @@ void __devinit scan_phb(struct pci_controller *hose)
}
mode = PCI_PROBE_NORMAL;
-#ifdef CONFIG_PPC_MULTIPLATFORM
+
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
DBG(" probe mode: %d\n", mode);
@@ -548,7 +547,7 @@ void __devinit scan_phb(struct pci_controller *hose)
bus->subordinate = hose->last_busno;
of_scan_bus(node, bus);
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+
if (mode == PCI_PROBE_NORMAL)
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}
@@ -592,11 +591,9 @@ static int __init pcibios_init(void)
if (ppc64_isabridge_dev != NULL)
printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
-#ifdef CONFIG_PPC_MULTIPLATFORM
if (!firmware_has_feature(FW_FEATURE_ISERIES))
/* map in PCI I/O space */
phbs_remap_io();
-#endif
printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
@@ -873,8 +870,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev)
device_create_file(&pdev->dev, &dev_attr_devspec);
}
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1
@@ -975,11 +970,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
res = NULL;
pci_space = ranges[0];
pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-
- cpu_phys_addr = ranges[3];
- if (na >= 2)
- cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
-
+ cpu_phys_addr = of_translate_address(dev, &ranges[3]);
size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
ranges += np;
if (size == 0)
@@ -1145,7 +1136,7 @@ int unmap_bus_range(struct pci_bus *bus)
if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
return 1;
- if (iounmap_explicit((void __iomem *) start_virt, size))
+ if (__iounmap_explicit((void __iomem *) start_virt, size))
return 1;
return 0;
@@ -1213,23 +1204,52 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+ struct dev_archdata *sd = &dev->dev.archdata;
+
+ sd->of_node = pci_device_to_OF_node(dev);
+
+ DBG("PCI device %s OF node: %s\n", pci_name(dev),
+ sd->of_node ? sd->of_node->full_name : "<none>");
+
+ sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_NUMA
+ sd->numa_node = pcibus_to_node(dev->bus);
+#else
+ sd->numa_node = -1;
+#endif
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
static void __devinit do_bus_setup(struct pci_bus *bus)
{
struct pci_dev *dev;
- ppc_md.iommu_bus_setup(bus);
+ if (ppc_md.pci_dma_bus_setup)
+ ppc_md.pci_dma_bus_setup(bus);
list_for_each_entry(dev, &bus->devices, bus_list)
- ppc_md.iommu_dev_setup(dev);
+ pcibios_setup_new_device(dev);
- if (ppc_md.irq_bus_setup)
- ppc_md.irq_bus_setup(bus);
+ /* Read default IRQs and fixup if necessary */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
+ }
}
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bus);
+
+ DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
if (dev && pci_probe_only &&
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -1343,8 +1363,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
return NULL;
}
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose, *tmp;
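With pci_dma_ops now a pointer rather than a structure that gets copied, platform init code selects its DMA ops by simple assignment; a minimal sketch, where example_platform_dma_ops stands in for whatever ops structure the platform really provides and the header is the assumed location of the pci_dma_ops declaration.

#include <linux/init.h>
#include <asm/dma-mapping.h>
#include <asm/pci.h>		/* assumed to declare pci_dma_ops */

extern struct dma_mapping_ops example_platform_dma_ops;	/* hypothetical */

static void __init example_platform_pci_dma_init(void)
{
	pci_dma_ops = &example_platform_dma_ops;
}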
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
deleted file mode 100644
index 72ce082ce73..00000000000
--- a/arch/powerpc/kernel/pci_direct_iommu.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Support for DMA from PCI devices to main memory on
- * machines without an iommu or with directly addressable
- * RAM (typically a pmac with 2Gb of RAM or less)
- *
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/ppc-pci.h>
-
-static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(flag, get_order(size));
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret);
- }
- return ret;
-}
-
-static void pci_direct_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
- size_t size, enum dma_data_direction direction)
-{
- return virt_to_abs(ptr);
-}
-
-static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = page_to_phys(sg->page) + sg->offset;
- sg->dma_length = sg->length;
- }
-
- return nents;
-}
-
-static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_dma_supported(struct device *dev, u64 mask)
-{
- return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops pci_direct_ops = {
- .alloc_coherent = pci_direct_alloc_coherent,
- .free_coherent = pci_direct_free_coherent,
- .map_single = pci_direct_map_single,
- .unmap_single = pci_direct_unmap_single,
- .map_sg = pci_direct_map_sg,
- .unmap_sg = pci_direct_unmap_sg,
- .dma_supported = pci_direct_dma_supported,
-};
-
-void __init pci_direct_iommu_init(void)
-{
- pci_dma_ops = pci_direct_ops;
-}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
deleted file mode 100644
index 0688b2534ac..00000000000
--- a/arch/powerpc/kernel/pci_iommu.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup, new allocation schemes:
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *
- * Dynamic DMA mapping support, platform-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-
-/*
- * We can use ->sysdata directly and avoid the extra work in
- * pci_device_to_OF_node since ->sysdata will have been initialised
- * in the iommu init code for all devices.
- */
-#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-
-static inline struct iommu_table *device_to_table(struct device *hwdev)
-{
- struct pci_dev *pdev;
-
- if (!hwdev) {
- pdev = ppc64_isabridge_dev;
- if (!pdev)
- return NULL;
- } else
- pdev = to_pci_dev(hwdev);
-
- return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-}
-
-
-static inline unsigned long device_to_mask(struct device *hwdev)
-{
- struct pci_dev *pdev;
-
- if (!hwdev) {
- pdev = ppc64_isabridge_dev;
- if (!pdev) /* This is the best guess we can do */
- return 0xfffffffful;
- } else
- pdev = to_pci_dev(hwdev);
-
- if (pdev->dma_mask)
- return pdev->dma_mask;
-
- /* Assume devices without mask can take 32 bit addresses */
- return 0xfffffffful;
-}
-
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
- device_to_mask(hwdev), flag,
- pcibus_to_node(to_pci_dev(hwdev)->bus));
-}
-
-static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer. The user buffer must be
- * contiguous real kernel storage (not vmalloc). The address of the buffer
- * passed here is the kernel (virtual) address of the buffer. The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
- return iommu_map_single(device_to_table(hwdev), vaddr, size,
- device_to_mask(hwdev), direction);
-}
-
-
-static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
-}
-
-
-static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- return iommu_map_sg(pdev, device_to_table(pdev), sglist,
- nelems, device_to_mask(pdev), direction);
-}
-
-static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct device *dev, u64 mask)
-{
- struct iommu_table *tbl = device_to_table(dev);
-
- if (!tbl || tbl->it_offset > mask) {
- printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
- if (tbl)
- printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
- mask, tbl->it_offset);
- else
- printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
- mask);
- return 0;
- } else
- return 1;
-}
-
-struct dma_mapping_ops pci_iommu_ops = {
- .alloc_coherent = pci_iommu_alloc_coherent,
- .free_coherent = pci_iommu_free_coherent,
- .map_single = pci_iommu_map_single,
- .unmap_single = pci_iommu_unmap_single,
- .map_sg = pci_iommu_map_sg,
- .unmap_sg = pci_iommu_unmap_sg,
- .dma_supported = pci_iommu_dma_supported,
-};
-
-void pci_iommu_init(void)
-{
- pci_dma_ops = pci_iommu_ops;
-}
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 807193a3c78..9179f0739ea 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -49,6 +49,10 @@
#include <asm/commproc.h>
#endif
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(local_irq_restore);
+#endif
+
#ifdef CONFIG_PPC32
extern void transfer_to_handler(void);
extern void do_IRQ(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bdb412d4b74..c18dbe77fdc 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -538,35 +538,31 @@ static struct ibm_pa_feature {
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
-static void __init check_cpu_pa_features(unsigned long node)
+static void __init scan_features(unsigned long node, unsigned char *ftrs,
+ unsigned long tablelen,
+ struct ibm_pa_feature *fp,
+ unsigned long ft_size)
{
- unsigned char *pa_ftrs;
- unsigned long len, tablelen, i, bit;
-
- pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
- if (pa_ftrs == NULL)
- return;
+ unsigned long i, len, bit;
/* find descriptor with type == 0 */
for (;;) {
if (tablelen < 3)
return;
- len = 2 + pa_ftrs[0];
+ len = 2 + ftrs[0];
if (tablelen < len)
return; /* descriptor 0 not found */
- if (pa_ftrs[1] == 0)
+ if (ftrs[1] == 0)
break;
tablelen -= len;
- pa_ftrs += len;
+ ftrs += len;
}
/* loop over bits we know about */
- for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
- struct ibm_pa_feature *fp = &ibm_pa_features[i];
-
- if (fp->pabyte >= pa_ftrs[0])
+ for (i = 0; i < ft_size; ++i, ++fp) {
+ if (fp->pabyte >= ftrs[0])
continue;
- bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
+ bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
@@ -577,16 +573,59 @@ static void __init check_cpu_pa_features(unsigned long node)
}
}
+static void __init check_cpu_pa_features(unsigned long node)
+{
+ unsigned char *pa_ftrs;
+ unsigned long tablelen;
+
+ pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
+ if (pa_ftrs == NULL)
+ return;
+
+ scan_features(node, pa_ftrs, tablelen,
+ ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+}
+
+static struct feature_property {
+ const char *name;
+ u32 min_value;
+ unsigned long cpu_feature;
+ unsigned long cpu_user_ftr;
+} feature_properties[] __initdata = {
+#ifdef CONFIG_ALTIVEC
+ {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+ {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+ {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
+ {"ibm,purr", 1, CPU_FTR_PURR, 0},
+ {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
+#endif /* CONFIG_PPC64 */
+};
+
+static void __init check_cpu_feature_properties(unsigned long node)
+{
+ unsigned long i;
+ struct feature_property *fp = feature_properties;
+ const u32 *prop;
+
+ for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
+ prop = of_get_flat_dt_prop(node, fp->name, NULL);
+ if (prop && *prop >= fp->min_value) {
+ cur_cpu_spec->cpu_features |= fp->cpu_feature;
+ cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
+ }
+ }
+}
+
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
static int logical_cpuid = 0;
char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-#ifdef CONFIG_ALTIVEC
- u32 *prop;
-#endif
- u32 *intserv;
+ const u32 *prop;
+ const u32 *intserv;
int i, nthreads;
unsigned long len;
int found = 0;
@@ -643,24 +682,27 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
intserv[i]);
boot_cpuid = logical_cpuid;
set_hard_smp_processor_id(boot_cpuid, intserv[i]);
- }
-#ifdef CONFIG_ALTIVEC
- /* Check if we have a VMX and eventually update CPU features */
- prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
- if (prop && (*prop) > 0) {
- cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
- cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
- }
-
- /* Same goes for Apple's "altivec" property */
- prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
- if (prop) {
- cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
- cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+ /*
+ * PAPR defines "logical" PVR values for cpus that
+ * meet various levels of the architecture:
+ * 0x0f000001 Architecture version 2.04
+ * 0x0f000002 Architecture version 2.05
+ * If the cpu-version property in the cpu node contains
+ * such a value, we call identify_cpu again with the
+ * logical PVR value in order to use the cpu feature
+ * bits appropriate for the architecture level.
+ *
+ * A POWER6 partition in "POWER6 architected" mode
+ * uses the 0x0f000002 PVR value; in POWER5+ mode
+ * it uses 0x0f000001.
+ */
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (*prop & 0xff000000) == 0x0f000000)
+ identify_cpu(0, *prop);
}
-#endif /* CONFIG_ALTIVEC */
+ check_cpu_feature_properties(node);
check_cpu_pa_features(node);
#ifdef CONFIG_PPC_PSERIES
@@ -1674,6 +1716,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
return NULL;
}
+EXPORT_SYMBOL(of_get_cpu_node);
#ifdef DEBUG
static struct debugfs_blob_wrapper flat_dt_blob;
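The bit extraction in scan_features() counts bits MSB-first within each pa-features byte; a standalone worked example (plain user-space C with made-up table bytes) showing that byte 0, bit 1 corresponds to mask 0x40:

#include <stdio.h>

int main(void)
{
	/* descriptor: count of feature bytes, type 0, then the feature bytes */
	unsigned char ftrs[] = { 2, 0, 0x40, 0x00 };
	unsigned int pabyte = 0, pabit = 1;
	unsigned int bit = (ftrs[2 + pabyte] >> (7 - pabit)) & 1;

	printf("feature bit = %u\n", bit);	/* prints 1 */
	return 0;
}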
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b91761639d9..46cf32670dd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -173,8 +173,8 @@ static unsigned long __initdata dt_string_start, dt_string_end;
static unsigned long __initdata prom_initrd_start, prom_initrd_end;
#ifdef CONFIG_PPC64
-static int __initdata iommu_force_on;
-static int __initdata ppc64_iommu_off;
+static int __initdata prom_iommu_force_on;
+static int __initdata prom_iommu_off;
static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif
@@ -582,9 +582,9 @@ static void __init early_cmdline_parse(void)
while (*opt && *opt == ' ')
opt++;
if (!strncmp(opt, RELOC("off"), 3))
- RELOC(ppc64_iommu_off) = 1;
+ RELOC(prom_iommu_off) = 1;
else if (!strncmp(opt, RELOC("force"), 5))
- RELOC(iommu_force_on) = 1;
+ RELOC(prom_iommu_force_on) = 1;
}
#endif
}
@@ -627,6 +627,7 @@ static void __init early_cmdline_parse(void)
/* Option vector 3: processor options supported */
#define OV3_FP 0x80 /* floating point */
#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
/* Option vector 5: PAPR/OF options supported */
#define OV5_LPAR 0x80 /* logical partitioning supported */
@@ -642,6 +643,7 @@ static void __init early_cmdline_parse(void)
static unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
+ W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
5 - 1, /* 5 option vectors */
@@ -668,7 +670,7 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 3: processor options supported */
3 - 2, /* length */
0, /* don't ignore, don't halt */
- OV3_FP | OV3_VMX,
+ OV3_FP | OV3_VMX | OV3_DFP,
/* option vector 4: IBM PAPR implementation */
2 - 2, /* length */
@@ -1167,7 +1169,7 @@ static void __init prom_initialize_tce_table(void)
u64 local_alloc_top, local_alloc_bottom;
u64 i;
- if (RELOC(ppc64_iommu_off))
+ if (RELOC(prom_iommu_off))
return;
prom_debug("starting prom_initialize_tce_table\n");
@@ -2283,11 +2285,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Fill in some infos for use by the kernel later on
*/
#ifdef CONFIG_PPC64
- if (RELOC(ppc64_iommu_off))
+ if (RELOC(prom_iommu_off))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
NULL, 0);
- if (RELOC(iommu_force_on))
+ if (RELOC(prom_iommu_force_on))
prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
NULL, 0);
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 603dff3ad62..0dfbe1cd28e 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -25,6 +25,12 @@
#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
(ns) > 0)
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev,
+ const u32 *addrp, u64 size, unsigned int flags,
+ struct resource *r);
+
+
/* Debug utility */
#ifdef DEBUG
static void of_dump_addr(const char *s, const u32 *addr, int na)
@@ -101,6 +107,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
}
+#ifdef CONFIG_PCI
/*
* PCI bus specific translator
*/
@@ -153,15 +160,156 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr)
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
+ break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
+ break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+ unsigned int *flags)
+{
+ const u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci")) {
+ of_node_put(parent);
+ return NULL;
+ }
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+ if (size)
+ *size = of_read_number(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ const u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_pci_address(dev, bar, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ u32 laddr[3];
+ u8 pin;
+ int rc;
+
+ /* Check if we have a device node, if yes, fallback to standard OF
+ * parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn)
+ return of_irq_map_one(dn, 0, out_irq);
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+ * interrupt spec.  We assume #interrupt-cells is 1, which is standard
+ * for PCI. If yours differs, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+#ifdef CONFIG_PPC64
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+#else
+ struct pci_controller *host;
+ host = pci_bus_to_host(pdev->bus);
+ ppnode = host ? host->arch_data : NULL;
+#endif
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't include
+ * the bus number as part of the matching.
+ * You should still be careful about that though if you intend
+ * to rely on this function (you ship a firmware that doesn't
+ * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+ pdev = ppdev;
+ }
+
+ laddr[0] = (pdev->bus->number << 16)
+ | (pdev->devfn << 8);
+ laddr[1] = laddr[2] = 0;
+ return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
+#endif /* CONFIG_PCI */
+
/*
* ISA bus specific translator
*/
@@ -223,6 +371,7 @@ static unsigned int of_bus_isa_get_flags(const u32 *addr)
*/
static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
/* PCI */
{
.name = "pci",
@@ -233,6 +382,7 @@ static struct of_bus of_busses[] = {
.translate = of_bus_pci_translate,
.get_flags = of_bus_pci_get_flags,
},
+#endif /* CONFIG_PCI */
/* ISA */
{
.name = "isa",
@@ -445,48 +595,6 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
- unsigned int *flags)
-{
- const u32 *prop;
- unsigned int psize;
- struct device_node *parent;
- struct of_bus *bus;
- int onesize, i, na, ns;
-
- /* Get parent & match bus type */
- parent = of_get_parent(dev);
- if (parent == NULL)
- return NULL;
- bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci")) {
- of_node_put(parent);
- return NULL;
- }
- bus->count_cells(dev, &na, &ns);
- of_node_put(parent);
- if (!OF_CHECK_COUNTS(na, ns))
- return NULL;
-
- /* Get "reg" or "assigned-addresses" property */
- prop = get_property(dev, bus->addresses, &psize);
- if (prop == NULL)
- return NULL;
- psize /= 4;
-
- onesize = na + ns;
- for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
- if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
- if (size)
- *size = of_read_number(prop + na, ns);
- if (flags)
- *flags = bus->get_flags(prop);
- return prop;
- }
- return NULL;
-}
-EXPORT_SYMBOL(of_get_pci_address);
-
static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
u64 size, unsigned int flags,
struct resource *r)
@@ -529,20 +637,6 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
-int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
-{
- const u32 *addrp;
- u64 size;
- unsigned int flags;
-
- addrp = of_get_pci_address(dev, bar, &size, &flags);
- if (addrp == NULL)
- return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
-}
-EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
unsigned long *busno, unsigned long *phys, unsigned long *size)
{
@@ -898,87 +992,3 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
return res;
}
EXPORT_SYMBOL_GPL(of_irq_map_one);
-
-#ifdef CONFIG_PCI
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- u32 laddr[3];
- u8 pin;
- int rc;
-
- /* Check if we have a device node, if yes, fallback to standard OF
- * parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn)
- return of_irq_map_one(dn, 0, out_irq);
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
- ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
- struct pci_controller *host;
- host = pci_bus_to_host(pdev->bus);
- ppnode = host ? host->arch_data : NULL;
-#endif
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't include
- * the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
-
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
- */
- lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
- pdev = ppdev;
- }
-
- laddr[0] = (pdev->bus->number << 16)
- | (pdev->devfn << 8);
- laddr[1] = laddr[2] = 0;
- return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6ef80d4e38d..387ed0d9ad6 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -810,9 +810,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
/* This version can't take the spinlock, because it never returns */
-
-struct rtas_args rtas_stop_self_args = {
+static struct rtas_args rtas_stop_self_args = {
/* The token is initialized for real in setup_system() */
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
@@ -834,6 +834,7 @@ void rtas_stop_self(void)
panic("Alas, I survived.\n");
}
+#endif
/*
* Call early during boot, before mem init or bootmem, to retrieve the RTAS
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 6f6fc977cb3..7d0f13fecc0 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */
static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
/* Use slab cache to guarantee 4k alignment */
-static kmem_cache_t *flash_block_cache = NULL;
+static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
}
/* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
{
memset(ptr, 0, RTAS_BLK_SIZE);
}
@@ -681,14 +681,12 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
int *status;
int token;
- dp->data = kmalloc(buf_size, GFP_KERNEL);
+ dp->data = kzalloc(buf_size, GFP_KERNEL);
if (dp->data == NULL) {
remove_flash_pde(dp);
return -ENOMEM;
}
- memset(dp->data, 0, buf_size);
-
/*
* This code assumes that the status int is the first member of the
* struct
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index b4a0de79c06..ace9f4c86e6 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -38,6 +38,7 @@
#include <asm/rtas.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
+#include <asm/eeh.h>
/* RTAS tokens */
static int read_pci_config;
@@ -231,32 +232,13 @@ void __init init_pci_config_tokens (void)
unsigned long __devinit get_phb_buid (struct device_node *phb)
{
- int addr_cells;
- const unsigned int *buid_vals;
- unsigned int len;
- unsigned long buid;
-
- if (ibm_read_pci_config == -1) return 0;
+ struct resource r;
- /* PHB's will always be children of the root node,
- * or so it is promised by the current firmware. */
- if (phb->parent == NULL)
+ if (ibm_read_pci_config == -1)
return 0;
- if (phb->parent->parent)
- return 0;
-
- buid_vals = get_property(phb, "reg", &len);
- if (buid_vals == NULL)
+ if (of_address_to_resource(phb, 0, &r))
return 0;
-
- addr_cells = prom_n_addr_cells(phb);
- if (addr_cells == 1) {
- buid = (unsigned long) buid_vals[0];
- } else {
- buid = (((unsigned long)buid_vals[0]) << 32UL) |
- (((unsigned long)buid_vals[1]) & 0xffffffff);
- }
- return buid;
+ return r.start;
}
static int phb_set_bus_ranges(struct device_node *dev,
@@ -276,8 +258,10 @@ static int phb_set_bus_ranges(struct device_node *dev,
return 0;
}
-int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb)
+int __devinit rtas_setup_phb(struct pci_controller *phb)
{
+ struct device_node *dev = phb->arch_data;
+
if (is_python(dev))
python_countermeasures(dev);
@@ -309,7 +293,7 @@ unsigned long __init find_and_init_phbs(void)
phb = pcibios_alloc_controller(node);
if (!phb)
continue;
- setup_phb(node, phb);
+ rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, node, 0);
pci_setup_phb_io(phb, index == 0);
index++;
@@ -381,7 +365,6 @@ int pcibios_remove_root_bus(struct pci_controller *phb)
}
}
- list_del(&phb->list_node);
pcibios_free_controller(phb);
return 0;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a4c2964a3ca..61c65d19ef0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -63,10 +63,6 @@ unsigned int DMA_MODE_WRITE;
int have_of = 1;
-#ifdef CONFIG_PPC_MULTIPLATFORM
-dev_t boot_dev;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
#endif
@@ -101,7 +97,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- spec = identify_cpu(offset);
+ spec = identify_cpu(offset, mfspr(SPRN_PVR));
do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 16278968dab..3733de30e84 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
+#include <linux/pci.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -71,7 +72,6 @@
int have_of = 1;
int boot_cpuid = 0;
-dev_t boot_dev;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
@@ -171,7 +171,7 @@ void __init setup_paca(int cpu)
void __init early_setup(unsigned long dt_ptr)
{
/* Identify CPU type */
- identify_cpu(0);
+ identify_cpu(0, mfspr(SPRN_PVR));
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
setup_paca(0);
@@ -226,8 +226,8 @@ void early_setup_secondary(void)
{
struct paca_struct *lpaca = get_paca();
- /* Mark enabled in PACA */
- lpaca->proc_enabled = 0;
+ /* Mark interrupts enabled in PACA */
+ lpaca->soft_enabled = 0;
/* Initialize hash table for that CPU */
htab_initialize_secondary();
@@ -392,7 +392,8 @@ void __init setup_system(void)
* setting up the hash table pointers. It also sets up some interrupt-mapping
* related options that will be used by finish_device_tree()
*/
- ppc_md.init_early();
+ if (ppc_md.init_early)
+ ppc_md.init_early();
/*
* We can discover serial ports now since the above did setup the
@@ -598,3 +599,10 @@ void __init setup_per_cpu_areas(void)
}
}
#endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
+
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 320353f0926..e4ebe1a6228 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -36,7 +36,7 @@
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#endif
#include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index de59c6c31a5..bc892e69b4f 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -78,7 +78,7 @@ static int __devinit start_contest(int cmd, long offset, int num)
{
int i, score=0;
u64 tb;
- long mark;
+ u64 mark;
tbsync->cmd = cmd;
@@ -116,8 +116,7 @@ void __devinit smp_generic_give_timebase(void)
printk("Synchronizing timebase\n");
/* if this fails then this kernel won't work anyway... */
- tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
- memset( tbsync, 0, sizeof(*tbsync) );
+ tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
mb();
running = 1;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 35c6309bdb7..9b28c238b6c 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -65,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_sibling_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d15c33e9595..03a2a2f30d6 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -51,6 +51,7 @@
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/ppc-pci.h>
+#include <asm/syscalls.h>
/* readdir & getdents */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index d45a168bdac..63ed265b7f0 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -200,10 +200,9 @@ static void register_cpu_online(unsigned int cpu)
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
-#ifndef CONFIG_PPC_ISERIES
- if (cpu_has_feature(CPU_FTR_SMT))
+ if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+ cpu_has_feature(CPU_FTR_SMT))
sysdev_create_file(s, &attr_smt_snooze_delay);
-#endif
/* PMC stuff */
@@ -240,12 +239,11 @@ static void unregister_cpu_online(unsigned int cpu)
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
- BUG_ON(c->no_control);
+ BUG_ON(!c->hotpluggable);
-#ifndef CONFIG_PPC_ISERIES
- if (cpu_has_feature(CPU_FTR_SMT))
+ if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+ cpu_has_feature(CPU_FTR_SMT))
sysdev_remove_file(s, &attr_smt_snooze_delay);
-#endif
/* PMC stuff */
@@ -299,6 +297,72 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};
+static DEFINE_MUTEX(cpu_mutex);
+
+int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+{
+ int cpu;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev_create_file(get_cpu_sysdev(cpu), attr);
+ }
+
+ mutex_unlock(&cpu_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+
+int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+{
+ int cpu;
+ struct sys_device *sysdev;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev = get_cpu_sysdev(cpu);
+ sysfs_create_group(&sysdev->kobj, attrs);
+ }
+
+ mutex_unlock(&cpu_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+
+
+void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+ int cpu;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+ }
+
+ mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+
+void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+ int cpu;
+ struct sys_device *sysdev;
+
+ mutex_lock(&cpu_mutex);
+
+ for_each_possible_cpu(cpu) {
+ sysdev = get_cpu_sysdev(cpu);
+ sysfs_remove_group(&sysdev->kobj, attrs);
+ }
+
+ mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+
+
/* NUMA stuff */
#ifdef CONFIG_NUMA
@@ -360,10 +424,10 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
- if (!ppc_md.cpu_die)
- c->no_control = 1;
+ if (ppc_md.cpu_die)
+ c->hotpluggable = 1;
- if (cpu_online(cpu) || (c->no_control == 0)) {
+ if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
sysdev_create_file(&c->sysdev, &attr_physical_id);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 46a24de36fe..f6f0c6b07c4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -631,7 +631,8 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
#ifdef CONFIG_PPC_ISERIES
- get_lppaca()->int_dword.fields.decr_int = 0;
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ get_lppaca()->int_dword.fields.decr_int = 0;
#endif
while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -674,7 +675,7 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(next_dec);
#ifdef CONFIG_PPC_ISERIES
- if (hvlpevent_is_pending())
+ if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
process_hvlpevents();
#endif
@@ -774,7 +775,7 @@ int do_settimeofday(struct timespec *tv)
* settimeofday to perform this operation.
*/
#ifdef CONFIG_PPC_ISERIES
- if (first_settimeofday) {
+ if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
iSeries_tb_recal();
first_settimeofday = 0;
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c66b4771ef4..0d4e203fa7a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,10 +53,6 @@
#endif
#include <asm/kexec.h>
-#ifdef CONFIG_PPC64 /* XXX */
-#define _IO_BASE pci_io_base
-#endif
-
#ifdef CONFIG_DEBUGGER
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
@@ -241,7 +237,7 @@ void system_reset_exception(struct pt_regs *regs)
*/
static inline int check_io_access(struct pt_regs *regs)
{
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC32
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
@@ -274,7 +270,7 @@ static inline int check_io_access(struct pt_regs *regs)
return 1;
}
}
-#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 */
return 0;
}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index c913ad5cad2..a4b28c73bba 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -264,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
/* Allocate a VMA structure and fill it up */
- vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma == NULL) {
rc = -ENOMEM;
goto fail_mmapsem;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index ed007878d1b..a80f8f1d2e5 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -81,15 +81,15 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
struct iommu_table *tbl;
unsigned long offset, size;
- dma_window = get_property(dev->dev.platform_data,
- "ibm,my-dma-window", NULL);
+ dma_window = get_property(dev->dev.archdata.of_node,
+ "ibm,my-dma-window", NULL);
if (!dma_window)
return NULL;
tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
- of_parse_dma_window(dev->dev.platform_data, dma_window,
- &tbl->it_index, &offset, &size);
+ of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
+ &tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
@@ -117,7 +117,8 @@ static const struct vio_device_id *vio_match_device(
{
while (ids->type[0] != '\0') {
if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
- device_is_compatible(dev->dev.platform_data, ids->compat))
+ device_is_compatible(dev->dev.archdata.of_node,
+ ids->compat))
return ids;
ids++;
}
@@ -198,9 +199,9 @@ EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
- if (dev->platform_data) {
- /* XXX free TCE table */
- of_node_put(dev->platform_data);
+ if (dev->archdata.of_node) {
+ /* XXX should free TCE table */
+ of_node_put(dev->archdata.of_node);
}
kfree(to_vio_dev(dev));
}
@@ -210,7 +211,7 @@ static void __devinit vio_dev_release(struct device *dev)
* @of_node: The OF node for this device.
*
* Creates and initializes a vio_dev structure from the data in
- * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * of_node and adds it to the list of virtual devices.
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
@@ -240,8 +241,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
if (viodev == NULL)
return NULL;
- viodev->dev.platform_data = of_node_get(of_node);
-
viodev->irq = irq_of_parse_and_map(of_node, 0);
snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
@@ -254,7 +253,10 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
if (unit_address != NULL)
viodev->unit_address = *unit_address;
}
- viodev->iommu_table = vio_build_iommu_table(viodev);
+ viodev->dev.archdata.of_node = of_node_get(of_node);
+ viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+ viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+ viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
@@ -285,10 +287,11 @@ static int __init vio_bus_init(void)
#ifdef CONFIG_PPC_ISERIES
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
iommu_vio_init();
- vio_bus_device.iommu_table = &vio_iommu_table;
+ vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
+ vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
iSeries_vio_dev = &vio_bus_device.dev;
}
-#endif
+#endif /* CONFIG_PPC_ISERIES */
err = bus_register(&vio_bus_type);
if (err) {
@@ -336,7 +339,7 @@ static ssize_t name_show(struct device *dev,
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct device_node *of_node = dev->platform_data;
+ struct device_node *of_node = dev->archdata.of_node;
return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}
@@ -353,62 +356,6 @@ void __devinit vio_unregister_device(struct vio_dev *viodev)
}
EXPORT_SYMBOL(vio_unregister_device);
-static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
- return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
- ~0ul, direction);
-}
-
-static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
- direction);
-}
-
-static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
- nelems, ~0ul, direction);
-}
-
-static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
-}
-
-static void *vio_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
- dma_handle, ~0ul, flag, -1);
-}
-
-static void vio_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
- dma_handle);
-}
-
-static int vio_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-struct dma_mapping_ops vio_dma_ops = {
- .alloc_coherent = vio_alloc_coherent,
- .free_coherent = vio_free_coherent,
- .map_single = vio_map_single,
- .unmap_single = vio_unmap_single,
- .map_sg = vio_map_sg,
- .unmap_sg = vio_unmap_sg,
- .dma_supported = vio_dma_supported,
-};
-
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -422,13 +369,14 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
- struct device_node *dn = dev->platform_data;
+ struct device_node *dn;
const char *cp;
int length;
if (!num_envp)
return -ENOMEM;
+ dn = dev->archdata.of_node;
if (!dn)
return -ENODEV;
cp = get_property(dn, "compatible", &length);
@@ -465,7 +413,7 @@ struct bus_type vio_bus_type = {
*/
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
- return get_property(vdev->dev.platform_data, which, length);
+ return get_property(vdev->dev.archdata.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index e8342d86753..04b98671a06 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -33,6 +33,7 @@ SECTIONS
/* Text and gots */
.text : {
+ _text = .;
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 93441e7a292..38a81967ca0 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -8,7 +8,7 @@ endif
obj-y := fault.o mem.o lmb.o
obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
-hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
hash_utils_64.o hash_low_64.o tlb_64.o \
slb_low.o slb.o stab.o mmap.o imalloc.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e8fa50624b7..03aeb3a4607 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -426,18 +426,21 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* kernel has accessed a bad area */
- printk(KERN_ALERT "Unable to handle kernel paging request for ");
switch (regs->trap) {
- case 0x300:
- case 0x380:
- printk("data at address 0x%08lx\n", regs->dar);
- break;
- case 0x400:
- case 0x480:
- printk("instruction fetch\n");
- break;
- default:
- printk("unknown fault\n");
+ case 0x300:
+ case 0x380:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "data at address 0x%08lx\n", regs->dar);
+ break;
+ case 0x400:
+ case 0x480:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "instruction fetch\n");
+ break;
+ default:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "unknown fault\n");
+ break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c90f124f3c7..6f1016acdbf 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,7 +123,7 @@ static inline void native_unlock_hpte(hpte_t *hptep)
clear_bit(HPTE_LOCK_BIT, word);
}
-long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize)
{
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1915661c2c8..c0d2a694fa3 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -277,7 +277,7 @@ static void __init htab_init_page_sizes(void)
* Not in the device-tree, let's fallback on known size
* list for 16M capable GP & GR
*/
- if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+ if (cpu_has_feature(CPU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
found:
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 506d89768d4..89c836d5480 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -146,6 +146,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
return hugepte_offset(hpdp, addr);
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
pte_t *hugepte = hugepd_page(*hpdp);
@@ -1042,7 +1047,7 @@ repeat:
return err;
}
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3ff374697e3..d12a87ec5ae 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -130,7 +130,7 @@ static int __init setup_kcore(void)
/* GFP_ATOMIC to avoid might_sleep warnings during boot */
kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
if (!kcore_mem)
- panic("mem_init: kmalloc failed\n");
+ panic("%s: kmalloc failed\n", __FUNCTION__);
kclist_add(kcore_mem, __va(base), size);
}
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
/* Hugepages need one extra cache, initialized in hugetlbpage.c. We
* can't put into the tables above, because HPAGE_SHIFT is not compile
* time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
void pgtable_cache_init(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8fcacb0239d..1891dbeeb8e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -141,29 +141,19 @@ void pte_free(struct page *ptepage)
__free_page(ptepage);
}
-#ifndef CONFIG_PHYS_64BIT
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap64);
+EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
{
- phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
- return ioremap64(addr64, size);
+ return __ioremap(addr, size, flags);
}
-#endif /* CONFIG_PHYS_64BIT */
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -264,20 +254,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
- return (void __iomem *) (port + _IO_BASE);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
- /* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa50..16e4ee1c231 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,7 +113,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
}
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
unsigned long ea, unsigned long size,
unsigned long flags)
{
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
unsigned long pa, ea;
void __iomem *ret;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return (void __iomem *)addr;
-
/*
* Choose an address to map it to.
* Once the imalloc system is running, we use it.
@@ -178,9 +168,28 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
return ret;
}
+
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
+{
+ unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+ unsigned long flags)
+{
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
unsigned long size, unsigned long flags)
{
struct vm_struct *area;
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
-void iounmap(volatile void __iomem *token)
+void __iounmap(volatile void __iomem *token)
{
void *addr;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
-
if (!mem_init_done)
return;
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
im_free(addr);
}
+void iounmap(volatile void __iomem *token)
+{
+ if (ppc_md.iounmap)
+ ppc_md.iounmap(token);
+ else
+ __iounmap(token);
+}
+
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
return 0;
}
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
struct vm_struct *area;
unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
void __iomem * reserve_phb_iospace(unsigned long size)
{
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index d3733912adb..224e960650a 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -23,6 +23,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
+#include <asm/firmware.h>
#include <linux/compiler.h>
#ifdef DEBUG
@@ -193,6 +194,7 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
+ unsigned long lflags, vflags;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
@@ -225,11 +227,12 @@ void slb_initialize(void)
#endif
}
+ get_paca()->stab_rr = SLB_NUM_BOLTED;
+
/* On iSeries the bolted entries have already been set up by
* the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
- unsigned long lflags, vflags;
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return;
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
@@ -247,8 +250,4 @@ void slb_initialize(void)
* elsewhere, we'll call _switch() which will bolt in the new
* one. */
asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
-
- get_paca()->stab_rr = SLB_NUM_BOLTED;
}
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 0b5df9c96ae..4ccef2d5530 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -11,6 +11,7 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
+oprofile-$(CONFIG_PPC_CELL_NATIVE) += op_model_cell.o
oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
oprofile-$(CONFIG_6xx) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 63bbef3b63f..b6d82390b6a 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -69,7 +69,10 @@ static void op_powerpc_cpu_start(void *dummy)
static int op_powerpc_start(void)
{
- on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+ if (model->global_start)
+ model->global_start(ctr);
+ if (model->start)
+ on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
return 0;
}
@@ -80,7 +83,10 @@ static inline void op_powerpc_cpu_stop(void *dummy)
static void op_powerpc_stop(void)
{
- on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+ if (model->stop)
+ on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+ if (model->global_stop)
+ model->global_stop();
}
static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
@@ -141,6 +147,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_CELL_NATIVE
+ case PPC_OPROFILE_CELL:
+ model = &op_model_cell;
+ break;
+#endif
case PPC_OPROFILE_RS64:
model = &op_model_rs64;
break;
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
new file mode 100644
index 00000000000..2eb15f38810
--- /dev/null
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -0,0 +1,724 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: David Erb (djerb@us.ibm.com)
+ * Modifications:
+ * Carl Love <carll@us.ibm.com>
+ * Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/oprofile.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/cell-pmu.h>
+#include <asm/cputable.h>
+#include <asm/firmware.h>
+#include <asm/io.h>
+#include <asm/oprofile_impl.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/rtas.h>
+#include <asm/system.h>
+
+#include "../platforms/cell/interrupt.h"
+
+#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
+#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
+
+#define NUM_THREADS 2
+#define VIRT_CNTR_SW_TIME_NS 100000000	/* 0.1 seconds */
+
+struct pmc_cntrl_data {
+ unsigned long vcntr;
+ unsigned long evnts;
+ unsigned long masks;
+ unsigned long enabled;
+};
+
+/*
+ * ibm,cbe-perftools rtas parameters
+ */
+
+struct pm_signal {
+ u16 cpu; /* Processor to modify */
+ u16 sub_unit; /* hw subunit this applies to (if applicable) */
+ u16 signal_group; /* Signal Group to Enable/Disable */
+ u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
+ * Bus Word(s) (bitmask)
+ */
+ u8 bit; /* Trigger/Event bit (if applicable) */
+};
+
+/*
+ * rtas call arguments
+ */
+enum {
+ SUBFUNC_RESET = 1,
+ SUBFUNC_ACTIVATE = 2,
+ SUBFUNC_DEACTIVATE = 3,
+
+ PASSTHRU_IGNORE = 0,
+ PASSTHRU_ENABLE = 1,
+ PASSTHRU_DISABLE = 2,
+};
+
+struct pm_cntrl {
+ u16 enable;
+ u16 stop_at_max;
+ u16 trace_mode;
+ u16 freeze;
+ u16 count_mode;
+};
+
+static struct {
+ u32 group_control;
+ u32 debug_bus_control;
+ struct pm_cntrl pm_cntrl;
+ u32 pm07_cntrl[NR_PHYS_CTRS];
+} pm_regs;
+
+
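+/* decode fields of the per-counter unit mask supplied by oprofile */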
+#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
+#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
+#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
+#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
+#define GET_COUNT_CYCLES(x) (x & 0x00000001)
+#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
+
+
+static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
+
+static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
+
+/* Interpretation of hdw_thread:
+ * 0 - even virtual cpus 0, 2, 4,...
+ * 1 - odd virtual cpus 1, 3, 5, ...
+ */
+static u32 hdw_thread;
+
+static u32 virt_cntr_inter_mask;
+static struct timer_list timer_virt_cntr;
+
+/* pm_signal needs to be global since it is initialized in
+ * cell_reg_setup at the time when the necessary information
+ * is available.
+ */
+static struct pm_signal pm_signal[NR_PHYS_CTRS];
+static int pm_rtas_token;
+
+static u32 reset_value[NR_PHYS_CTRS];
+static int num_counters;
+static int oprofile_running;
+static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;
+
+static u32 ctr_enabled;
+
+static unsigned char trace_bus[4];
+static unsigned char input_bus[2];
+
+/*
+ * Firmware interface functions
+ */
+static int
+rtas_ibm_cbe_perftools(int subfunc, int passthru,
+ void *address, unsigned long length)
+{
+ u64 paddr = __pa(address);
+
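+ /* the buffer's physical address is passed to RTAS as two 32-bit arguments */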
+ return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru,
+ paddr >> 32, paddr & 0xffffffff, length);
+}
+
+static void pm_rtas_reset_signals(u32 node)
+{
+ int ret;
+ struct pm_signal pm_signal_local;
+
+ /* The debug bus is being set to the passthru disable state.
+ * However, the FW still expects at least one legal signal routing
+ * entry or it will return an error on the arguments. If we don't
+ * supply a valid entry, we must ignore all return values. Ignoring
+ * all return values means we might miss an error we should be
+ * concerned about.
+ */
+
+ /* fw expects physical cpu #. */
+ pm_signal_local.cpu = node;
+ pm_signal_local.signal_group = 21;
+ pm_signal_local.bus_word = 1;
+ pm_signal_local.sub_unit = 0;
+ pm_signal_local.bit = 0;
+
+ ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
+ &pm_signal_local,
+ sizeof(struct pm_signal));
+
+ if (ret)
+ printk(KERN_WARNING "%s: rtas returned: %d\n",
+ __FUNCTION__, ret);
+}
+
+static void pm_rtas_activate_signals(u32 node, u32 count)
+{
+ int ret;
+ int j;
+ struct pm_signal pm_signal_local[NR_PHYS_CTRS];
+
+ for (j = 0; j < count; j++) {
+ /* fw expects physical cpu # */
+ pm_signal_local[j].cpu = node;
+ pm_signal_local[j].signal_group = pm_signal[j].signal_group;
+ pm_signal_local[j].bus_word = pm_signal[j].bus_word;
+ pm_signal_local[j].sub_unit = pm_signal[j].sub_unit;
+ pm_signal_local[j].bit = pm_signal[j].bit;
+ }
+
+ ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
+ pm_signal_local,
+ count * sizeof(struct pm_signal));
+
+ if (ret)
+ printk(KERN_WARNING "%s: rtas returned: %d\n",
+ __FUNCTION__, ret);
+}
+
+/*
+ * PM Signal functions
+ */
+static void set_pm_event(u32 ctr, int event, u32 unit_mask)
+{
+ struct pm_signal *p;
+ u32 signal_bit;
+ u32 bus_word, bus_type, count_cycles, polarity, input_control;
+ int j, i;
+
+ if (event == PPU_CYCLES_EVENT_NUM) {
+ /* Special Event: Count all cpu cycles */
+ pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
+ p = &(pm_signal[ctr]);
+ p->signal_group = 21;
+ p->bus_word = 1;
+ p->sub_unit = 0;
+ p->bit = 0;
+ goto out;
+ } else {
+ pm_regs.pm07_cntrl[ctr] = 0;
+ }
+
+ bus_word = GET_BUS_WORD(unit_mask);
+ bus_type = GET_BUS_TYPE(unit_mask);
+ count_cycles = GET_COUNT_CYCLES(unit_mask);
+ polarity = GET_POLARITY(unit_mask);
+ input_control = GET_INPUT_CONTROL(unit_mask);
+ signal_bit = (event % 100);
+
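+ /* the event number encodes the signal group (event / 100) and the bit within that group (event % 100) */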
+ p = &(pm_signal[ctr]);
+
+ p->signal_group = event / 100;
+ p->bus_word = bus_word;
+ p->sub_unit = unit_mask & 0x0000f000;
+
+ pm_regs.pm07_cntrl[ctr] = 0;
+ pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
+ pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
+ pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
+
+ if (input_control == 0) {
+ if (signal_bit > 31) {
+ signal_bit -= 32;
+ if (bus_word == 0x3)
+ bus_word = 0x2;
+ else if (bus_word == 0xc)
+ bus_word = 0x8;
+ }
+
+ if ((bus_type == 0) && p->signal_group >= 60)
+ bus_type = 2;
+ if ((bus_type == 1) && p->signal_group >= 50)
+ bus_type = 0;
+
+ pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
+ } else {
+ pm_regs.pm07_cntrl[ctr] = 0;
+ p->bit = signal_bit;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (bus_word & (1 << i)) {
+ pm_regs.debug_bus_control |=
+ (bus_type << (31 - (2 * i) + 1));
+
+ for (j = 0; j < 2; j++) {
+ if (input_bus[j] == 0xff) {
+ input_bus[j] = i;
+ pm_regs.group_control |=
+ (i << (31 - i));
+ break;
+ }
+ }
+ }
+ }
+out:
+ ;
+}
+
+static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl)
+{
+ /* Oprofile will use 32 bit counters, set bits 7:10 to 0 */
+ u32 val = 0;
+ if (pm_cntrl->enable == 1)
+ val |= CBE_PM_ENABLE_PERF_MON;
+
+ if (pm_cntrl->stop_at_max == 1)
+ val |= CBE_PM_STOP_AT_MAX;
+
+ if (pm_cntrl->trace_mode == 1)
+ val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode);
+
+ if (pm_cntrl->freeze == 1)
+ val |= CBE_PM_FREEZE_ALL_CTRS;
+
+ /* Routine set_count_mode must be called previously to set
+ * the count mode based on the user selection of user and kernel.
+ */
+ val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode);
+ cbe_write_pm(cpu, pm_control, val);
+}
+
+static inline void
+set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl)
+{
+ /* The user must specify user and kernel if they want them. If
+ * neither is specified, OProfile will count in hypervisor mode
+ */
+ if (kernel) {
+ if (user)
+ pm_cntrl->count_mode = CBE_COUNT_ALL_MODES;
+ else
+ pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE;
+ } else {
+ if (user)
+ pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE;
+ else
+ pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE;
+ }
+}
+
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+{
+
+ pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1);
+ cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
+}
+
+/*
+ * Oprofile is expected to collect data on all CPUs simultaneously.
+ * However, there is one set of performance counters per node. There are
+ * two hardware threads or virtual CPUs on each node. Hence, OProfile must
+ * multiplex in time the performance counter collection on the two virtual
+ * CPUs. The multiplexing of the performance counters is done by this
+ * virtual counter routine.
+ *
+ * The pmc_values used below is defined as 'per-cpu' but its use is
+ * more akin to 'per-node'. We need to store two sets of counter
+ * values per node -- one for the previous run and one for the next.
+ * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
+ * pair of per-cpu arrays is used for storing the previous and next
+ * pmc values for a given node.
+ * NOTE: We use the per-cpu variable to improve cache performance.
+ */
+static void cell_virtual_cntr(unsigned long data)
+{
+ /* This routine will alternate loading the virtual counters for
+ * virtual CPUs
+ */
+ int i, prev_hdw_thread, next_hdw_thread;
+ u32 cpu;
+ unsigned long flags;
+
+ /* Make sure that the interrupt handler and
+ * the virt counter are not both playing with
+ * the counters on the same node.
+ */
+
+ spin_lock_irqsave(&virt_cntr_lock, flags);
+
+ prev_hdw_thread = hdw_thread;
+
+ /* switch the cpu handling the interrupts */
+ hdw_thread = 1 ^ hdw_thread;
+ next_hdw_thread = hdw_thread;
+
+ /* The following is done only once per each node, but
+ * we need cpu #, not node #, to pass to the cbe_xxx functions.
+ */
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ /* stop counters, save counter values, restore counts
+ * for previous thread
+ */
+ cbe_disable_pm(cpu);
+ cbe_disable_pm_interrupts(cpu);
+ for (i = 0; i < num_counters; i++) {
+ per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
+ = cbe_read_ctr(cpu, i);
+
+ if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
+ == 0xFFFFFFFF)
+ /* If the cntr value is 0xffffffff, we must
+ * reset that to 0xfffffff0 when the current
+ * thread is restarted. This will generate a new
+ * interrupt and make sure that we never restore
+ * the counters to the max value. If the counters
+ * were restored to the max value, they do not
+ * increment and no interrupts are generated. Hence
+ * no more samples will be collected on that cpu.
+ */
+ cbe_write_ctr(cpu, i, 0xFFFFFFF0);
+ else
+ cbe_write_ctr(cpu, i,
+ per_cpu(pmc_values,
+ cpu +
+ next_hdw_thread)[i]);
+ }
+
+ /* Switch to the other thread. Change the interrupt
+ * and control regs to be scheduled on the CPU
+ * corresponding to the thread to execute.
+ */
+ for (i = 0; i < num_counters; i++) {
+ if (pmc_cntrl[next_hdw_thread][i].enabled) {
+ /* There are some per thread events.
+ * Must do the set event, enable_cntr
+ * for each cpu.
+ */
+ set_pm_event(i,
+ pmc_cntrl[next_hdw_thread][i].evnts,
+ pmc_cntrl[next_hdw_thread][i].masks);
+ enable_ctr(cpu, i,
+ pm_regs.pm07_cntrl);
+ } else {
+ cbe_write_pm07_control(cpu, i, 0);
+ }
+ }
+
+ /* Enable interrupts on the CPU thread that is starting */
+ cbe_enable_pm_interrupts(cpu, next_hdw_thread,
+ virt_cntr_inter_mask);
+ cbe_enable_pm(cpu);
+ }
+
+ spin_unlock_irqrestore(&virt_cntr_lock, flags);
+
+ mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
+}
+
+static void start_virt_cntrs(void)
+{
+ init_timer(&timer_virt_cntr);
+ timer_virt_cntr.function = cell_virtual_cntr;
+ timer_virt_cntr.data = 0UL;
+ timer_virt_cntr.expires = jiffies + HZ / 10;
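+ /* cell_virtual_cntr() re-arms this timer, swapping the counter sets every HZ/10 jiffies (100ms) */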
+ add_timer(&timer_virt_cntr);
+}
+
+/* This function is called once for all cpus combined */
+static void
+cell_reg_setup(struct op_counter_config *ctr,
+ struct op_system_config *sys, int num_ctrs)
+{
+ int i, j, cpu;
+
+ pm_rtas_token = rtas_token("ibm,cbe-perftools");
+ if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+ __FUNCTION__);
+ goto out;
+ }
+
+ num_counters = num_ctrs;
+
+ pm_regs.group_control = 0;
+ pm_regs.debug_bus_control = 0;
+
+ /* setup the pm_control register */
+ memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
+ pm_regs.pm_cntrl.stop_at_max = 1;
+ pm_regs.pm_cntrl.trace_mode = 0;
+ pm_regs.pm_cntrl.freeze = 1;
+
+ set_count_mode(sys->enable_kernel, sys->enable_user,
+ &pm_regs.pm_cntrl);
+
+ /* Setup the thread 0 events */
+ for (i = 0; i < num_ctrs; ++i) {
+
+ pmc_cntrl[0][i].evnts = ctr[i].event;
+ pmc_cntrl[0][i].masks = ctr[i].unit_mask;
+ pmc_cntrl[0][i].enabled = ctr[i].enabled;
+ pmc_cntrl[0][i].vcntr = i;
+
+ for_each_possible_cpu(j)
+ per_cpu(pmc_values, j)[i] = 0;
+ }
+
+ /* Setup the thread 1 events, map the thread 0 event to the
+ * equivalent thread 1 event.
+ */
+ for (i = 0; i < num_ctrs; ++i) {
+ if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
+ pmc_cntrl[1][i].evnts = ctr[i].event + 19;
+ else if (ctr[i].event == 2203)
+ pmc_cntrl[1][i].evnts = ctr[i].event;
+ else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
+ pmc_cntrl[1][i].evnts = ctr[i].event + 16;
+ else
+ pmc_cntrl[1][i].evnts = ctr[i].event;
+
+ pmc_cntrl[1][i].masks = ctr[i].unit_mask;
+ pmc_cntrl[1][i].enabled = ctr[i].enabled;
+ pmc_cntrl[1][i].vcntr = i;
+ }
+
+ for (i = 0; i < 4; i++)
+ trace_bus[i] = 0xff;
+
+ for (i = 0; i < 2; i++)
+ input_bus[i] = 0xff;
+
+ /* Our counters count up, and "count" refers to
+ * how much before the next interrupt, and we interrupt
+ * on overflow. So we calculate the starting value
+ * which will give us "count" until overflow.
+ * Then we set the events on the enabled counters.
+ */
+ for (i = 0; i < num_counters; ++i) {
+ /* start with virtual counter set 0 */
+ if (pmc_cntrl[0][i].enabled) {
+ /* Using 32bit counters, reset max - count */
+ reset_value[i] = 0xFFFFFFFF - ctr[i].count;
+ set_pm_event(i,
+ pmc_cntrl[0][i].evnts,
+ pmc_cntrl[0][i].masks);
+
+ /* global, used by cell_cpu_setup */
+ ctr_enabled |= (1 << i);
+ }
+ }
+
+ /* initialize the previous counts for the virtual cntrs */
+ for_each_online_cpu(cpu)
+ for (i = 0; i < num_counters; ++i) {
+ per_cpu(pmc_values, cpu)[i] = reset_value[i];
+ }
+out:
+ ;
+}
+
+/* This function is called once for each cpu */
+static void cell_cpu_setup(struct op_counter_config *cntr)
+{
+ u32 cpu = smp_processor_id();
+ u32 num_enabled = 0;
+ int i;
+
+ /* There is one performance monitor per processor chip (i.e. node),
+ * so we only need to perform this function once per node.
+ */
+ if (cbe_get_hw_thread_id(cpu))
+ goto out;
+
+ if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+ __FUNCTION__);
+ goto out;
+ }
+
+ /* Stop all counters */
+ cbe_disable_pm(cpu);
+ cbe_disable_pm_interrupts(cpu);
+
+ cbe_write_pm(cpu, pm_interval, 0);
+ cbe_write_pm(cpu, pm_start_stop, 0);
+ cbe_write_pm(cpu, group_control, pm_regs.group_control);
+ cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
+ write_pm_cntrl(cpu, &pm_regs.pm_cntrl);
+
+ for (i = 0; i < num_counters; ++i) {
+ if (ctr_enabled & (1 << i)) {
+ pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
+ num_enabled++;
+ }
+ }
+
+ pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+out:
+ ;
+}
+
+static void cell_global_start(struct op_counter_config *ctr)
+{
+ u32 cpu;
+ u32 interrupt_mask = 0;
+ u32 i;
+
+ /* This routine gets called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ interrupt_mask = 0;
+
+ for (i = 0; i < num_counters; ++i) {
+ if (ctr_enabled & (1 << i)) {
+ cbe_write_ctr(cpu, i, reset_value[i]);
+ enable_ctr(cpu, i, pm_regs.pm07_cntrl);
+ interrupt_mask |=
+ CBE_PM_CTR_OVERFLOW_INTR(i);
+ } else {
+ /* Disable counter */
+ cbe_write_pm07_control(cpu, i, 0);
+ }
+ }
+
+ cbe_clear_pm_interrupts(cpu);
+ cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+ cbe_enable_pm(cpu);
+ }
+
+ virt_cntr_inter_mask = interrupt_mask;
+ oprofile_running = 1;
+ smp_wmb();
+
+ /* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
+ * executed which manipulates the PMU. We start the "virtual counter"
+ * here so that we do not need to synchronize access to the PMU in
+ * the above for-loop.
+ */
+ start_virt_cntrs();
+}
+
+static void cell_global_stop(void)
+{
+ int cpu;
+
+ /* This routine will be called once for the system.
+ * There is one performance monitor per node, so we
+ * only need to perform this function once per node.
+ */
+ del_timer_sync(&timer_virt_cntr);
+ oprofile_running = 0;
+ smp_wmb();
+
+ for_each_online_cpu(cpu) {
+ if (cbe_get_hw_thread_id(cpu))
+ continue;
+
+ cbe_sync_irq(cbe_cpu_to_node(cpu));
+ /* Stop the counters */
+ cbe_disable_pm(cpu);
+
+ /* Deactivate the signals */
+ pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+ /* Deactivate interrupts */
+ cbe_disable_pm_interrupts(cpu);
+ }
+}
+
+static void
+cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
+{
+ u32 cpu;
+ u64 pc;
+ int is_kernel;
+ unsigned long flags = 0;
+ u32 interrupt_mask;
+ int i;
+
+ cpu = smp_processor_id();
+
+ /* Need to make sure the interrupt handler and the virt counter
+ * routine are not running at the same time. See the
+ * cell_virtual_cntr() routine for additional comments.
+ */
+ spin_lock_irqsave(&virt_cntr_lock, flags);
+
+ /* Need to disable and reenable the performance counters
+ * to get the desired behavior from the hardware. This
+ * is hardware specific.
+ */
+
+ cbe_disable_pm(cpu);
+
+ interrupt_mask = cbe_clear_pm_interrupts(cpu);
+
+ /* If the interrupt mask has been cleared, then the virt cntr
+ * has cleared the interrupt. When the thread that generated
+ * the interrupt is restored, the data count will be restored to
+ * 0xfffffff0 to cause the interrupt to be regenerated.
+ */
+
+ if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+ pc = regs->nip;
+ is_kernel = is_kernel_addr(pc);
+
+ for (i = 0; i < num_counters; ++i) {
+ if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
+ && ctr[i].enabled) {
+ oprofile_add_pc(pc, is_kernel, i);
+ cbe_write_ctr(cpu, i, reset_value[i]);
+ }
+ }
+
+ /* The counters were frozen by the interrupt.
+ * Reenable the interrupt and restart the counters.
+ * If there was a race between the interrupt handler and
+ * the virtual counter routine, the virtual counter
+ * routine may have cleared the interrupts. Hence we must
+ * use the virt_cntr_inter_mask to re-enable the interrupts.
+ */
+ cbe_enable_pm_interrupts(cpu, hdw_thread,
+ virt_cntr_inter_mask);
+
+ /* The writes to the various performance counters only write
+ * to a latch. The new values (interrupt setting bits, reset
+ * counter value etc.) are not copied to the actual registers
+ * until the performance monitor is enabled. In order to get
+ * this to work as desired, the performance monitor needs to
+ * be disabled while writing to the latches. This is a
+ * HW design issue.
+ */
+ cbe_enable_pm(cpu);
+ }
+ spin_unlock_irqrestore(&virt_cntr_lock, flags);
+}
+
+struct op_powerpc_model op_model_cell = {
+ .reg_setup = cell_reg_setup,
+ .cpu_setup = cell_cpu_setup,
+ .global_start = cell_global_start,
+ .global_stop = cell_global_stop,
+ .handle_interrupt = cell_handle_interrupt,
+};
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
new file mode 100644
index 00000000000..a46184a0c75
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for 52xx based boards
+#
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y += mpc52xx_pic.o mpc52xx_common.o
+endif
+
+obj-$(CONFIG_PPC_EFIKA) += efika-setup.o efika-pci.o
+obj-$(CONFIG_PPC_LITE5200) += lite5200.o
diff --git a/arch/powerpc/platforms/52xx/efika-pci.c b/arch/powerpc/platforms/52xx/efika-pci.c
new file mode 100644
index 00000000000..62e05b2a922
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-pci.c
@@ -0,0 +1,119 @@
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/sections.h>
+#include <asm/pci-bridge.h>
+#include <asm/rtas.h>
+
+#include "efika.h"
+
+#ifdef CONFIG_PCI
+/*
+ * Access functions for PCI config space using RTAS calls.
+ */
+static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+ int len, u32 * val)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+ | (((bus->number - hose->first_busno) & 0xff) << 16)
+ | (hose->index << 24);
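+ /* config address layout: offset in bits 0-7, devfn in 8-15, bus (relative to first_busno) in 16-23, PHB index from bit 24 */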
+ int ret = -1;
+ int rval;
+
+ rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
+ *val = ret;
+ return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+ | (((bus->number - hose->first_busno) & 0xff) << 16)
+ | (hose->index << 24);
+ int rval;
+
+ rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
+ addr, len, val);
+ return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rtas_pci_ops = {
+ rtas_read_config,
+ rtas_write_config
+};
+
+void __init efika_pcisetup(void)
+{
+ const int *bus_range;
+ int len;
+ struct pci_controller *hose;
+ struct device_node *root;
+ struct device_node *pcictrl;
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ printk(KERN_WARNING EFIKA_PLATFORM_NAME
+ ": Unable to find the root node\n");
+ return;
+ }
+
+ for (pcictrl = NULL;;) {
+ pcictrl = of_get_next_child(root, pcictrl);
+ if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0))
+ break;
+ }
+
+ of_node_put(root);
+
+ if (pcictrl == NULL) {
+ printk(KERN_WARNING EFIKA_PLATFORM_NAME
+ ": Unable to find the PCI bridge node\n");
+ return;
+ }
+
+ bus_range = get_property(pcictrl, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING EFIKA_PLATFORM_NAME
+ ": Can't get bus-range for %s\n", pcictrl->full_name);
+ return;
+ }
+
+ if (bus_range[1] == bus_range[0])
+ printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d",
+ bus_range[0]);
+ else
+ printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
+ bus_range[0], bus_range[1]);
+ printk(" controlled by %s\n", pcictrl->full_name);
+ printk("\n");
+
+ hose = pcibios_alloc_controller();
+ if (!hose) {
+ printk(KERN_WARNING EFIKA_PLATFORM_NAME
+ ": Can't allocate PCI controller structure for %s\n",
+ pcictrl->full_name);
+ return;
+ }
+
+ hose->arch_data = of_node_get(pcictrl);
+ hose->first_busno = bus_range[0];
+ hose->last_busno = bus_range[1];
+ hose->ops = &rtas_pci_ops;
+
+ pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+}
+
+#else
+void __init efika_pcisetup(void)
+{}
+#endif
diff --git a/arch/powerpc/platforms/52xx/efika-setup.c b/arch/powerpc/platforms/52xx/efika-setup.c
new file mode 100644
index 00000000000..110c980ed1e
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika-setup.c
@@ -0,0 +1,150 @@
+/*
+ *
+ * Efika 5K2 platform setup
+ * Some code really inspired from the lite5200b platform.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/utsrelease.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+#include "efika.h"
+
+static void efika_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *revision = NULL;
+ const char *codegendescription = NULL;
+ const char *codegenvendor = NULL;
+
+ root = of_find_node_by_path("/");
+ if (root) {
+ revision = get_property(root, "revision", NULL);
+ codegendescription =
+ get_property(root, "CODEGEN,description", NULL);
+ codegenvendor = get_property(root, "CODEGEN,vendor", NULL);
+ }
+
+ if (codegendescription)
+ seq_printf(m, "machine\t\t: %s\n", codegendescription);
+ else
+ seq_printf(m, "machine\t\t: Efika\n");
+
+ if (revision)
+ seq_printf(m, "revision\t: %s\n", revision);
+
+ if (codegenvendor)
+ seq_printf(m, "vendor\t\t: %s\n", codegenvendor);
+
+ of_node_put(root);
+}
+
+static void __init efika_setup_arch(void)
+{
+ rtas_initialize();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_below_start_ok = 1;
+
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+ ROOT_DEV = Root_SDA2; /* sda2 (sda1 is for the kernel) */
+
+ efika_pcisetup();
+
+ if (ppc_md.progress)
+ ppc_md.progress("Linux/PPC " UTS_RELEASE " runnung on Efika ;-)\n", 0x0);
+}
+
+static void __init efika_init(void)
+{
+ struct device_node *np;
+ struct device_node *cnp = NULL;
+ const u32 *base;
+
+ /* Find every child of the SOC node and add it to of_platform */
+ np = of_find_node_by_name(NULL, "builtin");
+ if (np) {
+ char name[BUS_ID_SIZE];
+ while ((cnp = of_get_next_child(np, cnp))) {
+ strcpy(name, cnp->name);
+
+ base = get_property(cnp, "reg", NULL);
+ if (base == NULL)
+ continue;
+
+ snprintf(name + strlen(name), BUS_ID_SIZE - strlen(name), "@%x", *base);
+ of_platform_device_create(cnp, name, NULL);
+
+ printk(KERN_INFO EFIKA_PLATFORM_NAME" : Added %s (type '%s' at '%s') to the known devices\n", name, cnp->type, cnp->full_name);
+ }
+ }
+
+ if (ppc_md.progress)
+ ppc_md.progress(" Have fun with your Efika! ", 0x7777);
+}
+
+static int __init efika_probe(void)
+{
+ char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "model", NULL);
+
+ if (model == NULL)
+ return 0;
+ if (strcmp(model, "EFIKA5K2"))
+ return 0;
+
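+ /* Standard i8237-style ISA DMA mode values: single transfer with
+ * address increment; 0x44 selects a read, 0x48 a write. */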
+ ISA_DMA_THRESHOLD = ~0L;
+ DMA_MODE_READ = 0x44;
+ DMA_MODE_WRITE = 0x48;
+
+ return 1;
+}
+
+define_machine(efika)
+{
+ .name = EFIKA_PLATFORM_NAME,
+ .probe = efika_probe,
+ .setup_arch = efika_setup_arch,
+ .init = efika_init,
+ .show_cpuinfo = efika_show_cpuinfo,
+ .init_IRQ = mpc52xx_init_irq,
+ .get_irq = mpc52xx_get_irq,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .set_rtc_time = rtas_set_rtc_time,
+ .get_rtc_time = rtas_get_rtc_time,
+ .progress = rtas_progress,
+ .get_boot_time = rtas_get_boot_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .phys_mem_access_prot = pci_phys_mem_access_prot,
+};
diff --git a/arch/powerpc/platforms/52xx/efika.h b/arch/powerpc/platforms/52xx/efika.h
new file mode 100644
index 00000000000..2f060fd097d
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/efika.h
@@ -0,0 +1,19 @@
+/*
+ * Efika 5K2 platform setup - Header file
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef __ARCH_POWERPC_EFIKA__
+#define __ARCH_POWERPC_EFIKA__
+
+#define EFIKA_PLATFORM_NAME "Efika"
+
+extern void __init efika_pcisetup(void);
+
+#endif
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
new file mode 100644
index 00000000000..a375c15b431
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -0,0 +1,162 @@
+/*
+ * Freescale Lite5200 board support
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/of_platform.h>
+
+#include <asm/mpc52xx.h>
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+
+static void __init
+lite52xx_setup_cpu(void)
+{
+ struct mpc52xx_gpio __iomem *gpio;
+ u32 port_config;
+
+ /* Map zones */
+ gpio = mpc52xx_find_and_map("mpc52xx-gpio");
+ if (!gpio) {
+ printk(KERN_ERR __FILE__ ": "
+ "Error while mapping GPIO register for port config. "
+ "Expect some abnormal behavior\n");
+ goto error;
+ }
+
+ /* Set port config */
+ port_config = in_be32(&gpio->port_config);
+
+ port_config &= ~0x00800000; /* 48 MHz internal, pin is GPIO */
+
+ port_config &= ~0x00007000; /* USB port : Differential mode */
+ port_config |= 0x00001000; /* USB 1 only */
+
+ port_config &= ~0x03000000; /* ATA CS is on csb_4/5 */
+ port_config |= 0x01000000;
+
+ pr_debug("port_config: old:%x new:%x\n",
+ in_be32(&gpio->port_config), port_config);
+ out_be32(&gpio->port_config, port_config);
+
+ /* Unmap zone */
+error:
+ iounmap(gpio);
+}
+
+static void __init lite52xx_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("lite52xx_setup_arch()", 0);
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np) {
+ unsigned int *fp =
+ (unsigned int *)get_property(np, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 50000000 / HZ;
+ of_node_put(np);
+ }
+
+ /* CPU & Port mux setup */
+ mpc52xx_setup_cpu(); /* Generic */
+ lite52xx_setup_cpu(); /* Platform specific */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+
+}
+
+void lite52xx_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node* np = of_find_all_nodes(NULL);
+ const char *model = NULL;
+
+ if (np)
+ model = get_property(np, "model", NULL);
+
+ seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
+ seq_printf(m, "machine\t\t: %s\n", model ? model : "unknown");
+
+ of_node_put(np);
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init lite52xx_probe(void)
+{
+ unsigned long node = of_get_flat_dt_root();
+ const char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+ if (!of_flat_dt_is_compatible(node, "lite52xx"))
+ return 0;
+ pr_debug("%s board w/ mpc52xx found\n", model ? model : "unknown");
+
+ return 1;
+}
+
+define_machine(lite52xx) {
+ .name = "lite52xx",
+ .probe = lite52xx_probe,
+ .setup_arch = lite52xx_setup_arch,
+ .init_IRQ = mpc52xx_init_irq,
+ .get_irq = mpc52xx_get_irq,
+ .show_cpuinfo = lite52xx_show_cpuinfo,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
new file mode 100644
index 00000000000..8331ff45777
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -0,0 +1,126 @@
+/*
+ *
+ * Utility functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+
+void __iomem *
+mpc52xx_find_and_map(const char *compatible)
+{
+ struct device_node *ofn;
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+
+ ofn = of_find_compatible_node(NULL, NULL, compatible);
+ if (!ofn)
+ return NULL;
+
+ regaddr_p = of_get_address(ofn, 0, &size64, NULL);
+ if (!regaddr_p) {
+ of_node_put(ofn);
+ return NULL;
+ }
+
+ regaddr64 = of_translate_address(ofn, regaddr_p);
+
+ of_node_put(ofn);
+
+ return ioremap((u32)regaddr64, (u32)size64);
+}
+EXPORT_SYMBOL(mpc52xx_find_and_map);
+
+
+/**
+ * mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
+ * @node: device node
+ *
+ * Returns IPB bus frequency, or 0 if the bus frequency cannot be found.
+ */
+unsigned int
+mpc52xx_find_ipb_freq(struct device_node *node)
+{
+ struct device_node *np;
+ const unsigned int *p_ipb_freq = NULL;
+
+ of_node_get(node);
+ while (node) {
+ p_ipb_freq = get_property(node, "bus-frequency", NULL);
+ if (p_ipb_freq)
+ break;
+
+ np = of_get_parent(node);
+ of_node_put(node);
+ node = np;
+ }
+ if (node)
+ of_node_put(node);
+
+ return p_ipb_freq ? *p_ipb_freq : 0;
+}
+EXPORT_SYMBOL(mpc52xx_find_ipb_freq);
+
+
+void __init
+mpc52xx_setup_cpu(void)
+{
+ struct mpc52xx_cdm __iomem *cdm;
+ struct mpc52xx_xlb __iomem *xlb;
+
+ /* Map zones */
+ cdm = mpc52xx_find_and_map("mpc52xx-cdm");
+ xlb = mpc52xx_find_and_map("mpc52xx-xlb");
+
+ if (!cdm || !xlb) {
+ printk(KERN_ERR __FILE__ ": "
+ "Error while mapping CDM/XLB during mpc52xx_setup_cpu. "
+ "Expect some abnormal behavior\n");
+ goto unmap_regs;
+ }
+
+ /* Use internal 48 MHz */
+ out_8(&cdm->ext_48mhz_en, 0x00);
+ out_8(&cdm->fd_enable, 0x01);
+ if (in_be32(&cdm->rstcfg) & 0x40) /* Assumes 33 MHz clock */
+ out_be16(&cdm->fd_counters, 0x0001);
+ else
+ out_be16(&cdm->fd_counters, 0x5555);
+
+ /* Configure the XLB Arbiter priorities */
+ out_be32(&xlb->master_pri_enable, 0xff);
+ out_be32(&xlb->master_priority, 0x11111111);
+
+ /* Disable XLB pipelining */
+ /* (cf. erratum 292. We could do this only just before ATA PIO
+ transactions and re-enable it afterwards ...) */
+ out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
+
+ /* Unmap zones */
+unmap_regs:
+ if (cdm) iounmap(cdm);
+ if (xlb) iounmap(xlb);
+}
+
+static int __init
+mpc52xx_declare_of_platform_devices(void)
+{
+ /* Find every child of the SOC node and add it to of_platform */
+ return of_platform_bus_probe(NULL, NULL, NULL);
+}
+
+device_initcall(mpc52xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
new file mode 100644
index 00000000000..cd91a6c3aaf
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -0,0 +1,473 @@
+/*
+ *
+ * Programmable Interrupt Controller functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * Based on the code from the 2.4 kernel by
+ * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/hardirq.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include "mpc52xx_pic.h"
+
+/*
+ *
+*/
+
+static struct mpc52xx_intr __iomem *intr;
+static struct mpc52xx_sdma __iomem *sdma;
+static struct irq_host *mpc52xx_irqhost = NULL;
+
+static unsigned char mpc52xx_map_senses[4] = {
+ IRQ_TYPE_LEVEL_HIGH,
+ IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_EDGE_FALLING,
+ IRQ_TYPE_LEVEL_LOW,
+};
+
+/*
+ *
+*/
+
+static inline void io_be_setbit(u32 __iomem *addr, int bitno)
+{
+ out_be32(addr, in_be32(addr) | (1 << bitno));
+}
+
+static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
+{
+ out_be32(addr, in_be32(addr) & ~(1 << bitno));
+}
+
+/*
+ * IRQ[0-3] interrupt irq_chip
+*/
+
+static void mpc52xx_extirq_mask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_clrbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_unmask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_setbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_ack(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_setbit(&intr->ctrl, 27-l2irq);
+}
+
+static struct irq_chip mpc52xx_extirq_irqchip = {
+ .typename = " MPC52xx IRQ[0-3] ",
+ .mask = mpc52xx_extirq_mask,
+ .unmask = mpc52xx_extirq_unmask,
+ .ack = mpc52xx_extirq_ack,
+};
+
+/*
+ * Main interrupt irq_chip
+*/
+
+static void mpc52xx_main_mask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_setbit(&intr->main_mask, 15 - l2irq);
+}
+
+static void mpc52xx_main_unmask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_clrbit(&intr->main_mask, 15 - l2irq);
+}
+
+static struct irq_chip mpc52xx_main_irqchip = {
+ .typename = "MPC52xx Main",
+ .mask = mpc52xx_main_mask,
+ .mask_ack = mpc52xx_main_mask,
+ .unmask = mpc52xx_main_unmask,
+};
+
+/*
+ * Peripherals interrupt irq_chip
+*/
+
+static void mpc52xx_periph_mask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_setbit(&intr->per_mask, 31 - l2irq);
+}
+
+static void mpc52xx_periph_unmask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_clrbit(&intr->per_mask, 31 - l2irq);
+}
+
+static struct irq_chip mpc52xx_periph_irqchip = {
+ .typename = "MPC52xx Peripherals",
+ .mask = mpc52xx_periph_mask,
+ .mask_ack = mpc52xx_periph_mask,
+ .unmask = mpc52xx_periph_unmask,
+};
+
+/*
+ * SDMA interrupt irq_chip
+*/
+
+static void mpc52xx_sdma_mask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_setbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_unmask(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ io_be_clrbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_ack(unsigned int virq)
+{
+ int irq;
+ int l2irq;
+
+ irq = irq_map[virq].hwirq;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+ out_be32(&sdma->IntPend, 1 << l2irq);
+}
+
+static struct irq_chip mpc52xx_sdma_irqchip = {
+ .typename = "MPC52xx SDMA",
+ .mask = mpc52xx_sdma_mask,
+ .unmask = mpc52xx_sdma_unmask,
+ .ack = mpc52xx_sdma_ack,
+};
+
+/*
+ * irq_host
+*/
+
+static int mpc52xx_irqhost_match(struct irq_host *h, struct device_node *node)
+{
+ pr_debug("%s: node=%p\n", __func__, node);
+ return mpc52xx_irqhost->host_data == node;
+}
+
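+/* Translate a device tree interrupt specifier <l1 l2 sense> into a single
+ * linear hwirq number (l1 and l2 folded together) plus a Linux sense flag. */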
+static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+ u32 * intspec, unsigned int intsize,
+ irq_hw_number_t * out_hwirq,
+ unsigned int *out_flags)
+{
+ int intrvect_l1;
+ int intrvect_l2;
+ int intrvect_type;
+ int intrvect_linux;
+
+ if (intsize != 3)
+ return -1;
+
+ intrvect_l1 = (int)intspec[0];
+ intrvect_l2 = (int)intspec[1];
+ intrvect_type = (int)intspec[2];
+
+ intrvect_linux =
+ (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
+ intrvect_linux |=
+ (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+
+ pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
+ intrvect_l2);
+
+ *out_hwirq = intrvect_linux;
+ *out_flags = mpc52xx_map_senses[intrvect_type];
+
+ return 0;
+}
+
+/*
+ * This function retrieves the correct IRQ type from
+ * the MPC registers.
+ * Only external IRQs need this.
+ */
+static int mpc52xx_irqx_gettype(int irq)
+{
+ int type;
+ u32 ctrl_reg;
+
+ ctrl_reg = in_be32(&intr->ctrl);
+ type = (ctrl_reg >> (22 - irq * 2)) & 0x3;
+
+ return mpc52xx_map_senses[type];
+}
+
+static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t irq)
+{
+ int l1irq;
+ int l2irq;
+ struct irq_chip *good_irqchip;
+ void *good_handle;
+ int type;
+
+ l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
+ l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+ /*
+ * Most of our IRQs will be level low;
+ * only external IRQs on some platforms may be of another type.
+ */
+ type = IRQ_TYPE_LEVEL_LOW;
+
+ switch (l1irq) {
+ case MPC52xx_IRQ_L1_CRIT:
+ pr_debug("%s: Critical. l2=%x\n", __func__, l2irq);
+
+ BUG_ON(l2irq != 0);
+
+ type = mpc52xx_irqx_gettype(l2irq);
+ good_irqchip = &mpc52xx_extirq_irqchip;
+ break;
+
+ case MPC52xx_IRQ_L1_MAIN:
+ pr_debug("%s: Main IRQ[1-3] l2=%x\n", __func__, l2irq);
+
+ if ((l2irq >= 1) && (l2irq <= 3)) {
+ type = mpc52xx_irqx_gettype(l2irq);
+ good_irqchip = &mpc52xx_extirq_irqchip;
+ } else {
+ good_irqchip = &mpc52xx_main_irqchip;
+ }
+ break;
+
+ case MPC52xx_IRQ_L1_PERP:
+ pr_debug("%s: Peripherals. l2=%x\n", __func__, l2irq);
+ good_irqchip = &mpc52xx_periph_irqchip;
+ break;
+
+ case MPC52xx_IRQ_L1_SDMA:
+ pr_debug("%s: SDMA. l2=%x\n", __func__, l2irq);
+ good_irqchip = &mpc52xx_sdma_irqchip;
+ break;
+
+ default:
+ pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
+ printk(KERN_ERR "Unknow IRQ!\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_RISING:
+ good_handle = handle_edge_irq;
+ break;
+ default:
+ good_handle = handle_level_irq;
+ }
+
+ set_irq_chip_and_handler(virq, good_irqchip, good_handle);
+
+ pr_debug("%s: virq=%x, hw=%x. type=%x\n", __func__, virq,
+ (int)irq, type);
+
+ return 0;
+}
+
+static struct irq_host_ops mpc52xx_irqhost_ops = {
+ .match = mpc52xx_irqhost_match,
+ .xlate = mpc52xx_irqhost_xlate,
+ .map = mpc52xx_irqhost_map,
+};
+
+/*
+ * init (public)
+*/
+
+void __init mpc52xx_init_irq(void)
+{
+ u32 intr_ctrl;
+ struct device_node *picnode;
+
+ /* Remap the necessary zones */
+ picnode = of_find_compatible_node(NULL, NULL, "mpc52xx-pic");
+
+ intr = mpc52xx_find_and_map("mpc52xx-pic");
+ if (!intr)
+ panic(__FILE__ ": find_and_map failed on 'mpc52xx-pic'. "
+ "Check node !");
+
+ sdma = mpc52xx_find_and_map("mpc52xx-bestcomm");
+ if (!sdma)
+ panic(__FILE__ ": find_and_map failed on 'mpc52xx-bestcomm'. "
+ "Check node !");
+
+ /* Disable all interrupt sources. */
+ out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */
+ out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */
+ out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */
+ out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
+ intr_ctrl = in_be32(&intr->ctrl);
+ intr_ctrl &= 0x00ff0000; /* Keeps IRQ[0-3] config */
+ intr_ctrl |= 0x0f000000 | /* clear IRQ 0-3 */
+ 0x00001000 | /* MEE master external enable */
+ 0x00000000 | /* 0 means disable IRQ 0-3 */
+ 0x00000001; /* CEb route critical normally */
+ out_be32(&intr->ctrl, intr_ctrl);
+
+ /* Zero a bunch of the priority settings. */
+ out_be32(&intr->per_pri1, 0);
+ out_be32(&intr->per_pri2, 0);
+ out_be32(&intr->per_pri3, 0);
+ out_be32(&intr->main_pri1, 0);
+ out_be32(&intr->main_pri2, 0);
+
+ /*
+ * As a last step, add an irq host to translate the real
+ * hw irq information provided by the OFW into Linux virqs
+ */
+
+ mpc52xx_irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+ MPC52xx_IRQ_HIGHTESTHWIRQ,
+ &mpc52xx_irqhost_ops, -1);
+
+ if (!mpc52xx_irqhost)
+ panic(__FILE__ ": Cannot allocate the IRQ host\n");
+
+ mpc52xx_irqhost->host_data = picnode;
+ printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+}
+
+/*
+ * get_irq (public)
+*/
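+/* Decode the encoded status register: bit 0x00000400 flags a pending
+ * critical interrupt, 0x00200000 a main one and 0x20000000 a peripheral
+ * one; the neighbouring bit fields carry the L2 source number. Peripheral
+ * source 0 is the BestComm/SDMA engine, which has its own pending register. */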
+unsigned int mpc52xx_get_irq(void)
+{
+ u32 status;
+ int irq = NO_IRQ_IGNORE;
+
+ status = in_be32(&intr->enc_status);
+ if (status & 0x00000400) { /* critical */
+ irq = (status >> 8) & 0x3;
+ if (irq == 2) /* high priority peripheral */
+ goto peripheral;
+ irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ } else if (status & 0x00200000) { /* main */
+ irq = (status >> 16) & 0x1f;
+ if (irq == 4) /* low priority peripheral */
+ goto peripheral;
+ irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ } else if (status & 0x20000000) { /* peripheral */
+ peripheral:
+ irq = (status >> 24) & 0x1f;
+ if (irq == 0) { /* bestcomm */
+ status = in_be32(&sdma->IntPend);
+ irq = ffs(status) - 1;
+ irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ } else {
+ irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ }
+ }
+
+ pr_debug("%s: irq=%x. virq=%d\n", __func__, irq,
+ irq_linear_revmap(mpc52xx_irqhost, irq));
+
+ return irq_linear_revmap(mpc52xx_irqhost, irq);
+}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
new file mode 100644
index 00000000000..1a26bcdb304
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
@@ -0,0 +1,53 @@
+/*
+ * Header file for Freescale MPC52xx Interrupt controller
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
+#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
+
+#include <asm/types.h>
+
+
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT (0)
+#define MPC52xx_IRQ_L1_MAIN (1)
+#define MPC52xx_IRQ_L1_PERP (2)
+#define MPC52xx_IRQ_L1_SDMA (3)
+
+#define MPC52xx_IRQ_L1_OFFSET (6)
+#define MPC52xx_IRQ_L1_MASK (0x00c0)
+
+#define MPC52xx_IRQ_L2_OFFSET (0)
+#define MPC52xx_IRQ_L2_MASK (0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
+
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+ u32 per_mask; /* INTR + 0x00 */
+ u32 per_pri1; /* INTR + 0x04 */
+ u32 per_pri2; /* INTR + 0x08 */
+ u32 per_pri3; /* INTR + 0x0c */
+ u32 ctrl; /* INTR + 0x10 */
+ u32 main_mask; /* INTR + 0x14 */
+ u32 main_pri1; /* INTR + 0x18 */
+ u32 main_pri2; /* INTR + 0x1c */
+ u32 reserved1; /* INTR + 0x20 */
+ u32 enc_status; /* INTR + 0x24 */
+ u32 crit_status; /* INTR + 0x28 */
+ u32 main_status; /* INTR + 0x2c */
+ u32 per_status; /* INTR + 0x30 */
+ u32 reserved2; /* INTR + 0x34 */
+ u32 per_error; /* INTR + 0x38 */
+};
+
+#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
+
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
index bb9acbb9817..ea880f1f0dc 100644
--- a/arch/powerpc/platforms/82xx/mpc82xx_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -515,16 +515,6 @@ static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
return PCIBIOS_SUCCESSFUL;
}
-static void
-__init mpc82xx_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- for_each_pci_dev(dev) {
- pci_read_irq_line(dev);
- }
-}
-
void __init add_bridge(struct device_node *np)
{
int len;
@@ -597,9 +587,6 @@ static void __init mpc82xx_ads_setup_arch(void)
add_bridge(np);
of_node_put(np);
- ppc_md.pci_map_irq = NULL;
- ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
- ppc_md.pcibios_fixup_bus = NULL;
#endif
#ifdef CONFIG_ROOT_NFS
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 7edb6b46138..edcd5b875b6 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -21,7 +21,7 @@ config MPC834x_SYS
Be aware that PCI buses can only function when SYS board is plugged
into the PIB (Platform IO Board) board from Freescale which provide
3 PCI slots. The PIBs PCI initialization is the bootloader's
- responsiblilty.
+ responsibility.
config MPC834x_ITX
bool "Freescale MPC834x ITX"
@@ -30,7 +30,7 @@ config MPC834x_ITX
This option enables support for the MPC 834x ITX evaluation board.
Be aware that PCI initialization is the bootloader's
- responsiblilty.
+ responsibility.
config MPC8360E_PB
bool "Freescale MPC8360E PB"
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index a43ac71ab74..f58c9780b66 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -97,8 +97,6 @@ static void __init mpc832x_sys_setup_arch(void)
#ifdef CONFIG_PCI
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
-
- ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_exclude_device = mpc83xx_exclude_device;
#endif
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index e2bcaaf6b32..314c42ac604 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -118,7 +118,4 @@ define_machine(mpc834x_itx) {
.time_init = mpc83xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
-#ifdef CONFIG_PCI
- .pcibios_fixup = mpc83xx_pcibios_fixup,
-#endif
};
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
index 677196187a4..80b735a414d 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_sys.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -137,7 +137,4 @@ define_machine(mpc834x_sys) {
.time_init = mpc83xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
-#ifdef CONFIG_PCI
- .pcibios_fixup = mpc83xx_pcibios_fixup,
-#endif
};
diff --git a/arch/powerpc/platforms/83xx/mpc8360e_pb.c b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
index 1a523c81c06..7bfd47ad723 100644
--- a/arch/powerpc/platforms/83xx/mpc8360e_pb.c
+++ b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
@@ -102,8 +102,6 @@ static void __init mpc8360_sys_setup_arch(void)
#ifdef CONFIG_PCI
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
-
- ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_exclude_device = mpc83xx_exclude_device;
#endif
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2c82bca9bfb..01cae106912 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -11,7 +11,6 @@
extern int add_bridge(struct device_node *dev);
extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
-extern void mpc83xx_pcibios_fixup(void);
extern void mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index 4557ac5255c..9c365055514 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -45,15 +45,6 @@ int mpc83xx_exclude_device(u_char bus, u_char devfn)
return PCIBIOS_SUCCESSFUL;
}
-void __init mpc83xx_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- /* map all the PCI irqs */
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
-}
-
int __init add_bridge(struct device_node *dev)
{
int len;
diff --git a/arch/powerpc/platforms/85xx/misc.c b/arch/powerpc/platforms/85xx/misc.c
index 26c5e822c7c..3e62fcb04c1 100644
--- a/arch/powerpc/platforms/85xx/misc.c
+++ b/arch/powerpc/platforms/85xx/misc.c
@@ -21,11 +21,3 @@ void mpc85xx_restart(char *cmd)
local_irq_disable();
abort();
}
-
-/* For now this is a pass through */
-phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
-{
- return addr;
-};
-
-EXPORT_SYMBOL(fixup_bigphys_addr);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index d3e669d69c7..bda2e55e6c4 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -53,15 +53,6 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
else
return PCIBIOS_SUCCESSFUL;
}
-
-void __init
-mpc85xx_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
-}
#endif /* CONFIG_PCI */
#ifdef CONFIG_CPM2
@@ -253,8 +244,6 @@ static void __init mpc85xx_ads_setup_arch(void)
#ifdef CONFIG_PCI
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
-
- ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 1a1c226ad4d..f4dd5f2f8a2 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -398,15 +398,6 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
}
-void __init mpc86xx_hpcn_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
-}
-
-
/*
* Called very early, device-tree isn't unflattened
*/
@@ -461,7 +452,6 @@ define_machine(mpc86xx_hpcn) {
.setup_arch = mpc86xx_hpcn_setup_arch,
.init_IRQ = mpc86xx_hpcn_init_irq,
.show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
- .pcibios_fixup = mpc86xx_hpcn_pcibios_fixup,
.get_irq = mpic_get_irq,
.restart = mpc86xx_restart,
.time_init = mpc86xx_time_init,
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index e58fa953a50..44d95eaf22e 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -7,12 +7,14 @@ endif
endif
obj-$(CONFIG_PPC_CHRP) += chrp/
obj-$(CONFIG_4xx) += 4xx/
+obj-$(CONFIG_PPC_MPC52xx) += 52xx/
obj-$(CONFIG_PPC_83xx) += 83xx/
obj-$(CONFIG_PPC_85xx) += 85xx/
obj-$(CONFIG_PPC_86xx) += 86xx/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/
-obj-$(CONFIG_PPC_PASEMI) += pasemi/
+obj-$(CONFIG_PPC_PASEMI) += pasemi/
obj-$(CONFIG_PPC_CELL) += cell/
+obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3e430b489bb..06a85b70433 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -20,4 +20,18 @@ config CBE_RAS
bool "RAS features for bare metal Cell BE"
default y
+config CBE_THERM
+ tristate "CBE thermal support"
+ default m
+ depends on CBE_RAS
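+ help
+ This driver exposes the on-chip thermal sensors of the Cell BE
+ processor through sysfs.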
+
+config CBE_CPUFREQ
+ tristate "CBE frequency scaling"
+ depends on CBE_RAS && CPU_FREQ
+ default m
+ help
+ This adds the cpufreq driver for Cell BE processors.
+ For details, take a look at <file:Documentation/cpu-freq/>.
+ If you don't have such a processor, say N.
+
endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index c89cdd67383..f90e8337796 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,11 @@
obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
- cbe_regs.o spider-pic.o pervasive.o
+ cbe_regs.o spider-pic.o \
+ pervasive.o pmu.o io-workarounds.o
obj-$(CONFIG_CBE_RAS) += ras.o
+obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ) += cbe_cpufreq.o
+
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
endif
@@ -11,5 +15,6 @@ spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
+ spu_coredump.o \
$(spufs-modular-m) \
$(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644
index 00000000000..a3850fd1e94
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -0,0 +1,248 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include "cbe_regs.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports 8-step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+ {1, 0},
+ {2, 0},
+ {3, 0},
+ {4, 0},
+ {5, 0},
+ {6, 0},
+ {8, 0},
+ {10, 0},
+ {0, CPUFREQ_TABLE_END},
+};
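+/* The first (index) field of each entry is the divider that is applied to
+ * the CPU's maximum clock-frequency when the table is filled in at init. */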
+
+/* per-pmode values written to the MIC slow/fast timer registers */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+ [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* per-pmode values for the MIC slow-next timer registers */
+static u64 MIC_Slow_Next_Timer_table[] = {
+ 0x0000240000000000ull,
+ 0x0000268000000000ull,
+ 0x000029C000000000ull,
+ 0x00002D0000000000ull,
+ 0x0000300000000000ull,
+ 0x0000334000000000ull,
+ 0x000039C000000000ull,
+ 0x00003FC000000000ull,
+};
+
+/*
+ * hardware specific functions
+ */
+
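+/* The current pmode (frequency divider step) lives in the low three bits
+ * of the PMD status register. */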
+static int get_pmode(int cpu)
+{
+ int ret;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+ ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+ return ret;
+}
+
+static int set_pmode(int cpu, unsigned int pmode)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ u64 flags;
+ u64 value;
+
+ local_irq_save(flags);
+
+ mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+ pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
+ pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
+
+ out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+ out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+ value = in_be64(&pmd_regs->pmcr);
+ /* set bits to zero */
+ value &= 0xFFFFFFFFFFFFFFF8ull;
+ /* set bits to next pmode */
+ value |= pmode;
+
+ out_be64(&pmd_regs->pmcr, value);
+
+ /* wait until new pmode appears in status register */
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ while(value != pmode) {
+ cpu_relax();
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ }
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init (struct cpufreq_policy *policy)
+{
+ u32 *max_freq;
+ int i, cur_pmode;
+ struct device_node *cpu;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if(!cpu)
+ return -ENODEV;
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ max_freq = (u32*) get_property(cpu, "clock-frequency", NULL);
+
+ if(!max_freq)
+ return -EINVAL;
+
+ /* we need the freq in kHz */
+ *max_freq /= 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", *max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
+ cbe_freqs[i].frequency = *max_freq / cbe_freqs[i].index;
+ pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+ }
+
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ /* if DEBUG is enabled set_pmode() measures the correct latency of a transition */
+ policy->cpuinfo.transition_latency = 25000;
+
+ cur_pmode = get_pmode(policy->cpu);
+ pr_debug("current pmode is at %d\n",cur_pmode);
+
+ policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+ policy->cpus = cpu_sibling_map[policy->cpu];
+#endif
+
+ cpufreq_frequency_table_get_attr (cbe_freqs, policy->cpu);
+
+ /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
+ return cpufreq_frequency_table_cpuinfo (policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq,
+ unsigned int relation)
+{
+ int rc;
+ struct cpufreq_freqs freqs;
+ int cbe_pmode_new;
+
+ cpufreq_frequency_table_target(policy,
+ cbe_freqs,
+ target_freq,
+ relation,
+ &cbe_pmode_new);
+
+ freqs.old = policy->cur;
+ freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+ freqs.cpu = policy->cpu;
+
+ mutex_lock (&cbe_switch_mutex);
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+ policy->cpu,
+ cbe_freqs[cbe_pmode_new].frequency,
+ cbe_freqs[cbe_pmode_new].index);
+
+ rc = set_pmode(policy->cpu, cbe_pmode_new);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&cbe_switch_mutex);
+
+ return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+ .verify = cbe_cpufreq_verify,
+ .target = cbe_cpufreq_target,
+ .init = cbe_cpufreq_cpu_init,
+ .exit = cbe_cpufreq_cpu_exit,
+ .name = "cbe-cpufreq",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 2f194ba2989..9a0ee62691d 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -8,6 +8,7 @@
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgtable.h>
@@ -16,8 +17,6 @@
#include "cbe_regs.h"
-#define MAX_CBE 2
-
/*
* Current implementation uses "cpu" nodes. We build our own mapping
* array of cpu numbers to cpu nodes locally for now to allow interrupt
@@ -30,6 +29,8 @@ static struct cbe_regs_map
struct device_node *cpu_node;
struct cbe_pmd_regs __iomem *pmd_regs;
struct cbe_iic_regs __iomem *iic_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;
@@ -42,6 +43,19 @@ static struct cbe_thread_map
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
int i;
+ struct device_node *tmp_np;
+
+ if (strcasecmp(np->type, "spe") == 0) {
+ if (np->data == NULL) {
+ /* walk up the path until a cpu node is found */
+ tmp_np = np->parent;
+ while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
+ tmp_np = tmp_np->parent;
+
+ np->data = cbe_find_map(tmp_np);
+ }
+ return np->data;
+ }
for (i = 0; i < cbe_regs_map_count; i++)
if (cbe_regs_maps[i].cpu_node == np)
@@ -56,6 +70,7 @@ struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
return NULL;
return map->pmd_regs;
}
+EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
@@ -64,7 +79,23 @@ struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
return NULL;
return map->pmd_regs;
}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
+struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return &map->pmd_shadow_regs;
+}
+
+struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return &map->pmd_shadow_regs;
+}
struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
@@ -73,6 +104,7 @@ struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
return NULL;
return map->iic_regs;
}
+
struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
@@ -81,6 +113,36 @@ struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
return map->iic_regs;
}
+struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
+{
+ struct cbe_regs_map *map = cbe_find_map(np);
+ if (map == NULL)
+ return NULL;
+ return map->mic_tm_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
+{
+ struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+ if (map == NULL)
+ return NULL;
+ return map->mic_tm_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
+
+/* FIXME
+ * This is little more than a stub at the moment. It should be
+ * fleshed out so that it works for both SMT and non-SMT, no
+ * matter if the passed cpu is odd or even.
+ * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
+ * For SMT disabled, returns 0 for all cpus.
+ */
+u32 cbe_get_hw_thread_id(int cpu)
+{
+ return (cpu & 1);
+}
+EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
+
void __init cbe_regs_init(void)
{
int i;
@@ -119,6 +181,11 @@ void __init cbe_regs_init(void)
prop = get_property(cpu, "iic", NULL);
if (prop != NULL)
map->iic_regs = ioremap(prop->address, prop->len);
+
+ prop = (struct address_prop *)get_property(cpu, "mic-tm",
+ NULL);
+ if (prop != NULL)
+ map->mic_tm_regs = ioremap(prop->address, prop->len);
}
}
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
index e76e4a6af5b..440a7ecc66e 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -4,12 +4,19 @@
* This file is intended to hold the various register definitions for CBE
* on-chip system devices (memory controller, IO controller, etc...)
*
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * David J. Erb (djerb@us.ibm.com)
+ *
* (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*/
#ifndef CBE_REGS_H
#define CBE_REGS_H
+#include <asm/cell-pmu.h>
+
/*
*
* Some HID register definitions
@@ -22,6 +29,7 @@
#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
+#define MAX_CBE 2
/*
*
@@ -29,51 +37,124 @@
*
*/
+union spe_reg {
+ u64 val;
+ u8 spe[8];
+};
+
+union ppe_spe_reg {
+ u64 val;
+ struct {
+ u32 ppe;
+ u32 spe;
+ };
+};
+
+
struct cbe_pmd_regs {
- u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */
+ /* Debug Bus Control */
+ u64 pad_0x0000; /* 0x0000 */
+
+ u64 group_control; /* 0x0008 */
+
+ u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
+
+ u64 debug_bus_control; /* 0x00a8 */
+
+ u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
+
+ u64 trace_aux_data; /* 0x0100 */
+ u64 trace_buffer_0_63; /* 0x0108 */
+ u64 trace_buffer_64_127; /* 0x0110 */
+ u64 trace_address; /* 0x0118 */
+ u64 ext_tr_timer; /* 0x0120 */
+
+ u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
+
+ /* Performance Monitor */
+ u64 pm_status; /* 0x0400 */
+ u64 pm_control; /* 0x0408 */
+ u64 pm_interval; /* 0x0410 */
+ u64 pm_ctr[4]; /* 0x0418 */
+ u64 pm_start_stop; /* 0x0438 */
+ u64 pm07_control[8]; /* 0x0440 */
+
+ u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
/* Thermal Sensor Registers */
- u64 ts_ctsr1; /* 0x0800 */
- u64 ts_ctsr2; /* 0x0808 */
- u64 ts_mtsr1; /* 0x0810 */
- u64 ts_mtsr2; /* 0x0818 */
- u64 ts_itr1; /* 0x0820 */
- u64 ts_itr2; /* 0x0828 */
- u64 ts_gitr; /* 0x0830 */
- u64 ts_isr; /* 0x0838 */
- u64 ts_imr; /* 0x0840 */
- u64 tm_cr1; /* 0x0848 */
- u64 tm_cr2; /* 0x0850 */
- u64 tm_simr; /* 0x0858 */
- u64 tm_tpr; /* 0x0860 */
- u64 tm_str1; /* 0x0868 */
- u64 tm_str2; /* 0x0870 */
- u64 tm_tsr; /* 0x0878 */
+ union spe_reg ts_ctsr1; /* 0x0800 */
+ u64 ts_ctsr2; /* 0x0808 */
+ union spe_reg ts_mtsr1; /* 0x0810 */
+ u64 ts_mtsr2; /* 0x0818 */
+ union spe_reg ts_itr1; /* 0x0820 */
+ u64 ts_itr2; /* 0x0828 */
+ u64 ts_gitr; /* 0x0830 */
+ u64 ts_isr; /* 0x0838 */
+ u64 ts_imr; /* 0x0840 */
+ union spe_reg tm_cr1; /* 0x0848 */
+ u64 tm_cr2; /* 0x0850 */
+ u64 tm_simr; /* 0x0858 */
+ union ppe_spe_reg tm_tpr; /* 0x0860 */
+ union spe_reg tm_str1; /* 0x0868 */
+ u64 tm_str2; /* 0x0870 */
+ union ppe_spe_reg tm_tsr; /* 0x0878 */
/* Power Management */
- u64 pm_control; /* 0x0880 */
-#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
- u64 pm_status; /* 0x0888 */
+ u64 pmcr; /* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
+ u64 pmsr; /* 0x0888 */
/* Time Base Register */
- u64 tbr; /* 0x0890 */
+ u64 tbr; /* 0x0890 */
- u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
+ u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
/* Fault Isolation Registers */
- u64 checkstop_fir; /* 0x0c00 */
- u64 recoverable_fir;
- u64 spec_att_mchk_fir;
- u64 fir_mode_reg;
- u64 fir_enable_mask;
+ u64 checkstop_fir; /* 0x0c00 */
+ u64 recoverable_fir; /* 0x0c08 */
+ u64 spec_att_mchk_fir; /* 0x0c10 */
+ u64 fir_mode_reg; /* 0x0c18 */
+ u64 fir_enable_mask; /* 0x0c20 */
- u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28]; /* 0x0c28 */
+ u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28]; /* 0x0c28 */
};
extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
/*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+ u32 group_control;
+ u32 debug_bus_control;
+ u32 trace_address;
+ u32 ext_tr_timer;
+ u32 pm_status;
+ u32 pm_control;
+ u32 pm_interval;
+ u32 pm_start_stop;
+ u32 pm07_control[NR_CTRS];
+
+ u32 pm_ctr[NR_PHYS_CTRS];
+ u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
*
* IIC unit register definitions
*
@@ -102,18 +183,28 @@ struct cbe_iic_regs {
/* IIC interrupt registers */
struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
- u64 iic_ir; /* 0x0440 */
- u64 iic_is; /* 0x0448 */
+
+ u64 iic_ir; /* 0x0440 */
+#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0 0x0
+#define CBE_IIC_IR_IOC_1S 0xb
+#define CBE_IIC_IR_PT_0 0xe
+#define CBE_IIC_IR_PT_1 0xf
+
+ u64 iic_is; /* 0x0448 */
+#define CBE_IIC_IS_PMI 0x2
u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
/* IOC FIR */
u64 ioc_fir_reset; /* 0x0500 */
- u64 ioc_fir_set;
- u64 ioc_checkstop_enable;
- u64 ioc_fir_error_mask;
- u64 ioc_syserr_enable;
- u64 ioc_fir;
+ u64 ioc_fir_set; /* 0x0508 */
+ u64 ioc_checkstop_enable; /* 0x0510 */
+ u64 ioc_fir_error_mask; /* 0x0518 */
+ u64 ioc_syserr_enable; /* 0x0520 */
+ u64 ioc_fir; /* 0x0528 */
u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
};
@@ -122,6 +213,48 @@ extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+struct cbe_mic_tm_regs {
+ u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
+
+ u64 mic_ctl_cnfg2; /* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
+
+ u64 pad_0x0048; /* 0x0048 */
+
+ u64 mic_aux_trc_base; /* 0x0050 */
+ u64 mic_aux_trc_max_addr; /* 0x0058 */
+ u64 mic_aux_trc_cur_addr; /* 0x0060 */
+ u64 mic_aux_trc_grf_addr; /* 0x0068 */
+ u64 mic_aux_trc_grf_data; /* 0x0070 */
+
+ u64 pad_0x0078; /* 0x0078 */
+
+ u64 mic_ctl_cnfg_0; /* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
+
+ u64 pad_0x0088; /* 0x0088 */
+
+ u64 slow_fast_timer_0; /* 0x0090 */
+ u64 slow_next_timer_0; /* 0x0098 */
+
+ u8 pad_0x00a0_0x01c0[0x01c0 - 0x0a0]; /* 0x00a0 */
+
+ u64 mic_ctl_cnfg_1; /* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
+ u64 pad_0x01c8; /* 0x01c8 */
+
+ u64 slow_fast_timer_1; /* 0x01d0 */
+ u64 slow_next_timer_1; /* 0x01d8 */
+
+ u8 pad_0x01e0_0x1000[0x1000 - 0x01e0]; /* 0x01e0 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
/* Init this module early */
extern void cbe_regs_init(void);
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644
index 00000000000..616a0a3fd0e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -0,0 +1,226 @@
+/*
+ * thermal support for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <asm/spu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#include "cbe_regs.h"
+#include "spu_priv1_mmio.h"
+
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+{
+ struct spu *spu;
+
+ spu = container_of(sysdev, struct spu, sysdev);
+
+ return cbe_get_pmd_regs(spu_devnode(spu));
+}
+
+/* returns the value for a given spu in a given register */
+static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+{
+ unsigned int *id;
+ union spe_reg value;
+ struct spu *spu;
+
+ /* Getting the id from the reg attribute will not work on future device-tree layouts;
+ * in the future we should store the id in the spu struct and use it here. */
+ spu = container_of(sysdev, struct spu, sysdev);
+ id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
+ value.val = in_be64(&reg->val);
+
+ return value.spe[*id];
+}
+
+static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
+{
+ int value;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = get_pmd_regs(sysdev);
+
+ value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+ /* clear all other bits */
+ value &= 0x3F;
+ /* temp is stored in steps of 2 degrees */
+ value *= 2;
+ /* base temp is 65 degrees */
+ value += 65;
+
+ return sprintf(buf, "%d\n", (int) value);
+}
+
+static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ u64 value;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+ value = in_be64(&pmd_regs->ts_ctsr2);
+
+ /* access the corresponding byte */
+ value >>= pos;
+ /* clear all other bits */
+ value &= 0x3F;
+ /* temp is stored in steps of 2 degrees */
+ value *= 2;
+ /* base temp is 65 degrees */
+ value += 65;
+
+ return sprintf(buf, "%d\n", (int) value);
+}
+
+
+/* shows the temperature of the DTS on the PPE,
+ * located near the linear thermal sensor */
+static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf)
+{
+ return ppe_show_temp(sysdev, buf, 32);
+}
+
+/* shows the temperature of the second DTS on the PPE */
+static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
+{
+ return ppe_show_temp(sysdev, buf, 0);
+}
+
+static struct sysdev_attribute attr_spu_temperature = {
+ .attr = {.name = "temperature", .mode = 0400 },
+ .show = spu_show_temp,
+};
+
+static struct attribute *spu_attributes[] = {
+ &attr_spu_temperature.attr,
+ NULL,
+};
+
+static struct attribute_group spu_attribute_group = {
+ .name = "thermal",
+ .attrs = spu_attributes,
+};
+
+static struct sysdev_attribute attr_ppe_temperature0 = {
+ .attr = {.name = "temperature0", .mode = 0400 },
+ .show = ppe_show_temp0,
+};
+
+static struct sysdev_attribute attr_ppe_temperature1 = {
+ .attr = {.name = "temperature1", .mode = 0400 },
+ .show = ppe_show_temp1,
+};
+
+static struct attribute *ppe_attributes[] = {
+ &attr_ppe_temperature0.attr,
+ &attr_ppe_temperature1.attr,
+ NULL,
+};
+
+static struct attribute_group ppe_attribute_group = {
+ .name = "thermal",
+ .attrs = ppe_attributes,
+};
+
+/*
+ * initialize throttling with default values
+ */
+static void __init init_default_values(void)
+{
+ int cpu;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct sys_device *sysdev;
+ union ppe_spe_reg tpr;
+ union spe_reg str1;
+ u64 str2;
+ union spe_reg cr1;
+ u64 cr2;
+
+ /* TPR defaults */
+ /* ppe
+ * 1F - no full stop
+ * 08 - dynamic throttling starts if over 80 degrees
+ * 03 - dynamic throttling ceases if below 70 degrees */
+ tpr.ppe = 0x1F0803;
+ /* spe
+ * 10 - full stopped when over 96 degrees
+ * 08 - dynamic throttling starts if over 80 degrees
+ * 03 - dynamic throttling ceases if below 70 degrees
+ */
+ tpr.spe = 0x100803;
+
+ /* STR defaults */
+ /* str1
+ * 10 - stop 16 of 32 cycles
+ */
+ str1.val = 0x1010101010101010ull;
+ /* str2
+ * 10 - stop 16 of 32 cycles
+ */
+ str2 = 0x10;
+
+ /* CR defaults */
+ /* cr1
+ * 4 - normal operation
+ */
+ cr1.val = 0x0404040404040404ull;
+ /* cr2
+ * 4 - normal operation
+ */
+ cr2 = 0x04;
+
+ for_each_possible_cpu (cpu) {
+ pr_debug("processing cpu %d\n", cpu);
+ sysdev = get_cpu_sysdev(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+
+ out_be64(&pmd_regs->tm_str2, str2);
+ out_be64(&pmd_regs->tm_str1.val, str1.val);
+ out_be64(&pmd_regs->tm_tpr.val, tpr.val);
+ out_be64(&pmd_regs->tm_cr1.val, cr1.val);
+ out_be64(&pmd_regs->tm_cr2, cr2);
+ }
+}
+
+
+static int __init thermal_init(void)
+{
+ init_default_values();
+
+ spu_add_sysdev_attr_group(&spu_attribute_group);
+ cpu_add_sysdev_attr_group(&ppe_attribute_group);
+
+ return 0;
+}
+module_init(thermal_init);
+
+static void __exit thermal_exit(void)
+{
+ spu_remove_sysdev_attr_group(&spu_attribute_group);
+ cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+}
+module_exit(thermal_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a914c12b406..6666d037eb4 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -396,3 +396,19 @@ void __init iic_init_IRQ(void)
/* Enable on current CPU */
iic_setup_cpu();
}
+
+void iic_set_interrupt_routing(int cpu, int thread, int priority)
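+/* Program the IIC routing register so that the next external interrupt of
+ * the given priority is delivered to the chosen node and hardware thread. */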
+{
+ struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
+ u64 iic_ir = 0;
+ int node = cpu >> 1;
+
+ /* Set which node and thread will handle the next interrupt */
+ iic_ir |= CBE_IIC_IR_PRIO(priority) |
+ CBE_IIC_IR_DEST_NODE(node);
+ if (thread == 0)
+ iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
+ else
+ iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
+ out_be64(&iic_regs->iic_ir, iic_ir);
+}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 9ba1d3c17b4..942dc39d604 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -83,5 +83,7 @@ extern u8 iic_get_target_id(int cpu);
extern void spider_init_IRQ(void);
+extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
+
#endif
#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
new file mode 100644
index 00000000000..580d4259591
--- /dev/null
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+
+#define SPIDER_PCI_REG_BASE 0xd000
+#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
+#define SPIDER_PCI_DUMMY_READ 0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
+
+/* Undefine that to re-enable bogus prefetch
+ *
+ * Without that workaround, the chip will do bogus prefetch past
+ * page boundary from system memory. This setting will disable that,
+ * though the documentation is unclear as to the consequences of doing
+ * so, whether they are purely performance-related or possible
+ * misbehaviour... It's not clear whether the chip can handle unaligned
+ * accesses at all without prefetching enabled.
+ *
+ * For now, things appear to be behaving properly with that prefetching
+ * disabled and IDE, possibly because IDE isn't doing any unaligned
+ * access.
+ */
+#define SPIDER_DISABLE_PREFETCH
+
+#define MAX_SPIDERS 2
+
+static struct spider_pci_bus {
+ void __iomem *regs;
+ unsigned long mmio_start;
+ unsigned long mmio_end;
+ unsigned long pio_vstart;
+ unsigned long pio_vend;
+} spider_pci_busses[MAX_SPIDERS];
+static int spider_pci_count;
+
+static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
+ unsigned long paddr)
+{
+ int i;
+
+ for (i = 0; i < spider_pci_count; i++) {
+ struct spider_pci_bus *bus = &spider_pci_busses[i];
+ if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
+ return bus;
+ if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
+ return bus;
+ }
+ return NULL;
+}
+
+static void spider_io_flush(const volatile void __iomem *addr)
+{
+ struct spider_pci_bus *bus;
+ int token;
+
+ /* Get platform token (set by ioremap) from address */
+ token = PCI_GET_ADDR_TOKEN(addr);
+
+ /* Fast path if we have a non-0 token; it indicates which bus we
+ * are on.
+ *
+ * If the token is 0, that means either the ioremap was done
+ * before we initialized this layer, or it's a PIO operation. We
+ * fall back to a slow path in this case. Hopefully, internal devices
+ * which are ioremap'ed early should use in_XX/out_XX functions
+ * instead of the PCI ones and thus not suffer from the slowdown.
+ *
+ * Also note that currently, the workaround will not work for areas
+ * that are not mapped with PTEs (bolted in the hash table). This
+ * is the case for ioremaps done very early at boot (before
+ * mem_init_done) and includes the mapping of the ISA IO space.
+ *
+ * Fortunately, none of the affected devices is expected to do DMA
+ * and thus there should be no problem in practice.
+ *
+ * In order to improve performance, we only do the PTE search for
+ * addresses falling in the PHB IO space area. That means it will
+ * not work for hotplug'ed PHBs but those don't exist with Spider.
+ */
+ if (token && token <= spider_pci_count)
+ bus = &spider_pci_busses[token - 1];
+ else {
+ unsigned long vaddr, paddr;
+ pte_t *ptep;
+
+ /* Fixup physical address */
+ vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+
+ /* Check if it's in allowed range for PIO */
+ if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+ return;
+
+ /* Try to find a PTE. If not, clear the paddr, we'll do
+ * a vaddr only lookup (PIO only)
+ */
+ ptep = find_linux_pte(init_mm.pgd, vaddr);
+ if (ptep == NULL)
+ paddr = 0;
+ else
+ paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+
+ bus = spider_pci_find(vaddr, paddr);
+ if (bus == NULL)
+ return;
+ }
+
+ /* Now do the workaround */
+ (void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
+}
+
+static u8 spider_readb(const volatile void __iomem *addr)
+{
+ u8 val = __do_readb(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u16 spider_readw(const volatile void __iomem *addr)
+{
+ u16 val = __do_readw(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u32 spider_readl(const volatile void __iomem *addr)
+{
+ u32 val = __do_readl(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u64 spider_readq(const volatile void __iomem *addr)
+{
+ u64 val = __do_readq(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u16 spider_readw_be(const volatile void __iomem *addr)
+{
+ u16 val = __do_readw_be(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u32 spider_readl_be(const volatile void __iomem *addr)
+{
+ u32 val = __do_readl_be(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static u64 spider_readq_be(const volatile void __iomem *addr)
+{
+ u64 val = __do_readq_be(addr);
+ spider_io_flush(addr);
+ return val;
+}
+
+static void spider_readsb(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
+{
+ __do_readsb(addr, buf, count);
+ spider_io_flush(addr);
+}
+
+static void spider_readsw(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
+{
+ __do_readsw(addr, buf, count);
+ spider_io_flush(addr);
+}
+
+static void spider_readsl(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
+{
+ __do_readsl(addr, buf, count);
+ spider_io_flush(addr);
+}
+
+static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n)
+{
+ __do_memcpy_fromio(dest, src, n);
+ spider_io_flush(src);
+}
+
+
+static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
+ unsigned long flags)
+{
+ struct spider_pci_bus *bus;
+ void __iomem *res = __ioremap(addr, size, flags);
+ int busno;
+
+ pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
+ addr, size, flags, res);
+
+ bus = spider_pci_find(0, addr);
+ if (bus != NULL) {
+ busno = bus - spider_pci_busses;
+ pr_debug(" found bus %d, setting token\n", busno);
+ PCI_SET_ADDR_TOKEN(res, busno + 1);
+ }
+ pr_debug(" result=0x%p\n", res);
+
+ return res;
+}
+
+static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
+{
+#ifdef SPIDER_DISABLE_PREFETCH
+ u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
+ pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
+ out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
+#endif
+
+ /* Configure the dummy address for the workaround */
+ out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
+}
+
+static void __init spider_pci_add_one(struct pci_controller *phb)
+{
+ struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+ struct device_node *np = phb->arch_data;
+ struct resource rsrc;
+ void __iomem *regs;
+
+ if (spider_pci_count >= MAX_SPIDERS) {
+ printk(KERN_ERR "Too many spider bridges, workarounds"
+ " disabled for %s\n", np->full_name);
+ return;
+ }
+
+ /* Get the registers for the beast */
+ if (of_address_to_resource(np, 0, &rsrc)) {
+ printk(KERN_ERR "Failed to get registers for spider %s"
+ " workarounds disabled\n", np->full_name);
+ return;
+ }
+
+ /* Mask out some useless bits in there to get to the base of the
+ * spider chip
+ */
+ rsrc.start &= ~0xfffffffful;
+
+ /* Map them */
+ regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
+ if (regs == NULL) {
+ printk(KERN_ERR "Failed to map registers for spider %s"
+ " workarounds disabled\n", np->full_name);
+ return;
+ }
+
+ spider_pci_count++;
+
+ /* We assume spiders only have one MMIO resource */
+ bus->mmio_start = phb->mem_resources[0].start;
+ bus->mmio_end = phb->mem_resources[0].end + 1;
+
+ bus->pio_vstart = (unsigned long)phb->io_base_virt;
+ bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
+
+ bus->regs = regs;
+
+ printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n",np->full_name);
+
+ pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
+ bus->mmio_start, bus->mmio_end);
+ pr_debug(" pio (V) = 0x%016lx..0x%016lx\n",
+ bus->pio_vstart, bus->pio_vend);
+ pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
+ rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
+
+ spider_pci_setup_chip(bus);
+}
+
+static struct ppc_pci_io __initdata spider_pci_io = {
+ .readb = spider_readb,
+ .readw = spider_readw,
+ .readl = spider_readl,
+ .readq = spider_readq,
+ .readw_be = spider_readw_be,
+ .readl_be = spider_readl_be,
+ .readq_be = spider_readq_be,
+ .readsb = spider_readsb,
+ .readsw = spider_readsw,
+ .readsl = spider_readsl,
+ .memcpy_fromio = spider_memcpy_fromio,
+};
+
+static int __init spider_pci_workaround_init(void)
+{
+ struct pci_controller *phb;
+
+ if (!machine_is(cell))
+ return 0;
+
+ /* Find spider bridges. We assume they have all been probed
+ * in setup_arch(). If that were to change, we would need to
+ * update this code to cope with dynamically added busses
+ */
+ list_for_each_entry(phb, &hose_list, list_node) {
+ struct device_node *np = phb->arch_data;
+ const char *model = get_property(np, "model", NULL);
+
+ /* Skip if there is no model property or the node name isn't exactly "pci" */
+ if (model == NULL || strcmp(np->name, "pci"))
+ continue;
+ /* If model is not "Spider", skip */
+ if (strcmp(model, "Spider"))
+ continue;
+ spider_pci_add_one(phb);
+ }
+
+ /* No Spider PCI found, exit */
+ if (spider_pci_count == 0)
+ return 0;
+
+ /* Set up IO callbacks. We only set up MMIO reads. PIO reads will
+ * fall back to MMIO reads (though without a token, thus slower)
+ */
+ ppc_pci_io = spider_pci_io;
+
+ /* Setup ioremap callback */
+ ppc_md.ioremap = spider_ioremap;
+
+ return 0;
+}
+arch_initcall(spider_pci_workaround_init);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index aca4c3db0dd..b43466ba809 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,514 +1,747 @@
/*
* IOMMU implementation for Cell Broadband Processor Architecture
- * We just establish a linear mapping at boot by setting all the
- * IOPT cache entries in the CPU.
- * The mapping functions should be identical to pci_direct_iommu,
- * except for the handling of the high order bit that is required
- * by the Spider bridge. These should be split into a separate
- * file at the point where we get a different bridge chip.
*
- * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
- * Arnd Bergmann <arndb@de.ibm.com>
+ * (C) Copyright IBM Corporation 2006
*
- * Based on linear mapping
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Author: Jeremy Kerr <jk@ozlabs.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#undef DEBUG
#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
-#include <asm/sections.h>
-#include <asm/iommu.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
+#include <asm/iommu.h>
#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/system.h>
-#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
#include <asm/udbg.h>
+#include <asm/of_platform.h>
+#include <asm/lmb.h>
-#include "iommu.h"
+#include "cbe_regs.h"
+#include "interrupt.h"
-static inline unsigned long
-get_iopt_entry(unsigned long real_address, unsigned long ioid,
- unsigned long prot)
-{
- return (prot & IOPT_PROT_MASK)
- | (IOPT_COHERENT)
- | (IOPT_ORDER_VC)
- | (real_address & IOPT_RPN_MASK)
- | (ioid & IOPT_IOID_MASK);
-}
+/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
+ * instead of leaving them mapped to some dummy page. This can be
+ * enabled once the appropriate workarounds for spider bugs have
+ * been enabled
+ */
+#define CELL_IOMMU_REAL_UNMAP
-typedef struct {
- unsigned long val;
-} ioste;
+/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
+ * IO PTEs based on the transfer direction. That can be enabled
+ * once spider-net has been fixed to pass the correct direction
+ * to the DMA mapping functions
+ */
+#define CELL_IOMMU_STRICT_PROTECTION
+
+
+#define NR_IOMMUS 2
+
+/* IOC mmap registers */
+#define IOC_Reg_Size 0x2000
+
+#define IOC_IOPT_CacheInvd 0x908
+#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
+#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
+#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul
+
+#define IOC_IOST_Origin 0x918
+#define IOC_IOST_Origin_E 0x8000000000000000ul
+#define IOC_IOST_Origin_HW 0x0000000000000800ul
+#define IOC_IOST_Origin_HL 0x0000000000000400ul
+
+#define IOC_IO_ExcpStat 0x920
+#define IOC_IO_ExcpStat_V 0x8000000000000000ul
+#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_P 0x4000000000000000ul
+#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
+#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
+#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful
+
+#define IOC_IO_ExcpMask 0x928
+#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
+#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul
+
+#define IOC_IOCmd_Offset 0x1000
+
+#define IOC_IOCmd_Cfg 0xc00
+#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul
+
+
+/* Segment table entries */
+#define IOSTE_V 0x8000000000000000ul /* valid */
+#define IOSTE_H 0x4000000000000000ul /* cache hint */
+#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
+#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
+#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
+#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
+#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
+#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
+#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
+
+/* Page table entries */
+#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
+#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
+#define IOPTE_M 0x2000000000000000ul /* coherency required */
+#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
+#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
+#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
+#define IOPTE_H 0x0000000000000800ul /* cache hint */
+#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
+
+
+/* IOMMU sizing */
+#define IO_SEGMENT_SHIFT 28
+#define IO_PAGENO_BITS (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
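+
+/* Sizing example (assuming 4kB IOMMU pages, i.e. IOMMU_PAGE_SHIFT == 12):
+ * each 256MB segment then covers 1 << 16 IO pages, so a 2GB DMA window needs
+ * 8 segment table entries and 8 * 65536 * 8 bytes = 4MB of IO page tables. */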
+
+/* The high bit needs to be set on every DMA address */
+#define SPIDER_DMA_OFFSET 0x80000000ul
+
+struct iommu_window {
+ struct list_head list;
+ struct cbe_iommu *iommu;
+ unsigned long offset;
+ unsigned long size;
+ unsigned long pte_offset;
+ unsigned int ioid;
+ struct iommu_table table;
+};
-static inline ioste
-mk_ioste(unsigned long val)
-{
- ioste ioste = { .val = val, };
- return ioste;
-}
+#define NAMESIZE 8
+struct cbe_iommu {
+ int nid;
+ char name[NAMESIZE];
+ void __iomem *xlate_regs;
+ void __iomem *cmd_regs;
+ unsigned long *stab;
+ unsigned long *ptab;
+ void *pad_page;
+ struct list_head windows;
+};
+
+/* Static array of iommus, one per node
+ * each contains a list of windows, keyed from dma_window property
+ * - on bus setup, look for a matching window, or create one
+ * - on dev setup, assign iommu_table ptr
+ */
+static struct cbe_iommu iommus[NR_IOMMUS];
+static int cbe_nr_iommus;
-static inline ioste
-get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
+static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
+ long n_ptes)
{
- unsigned long ps;
- unsigned long iostep;
- unsigned long nnpt;
- unsigned long shift;
-
- switch (page_size) {
- case 0x1000000:
- ps = IOST_PS_16M;
- nnpt = 0; /* one page per segment */
- shift = 5; /* segment has 16 iopt entries */
- break;
-
- case 0x100000:
- ps = IOST_PS_1M;
- nnpt = 0; /* one page per segment */
- shift = 1; /* segment has 256 iopt entries */
- break;
-
- case 0x10000:
- ps = IOST_PS_64K;
- nnpt = 0x07; /* 8 pages per io page table */
- shift = 0; /* all entries are used */
- break;
-
- case 0x1000:
- ps = IOST_PS_4K;
- nnpt = 0x7f; /* 128 pages per io page table */
- shift = 0; /* all entries are used */
- break;
-
- default: /* not a known compile time constant */
- {
- /* BUILD_BUG_ON() is not usable here */
- extern void __get_iost_entry_bad_page_size(void);
- __get_iost_entry_bad_page_size();
- }
- break;
- }
+ unsigned long *reg, val;
+ long n;
- iostep = iopt_base +
- /* need 8 bytes per iopte */
- (((io_address / page_size * 8)
- /* align io page tables on 4k page boundaries */
- << shift)
- /* nnpt+1 pages go into each iopt */
- & ~(nnpt << 12));
-
- nnpt++; /* this seems to work, but the documentation is not clear
- about wether we put nnpt or nnpt-1 into the ioste bits.
- In theory, this can't work for 4k pages. */
- return mk_ioste(IOST_VALID_MASK
- | (iostep & IOST_PT_BASE_MASK)
- | ((nnpt << 5) & IOST_NNPT_MASK)
- | (ps & IOST_PS_MASK));
-}
+ reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
-/* compute the address of an io pte */
-static inline unsigned long
-get_ioptep(ioste iost_entry, unsigned long io_address)
-{
- unsigned long iopt_base;
- unsigned long page_size;
- unsigned long page_number;
- unsigned long iopt_offset;
-
- iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
- page_size = iost_entry.val & IOST_PS_MASK;
-
- /* decode page size to compute page number */
- page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
- /* page number is an offset into the io page table */
- iopt_offset = (page_number << 3) & 0x7fff8ul;
- return iopt_base + iopt_offset;
-}
+ while (n_ptes > 0) {
+ /* we can invalidate up to 1 << 11 PTEs at once */
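+ /* (the 1 << 11 limit comes from the 11-bit wide NE field,
+ * IOC_IOPT_CacheInvd_NE_Mask, i.e. bits 63..53 of the register) */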
+ n = min(n_ptes, 1l << 11);
+ val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
+ | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
+ | IOC_IOPT_CacheInvd_Busy;
-/* compute the tag field of the iopt cache entry */
-static inline unsigned long
-get_ioc_tag(ioste iost_entry, unsigned long io_address)
-{
- unsigned long iopte = get_ioptep(iost_entry, io_address);
+ out_be64(reg, val);
+ while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
+ ;
- return IOPT_VALID_MASK
- | ((iopte & 0x00000000000000ff8ul) >> 3)
- | ((iopte & 0x0000003fffffc0000ul) >> 9);
+ n_ptes -= n;
+ pte += n;
+ }
}
-/* compute the hashed 6 bit index for the 4-way associative pte cache */
-static inline unsigned long
-get_ioc_hash(ioste iost_entry, unsigned long io_address)
+static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction)
{
- unsigned long iopte = get_ioptep(iost_entry, io_address);
-
- return ((iopte & 0x000000000000001f8ul) >> 3)
- ^ ((iopte & 0x00000000000020000ul) >> 17)
- ^ ((iopte & 0x00000000000010000ul) >> 15)
- ^ ((iopte & 0x00000000000008000ul) >> 13)
- ^ ((iopte & 0x00000000000004000ul) >> 11)
- ^ ((iopte & 0x00000000000002000ul) >> 9)
- ^ ((iopte & 0x00000000000001000ul) >> 7);
+ int i;
+ unsigned long *io_pte, base_pte;
+ struct iommu_window *window =
+ container_of(tbl, struct iommu_window, table);
+
+ /* implementing proper protection causes problems with the spidernet
+ * driver - check mapping directions later, but allow read & write by
+ * default for now. */
+#ifdef CELL_IOMMU_STRICT_PROTECTION
+ /* to avoid referencing a global, we use a trick here to set up the
+ * protection bit. "prot" is set up as 3 fields of 4 bits appended
+ * together, one for each of the 3 supported direction values. It is
+ * then shifted left so that the field matching the desired direction
+ * lands on the appropriate bits, and other bits are masked out.
+ */
+ const unsigned long prot = 0xc48;
+ base_pte =
+ ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
+ | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
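+ /* Worked example (assuming the usual enum dma_data_direction values,
+ * DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2):
+ * prot = 0xc48 = 1100 0100 1000b. For DMA_FROM_DEVICE the shift is
+ * 52 + 8 = 60, so only bit 3 of prot reaches bit 63 (IOPTE_PP_W);
+ * for DMA_TO_DEVICE the shift is 56 and bit 6 reaches bit 62
+ * (IOPTE_PP_R); for DMA_BIDIRECTIONAL both top bits end up set. */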
+#else
+ base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
+ (window->ioid & IOPTE_IOID_Mask);
+#endif
+
+ io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+
+ for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+ io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+ mb();
+
+ invalidate_tce_cache(window->iommu, io_pte, npages);
+
+ pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
+ index, npages, direction, base_pte);
}
-/* same as above, but pretend that we have a simpler 1-way associative
- pte cache with an 8 bit index */
-static inline unsigned long
-get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
+static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
- unsigned long iopte = get_ioptep(iost_entry, io_address);
-
- return ((iopte & 0x000000000000001f8ul) >> 3)
- ^ ((iopte & 0x00000000000020000ul) >> 17)
- ^ ((iopte & 0x00000000000010000ul) >> 15)
- ^ ((iopte & 0x00000000000008000ul) >> 13)
- ^ ((iopte & 0x00000000000004000ul) >> 11)
- ^ ((iopte & 0x00000000000002000ul) >> 9)
- ^ ((iopte & 0x00000000000001000ul) >> 7)
- ^ ((iopte & 0x0000000000000c000ul) >> 8);
-}
-static inline ioste
-get_iost_cache(void __iomem *base, unsigned long index)
-{
- unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
- return mk_ioste(in_be64(&p[index]));
-}
+ int i;
+ unsigned long *io_pte, pte;
+ struct iommu_window *window =
+ container_of(tbl, struct iommu_window, table);
-static inline void
-set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
-{
- unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
- pr_debug("ioste %02lx was %016lx, store %016lx", index,
- get_iost_cache(base, index).val, ste.val);
- out_be64(&p[index], ste.val);
- pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
-}
+ pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
-static inline unsigned long
-get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
-{
- unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
- unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);
+#ifdef CELL_IOMMU_REAL_UNMAP
+ pte = 0;
+#else
+ /* spider bridge does PCI reads after freeing - insert a mapping
+ * to a scratch page instead of an invalid entry */
+ pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
+ | (window->ioid & IOPTE_IOID_Mask);
+#endif
- *tag = tags[index];
- rmb();
- return *p;
-}
+ io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
-static inline void
-set_iopt_cache(void __iomem *base, unsigned long index,
- unsigned long tag, unsigned long val)
-{
- unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
- unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
+ for (i = 0; i < npages; i++)
+ io_pte[i] = pte;
+
+ mb();
- out_be64(p, val);
- out_be64(&tags[index], tag);
+ invalidate_tce_cache(window->iommu, io_pte, npages);
}
-static inline void
-set_iost_origin(void __iomem *base)
+static irqreturn_t ioc_interrupt(int irq, void *data)
{
- unsigned long __iomem *p = base + IOC_ST_ORIGIN;
- unsigned long origin = IOSTO_ENABLE | IOSTO_SW;
-
- pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
- out_be64(p, origin);
+ unsigned long stat;
+ struct cbe_iommu *iommu = data;
+
+ stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+
+ /* Might want to rate limit it */
+ printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
+ printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
+ !!(stat & IOC_IO_ExcpStat_V),
+ (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
+ (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
+ (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
+ (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
+ printk(KERN_ERR " page=0x%016lx\n",
+ stat & IOC_IO_ExcpStat_ADDR_Mask);
+
+ /* clear interrupt */
+ stat &= ~IOC_IO_ExcpStat_V;
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
+
+ return IRQ_HANDLED;
}
-static inline void
-set_iocmd_config(void __iomem *base)
+static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
- unsigned long __iomem *p = base + 0xc00;
- unsigned long conf;
+ struct device_node *np;
+ struct resource r;
+
+ *base = 0;
+
+ /* First look for new style /be nodes */
+ for_each_node_by_name(np, "ioc") {
+ if (of_node_to_nid(np) != nid)
+ continue;
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "iommu: can't get address for %s\n",
+ np->full_name);
+ continue;
+ }
+ *base = r.start;
+ of_node_put(np);
+ return 0;
+ }
- conf = in_be64(p);
- pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
- out_be64(p, conf | IOCMD_CONF_TE);
+ /* Ok, let's try the old way */
+ for_each_node_by_type(np, "cpu") {
+ const unsigned int *nidp;
+ const unsigned long *tmp;
+
+ nidp = get_property(np, "node-id", NULL);
+ if (nidp && *nidp == nid) {
+ tmp = get_property(np, "ioc-translation", NULL);
+ if (tmp) {
+ *base = *tmp;
+ of_node_put(np);
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
}
-static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
+static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
{
- set_iocmd_config(base);
- set_iost_origin(mmio_base);
-}
+ struct page *page;
+ int ret, i;
+ unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
+ unsigned long xlate_base;
+ unsigned int virq;
+
+ if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
+ panic("%s: missing IOC register mappings for node %d\n",
+ __FUNCTION__, iommu->nid);
+
+ iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
+ iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
+
+ segments = size >> IO_SEGMENT_SHIFT;
+ pages_per_segment = 1ull << IO_PAGENO_BITS;
+
+ pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
+ __FUNCTION__, iommu->nid, segments, pages_per_segment);
+
+ /* set up the segment table */
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+ BUG_ON(!page);
+ iommu->stab = page_address(page);
+ clear_page(iommu->stab);
+
+ /* ... and the page tables. Since these are contiguous, we can treat
+ * the page tables as one array of ptes, like pSeries does.
+ */
+ ptab_size = segments * pages_per_segment * sizeof(unsigned long);
+ pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
+ iommu->nid, ptab_size, get_order(ptab_size));
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
+ BUG_ON(!page);
+
+ iommu->ptab = page_address(page);
+ memset(iommu->ptab, 0, ptab_size);
+
+ /* allocate a bogus page for the end of each mapping */
+ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+ BUG_ON(!page);
+ iommu->pad_page = page_address(page);
+ clear_page(iommu->pad_page);
+
+ /* number of pages needed for a page table */
+ n_pte_pages = (pages_per_segment *
+ sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+
+ pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
+ __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+ n_pte_pages);
+
+ /* initialise the STEs */
+ reg = IOSTE_V | ((n_pte_pages - 1) << 5);
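+ /* (n_pte_pages - 1) << 5 lands in the IOSTE_NPPT_Mask field
+ * (bits 11..5 of the segment table entry) */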
+
+ if (IOMMU_PAGE_SIZE == 0x1000)
+ reg |= IOSTE_PS_4K;
+ else if (IOMMU_PAGE_SIZE == 0x10000)
+ reg |= IOSTE_PS_64K;
+ else {
+ extern void __unknown_page_size_error(void);
+ __unknown_page_size_error();
+ }
+
+ pr_debug("Setting up IOMMU stab:\n");
+ for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
+ iommu->stab[i] = reg |
+ (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+ pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
+ }
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-static void iommu_bus_setup_null(struct pci_bus *b) { }
+ /* ensure that the STEs have updated */
+ mb();
-struct cell_iommu {
- unsigned long base;
- unsigned long mmio_base;
- void __iomem *mapped_base;
- void __iomem *mapped_mmio_base;
-};
+ /* setup interrupts for the iommu. */
+ reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
+ reg & ~IOC_IO_ExcpStat_V);
+ out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
+ IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
-static struct cell_iommu cell_iommus[NR_CPUS];
+ virq = irq_create_mapping(NULL,
+ IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
+ BUG_ON(virq == NO_IRQ);
-/* initialize the iommu to support a simple linear mapping
- * for each DMA window used by any device. For now, we
- * happen to know that there is only one DMA window in use,
- * starting at iopt_phys_offset. */
-static void cell_do_map_iommu(struct cell_iommu *iommu,
- unsigned int ioid,
- unsigned long map_start,
- unsigned long map_size)
-{
- unsigned long io_address, real_address;
- void __iomem *ioc_base, *ioc_mmio_base;
- ioste ioste;
- unsigned long index;
+ ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
+ iommu->name, iommu);
+ BUG_ON(ret);
- /* we pretend the io page table was at a very high address */
- const unsigned long fake_iopt = 0x10000000000ul;
- const unsigned long io_page_size = 0x1000000; /* use 16M pages */
- const unsigned long io_segment_size = 0x10000000; /* 256M */
-
- ioc_base = iommu->mapped_base;
- ioc_mmio_base = iommu->mapped_mmio_base;
-
- for (real_address = 0, io_address = map_start;
- io_address <= map_start + map_size;
- real_address += io_page_size, io_address += io_page_size) {
- ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
- if ((real_address % io_segment_size) == 0) /* segment start */
- set_iost_cache(ioc_mmio_base,
- io_address >> 28, ioste);
- index = get_ioc_hash_1way(ioste, io_address);
- pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
- io_address, index, ioste.val);
- set_iopt_cache(ioc_mmio_base,
- get_ioc_hash_1way(ioste, io_address),
- get_ioc_tag(ioste, io_address),
- get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
- }
+ /* set the IOC segment table origin register (and turn on the iommu) */
+ reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
+ out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
+ in_be64(iommu->xlate_regs + IOC_IOST_Origin);
+
+ /* turn on IO translation */
+ reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
+ out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
-static void iommu_devnode_setup(struct device_node *d)
+#if 0/* Unused for now */
+static struct iommu_window *find_window(struct cbe_iommu *iommu,
+ unsigned long offset, unsigned long size)
{
- const unsigned int *ioid;
- unsigned long map_start, map_size, token;
- const unsigned long *dma_window;
- struct cell_iommu *iommu;
+ struct iommu_window *window;
- ioid = get_property(d, "ioid", NULL);
- if (!ioid)
- pr_debug("No ioid entry found !\n");
+ /* todo: check for overlapping (but not equal) windows */
- dma_window = get_property(d, "ibm,dma-window", NULL);
- if (!dma_window)
- pr_debug("No ibm,dma-window entry found !\n");
+ list_for_each_entry(window, &(iommu->windows), list) {
+ if (window->offset == offset && window->size == size)
+ return window;
+ }
- map_start = dma_window[1];
- map_size = dma_window[2];
- token = dma_window[0] >> 32;
+ return NULL;
+}
+#endif
- iommu = &cell_iommus[token];
+static struct iommu_window * __init
+cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
+ unsigned long offset, unsigned long size,
+ unsigned long pte_offset)
+{
+ struct iommu_window *window;
+ const unsigned int *ioid;
- cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+ ioid = get_property(np, "ioid", NULL);
+ if (ioid == NULL)
+ printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
+ np->full_name);
+
+ window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+ BUG_ON(window == NULL);
+
+ window->offset = offset;
+ window->size = size;
+ window->ioid = ioid ? *ioid : 0;
+ window->iommu = iommu;
+ window->pte_offset = pte_offset;
+
+ window->table.it_blocksize = 16;
+ window->table.it_base = (unsigned long)iommu->ptab;
+ window->table.it_index = iommu->nid;
+ window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
+ window->pte_offset;
+ window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+
+ iommu_init_table(&window->table, iommu->nid);
+
+ pr_debug("\tioid %d\n", window->ioid);
+ pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
+ pr_debug("\tbase 0x%016lx\n", window->table.it_base);
+ pr_debug("\toffset 0x%lx\n", window->table.it_offset);
+ pr_debug("\tsize %ld\n", window->table.it_size);
+
+ list_add(&window->list, &iommu->windows);
+
+ if (offset != 0)
+ return window;
+
+ /* We need to map and reserve the first IOMMU page since it's used
+ * by the spider workaround. In theory, we only need to do that when
+ * running on spider but it doesn't really matter.
+ *
+ * This code also assumes that we have a window that starts at 0,
+ * which is the case on all spider based blades.
+ */
+ __set_bit(0, window->table.it_map);
+ tce_build_cell(&window->table, window->table.it_offset, 1,
+ (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
+ window->table.it_hint = window->table.it_blocksize;
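+ /* (presumably this moves the allocation hint past the entry we just
+ * reserved, so normal mappings start after it) */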
+
+ return window;
}
-static void iommu_bus_setup(struct pci_bus *b)
+static struct cbe_iommu *cell_iommu_for_node(int nid)
{
- struct device_node *d = (struct device_node *)b->sysdata;
- iommu_devnode_setup(d);
-}
+ int i;
+ for (i = 0; i < cbe_nr_iommus; i++)
+ if (iommus[i].nid == nid)
+ return &iommus[i];
+ return NULL;
+}
-static int cell_map_iommu_hardcoded(int num_nodes)
+static void cell_dma_dev_setup(struct device *dev)
{
- struct cell_iommu *iommu = NULL;
-
- pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+ struct iommu_window *window;
+ struct cbe_iommu *iommu;
+ struct dev_archdata *archdata = &dev->archdata;
+
+ /* If we run without iommu, no need to do anything */
+ if (pci_dma_ops == &dma_direct_ops)
+ return;
+
+ /* Current implementation uses the first window available in that
+ * node's iommu. We -might- do something smarter later though it may
+ * never be necessary
+ */
+ iommu = cell_iommu_for_node(archdata->numa_node);
+ if (iommu == NULL || list_empty(&iommu->windows)) {
+ printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+ archdata->of_node ? archdata->of_node->full_name : "?",
+ archdata->numa_node);
+ return;
+ }
+ window = list_entry(iommu->windows.next, struct iommu_window, list);
- /* node 0 */
- iommu = &cell_iommus[0];
- iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
- iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
+ archdata->dma_data = &window->table;
+}
- enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+static void cell_pci_dma_dev_setup(struct pci_dev *dev)
+{
+ cell_dma_dev_setup(&dev->dev);
+}
- cell_do_map_iommu(iommu, 0x048a,
- 0x20000000ul,0x20000000ul);
+static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
- if (num_nodes < 2)
+ /* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
- /* node 1 */
- iommu = &cell_iommus[1];
- iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
- iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
-
- enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+ /* We use the PCI DMA ops */
+ dev->archdata.dma_ops = pci_dma_ops;
- cell_do_map_iommu(iommu, 0x048a,
- 0x20000000,0x20000000ul);
+ cell_dma_dev_setup(dev);
return 0;
}
+static struct notifier_block cell_of_bus_notifier = {
+ .notifier_call = cell_of_bus_notify
+};
-static int cell_map_iommu(void)
+static int __init cell_iommu_get_window(struct device_node *np,
+ unsigned long *base,
+ unsigned long *size)
{
- unsigned int num_nodes = 0;
- const unsigned int *node_id;
- const unsigned long *base, *mmio_base;
- struct device_node *dn;
- struct cell_iommu *iommu = NULL;
-
- /* determine number of nodes (=iommus) */
- pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
- for(dn = of_find_node_by_type(NULL, "cpu");
- dn;
- dn = of_find_node_by_type(dn, "cpu")) {
- node_id = get_property(dn, "node-id", NULL);
-
- if (num_nodes < *node_id)
- num_nodes = *node_id;
- }
-
- num_nodes++;
- pr_debug("%i found.\n", num_nodes);
+ const void *dma_window;
+ unsigned long index;
- /* map the iommu registers for each node */
- pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
- for(dn = of_find_node_by_type(NULL, "cpu");
- dn;
- dn = of_find_node_by_type(dn, "cpu")) {
+ /* Use ibm,dma-window if available, else fall back to a hard-coded default */
+ dma_window = get_property(np, "ibm,dma-window", NULL);
+ if (dma_window == NULL) {
+ *base = 0;
+ *size = 0x80000000u;
+ return -ENODEV;
+ }
- node_id = get_property(dn, "node-id", NULL);
- base = get_property(dn, "ioc-cache", NULL);
- mmio_base = get_property(dn, "ioc-translation", NULL);
+ of_parse_dma_window(np, dma_window, &index, base, size);
+ return 0;
+}
- if (!base || !mmio_base || !node_id)
- return cell_map_iommu_hardcoded(num_nodes);
+static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
+{
+ struct cbe_iommu *iommu;
+ unsigned long base, size;
+ int nid, i;
+
+ /* Get node ID */
+ nid = of_node_to_nid(np);
+ if (nid < 0) {
+ printk(KERN_ERR "iommu: failed to get node for %s\n",
+ np->full_name);
+ return;
+ }
+ pr_debug("iommu: setting up iommu for node %d (%s)\n",
+ nid, np->full_name);
+
+ /* XXX todo: If we can have multiple windows on the same IOMMU, which
+ * isn't the case today, we probably want to check here whether the
+ * iommu for that node is already set up.
+ * However, there might be issues with getting the size right, so let's
+ * ignore that for now. We might want to completely get rid of the
+ * multiple window support since the cell iommu supports per-page ioids
+ */
+
+ if (cbe_nr_iommus >= NR_IOMMUS) {
+ printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
+ np->full_name);
+ return;
+ }
- iommu = &cell_iommus[*node_id];
- iommu->base = *base;
- iommu->mmio_base = *mmio_base;
+ /* Init base fields */
+ i = cbe_nr_iommus++;
+ iommu = &iommus[i];
+ iommu->stab = 0;
+ iommu->nid = nid;
+ snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
+ INIT_LIST_HEAD(&iommu->windows);
- iommu->mapped_base = ioremap(*base, 0x1000);
- iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);
+ /* Obtain a window for it */
+ cell_iommu_get_window(np, &base, &size);
- enable_mapping(iommu->mapped_base,
- iommu->mapped_mmio_base);
+ pr_debug("\ttranslating window 0x%lx...0x%lx\n",
+ base, base + size - 1);
- /* everything else will be done in iommu_bus_setup */
- }
+ /* Initialize the hardware */
+ cell_iommu_setup_hardware(iommu, size);
- return 1;
+ /* Setup the iommu_table */
+ cell_iommu_setup_window(iommu, np, base, size,
+ offset >> IOMMU_PAGE_SHIFT);
}
-static void *cell_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void __init cell_disable_iommus(void)
{
- void *ret;
-
- ret = (void *)__get_free_pages(flag, get_order(size));
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
+ int node;
+ unsigned long base, val;
+ void __iomem *xregs, *cregs;
+
+ /* Make sure IOC translation is disabled on all nodes */
+ for_each_online_node(node) {
+ if (cell_iommu_find_ioc(node, &base))
+ continue;
+ xregs = ioremap(base, IOC_Reg_Size);
+ if (xregs == NULL)
+ continue;
+ cregs = xregs + IOC_IOCmd_Offset;
+
+ pr_debug("iommu: cleaning up iommu on node %d\n", node);
+
+ out_be64(xregs + IOC_IOST_Origin, 0);
+ (void)in_be64(xregs + IOC_IOST_Origin);
+ val = in_be64(cregs + IOC_IOCmd_Cfg);
+ val &= ~IOC_IOCmd_Cfg_TE;
+ out_be64(cregs + IOC_IOCmd_Cfg, val);
+ (void)in_be64(cregs + IOC_IOCmd_Cfg);
+
+ iounmap(xregs);
}
- return ret;
}
-static void cell_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static int __init cell_iommu_init_disabled(void)
{
- free_pages((unsigned long)vaddr, get_order(size));
-}
+ struct device_node *np = NULL;
+ unsigned long base = 0, size;
+
+ /* When no iommu is present, we use direct DMA ops */
+ pci_dma_ops = &dma_direct_ops;
+
+ /* First make sure all IOC translation is turned off */
+ cell_disable_iommus();
+
+ /* If we have no Axon, we set up the spider DMA magic offset */
+ if (of_find_node_by_name(NULL, "axon") == NULL)
+ dma_direct_offset = SPIDER_DMA_OFFSET;
+
+ /* Now we need to check to see where the memory is mapped
+ * in PCI space. We assume that all busses use the same dma
+ * window which is always the case so far on Cell, thus we
+ * pick up the first pci-internal node we can find and check
+ * the DMA window from there.
+ */
+ for_each_node_by_name(np, "axon") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ if (cell_iommu_get_window(np, &base, &size) == 0)
+ break;
+ }
+ if (np == NULL) {
+ for_each_node_by_name(np, "pci-internal") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ if (cell_iommu_get_window(np, &base, &size) == 0)
+ break;
+ }
+ }
+ of_node_put(np);
+
+ /* If we found a DMA window, we check if it's big enough to enclose
+ * all of physical memory. If not, we force enable IOMMU
+ */
+ if (np && size < lmb_end_of_DRAM()) {
+ printk(KERN_WARNING "iommu: force-enabled, dma window"
+ " (%ldMB) smaller than total memory (%ldMB)\n",
+ size >> 20, lmb_end_of_DRAM() >> 20);
+ return -ENODEV;
+ }
-static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
- size_t size, enum dma_data_direction direction)
-{
- return virt_to_abs(ptr) | CELL_DMA_VALID;
-}
+ dma_direct_offset += base;
-static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
+ printk("iommu: disabled, direct DMA offset is 0x%lx\n",
+ dma_direct_offset);
+
+ return 0;
}
-static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static int __init cell_iommu_init(void)
{
- int i;
+ struct device_node *np;
+
+ if (!machine_is(cell))
+ return -ENODEV;
+
+ /* If IOMMU is disabled or we have little enough RAM to not need
+ * to enable it, we set up a direct mapping.
+ *
+ * Note: should we make sure we have the IOMMU actually disabled ?
+ */
+ if (iommu_is_off ||
+ (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
+ if (cell_iommu_init_disabled() == 0)
+ goto bail;
+
+ /* Setup various ppc_md. callbacks */
+ ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ ppc_md.tce_build = tce_build_cell;
+ ppc_md.tce_free = tce_free_cell;
+
+ /* Create an iommu for each /axon node. */
+ for_each_node_by_name(np, "axon") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ cell_iommu_init_one(np, 0);
+ }
- for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = (page_to_phys(sg->page) + sg->offset)
- | CELL_DMA_VALID;
- sg->dma_length = sg->length;
+ /* Create an iommu for each toplevel /pci-internal node for
+ * old hardware/firmware
+ */
+ for_each_node_by_name(np, "pci-internal") {
+ if (np->parent == NULL || np->parent->parent != NULL)
+ continue;
+ cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
}
- return nents;
-}
+ /* Setup default PCI iommu ops */
+ pci_dma_ops = &dma_iommu_ops;
-static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
-}
+ bail:
+ /* Register callbacks on OF platform device addition/removal
+ * to handle linking them to the right DMA operations
+ */
+ bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);
-static int cell_dma_supported(struct device *dev, u64 mask)
-{
- return mask < 0x100000000ull;
+ return 0;
}
+arch_initcall(cell_iommu_init);
-static struct dma_mapping_ops cell_iommu_ops = {
- .alloc_coherent = cell_alloc_coherent,
- .free_coherent = cell_free_coherent,
- .map_single = cell_map_single,
- .unmap_single = cell_unmap_single,
- .map_sg = cell_map_sg,
- .unmap_sg = cell_unmap_sg,
- .dma_supported = cell_dma_supported,
-};
-
-void cell_init_iommu(void)
-{
- int setup_bus = 0;
-
- if (of_find_node_by_path("/mambo")) {
- pr_info("Not using iommu on systemsim\n");
- } else {
-
- if (!(of_chosen &&
- get_property(of_chosen, "linux,iommu-off", NULL)))
- setup_bus = cell_map_iommu();
-
- if (setup_bus) {
- pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup;
- } else {
- pr_debug("%s: IOMMU mapping activated, "
- "no device action necessary\n", __FUNCTION__);
- /* Direct I/O, IOMMU off */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
- }
- }
-
- pci_dma_ops = cell_iommu_ops;
-}
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
deleted file mode 100644
index 490d77abfe8..00000000000
--- a/arch/powerpc/platforms/cell/iommu.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef CELL_IOMMU_H
-#define CELL_IOMMU_H
-
-/* some constants */
-enum {
- /* segment table entries */
- IOST_VALID_MASK = 0x8000000000000000ul,
- IOST_TAG_MASK = 0x3000000000000000ul,
- IOST_PT_BASE_MASK = 0x000003fffffff000ul,
- IOST_NNPT_MASK = 0x0000000000000fe0ul,
- IOST_PS_MASK = 0x000000000000000ful,
-
- IOST_PS_4K = 0x1,
- IOST_PS_64K = 0x3,
- IOST_PS_1M = 0x5,
- IOST_PS_16M = 0x7,
-
- /* iopt tag register */
- IOPT_VALID_MASK = 0x0000000200000000ul,
- IOPT_TAG_MASK = 0x00000001fffffffful,
-
- /* iopt cache register */
- IOPT_PROT_MASK = 0xc000000000000000ul,
- IOPT_PROT_NONE = 0x0000000000000000ul,
- IOPT_PROT_READ = 0x4000000000000000ul,
- IOPT_PROT_WRITE = 0x8000000000000000ul,
- IOPT_PROT_RW = 0xc000000000000000ul,
- IOPT_COHERENT = 0x2000000000000000ul,
-
- IOPT_ORDER_MASK = 0x1800000000000000ul,
- /* order access to same IOID/VC on same address */
- IOPT_ORDER_ADDR = 0x0800000000000000ul,
- /* similar, but only after a write access */
- IOPT_ORDER_WRITES = 0x1000000000000000ul,
- /* Order all accesses to same IOID/VC */
- IOPT_ORDER_VC = 0x1800000000000000ul,
-
- IOPT_RPN_MASK = 0x000003fffffff000ul,
- IOPT_HINT_MASK = 0x0000000000000800ul,
- IOPT_IOID_MASK = 0x00000000000007fful,
-
- IOSTO_ENABLE = 0x8000000000000000ul,
- IOSTO_ORIGIN = 0x000003fffffff000ul,
- IOSTO_HW = 0x0000000000000800ul,
- IOSTO_SW = 0x0000000000000400ul,
-
- IOCMD_CONF_TE = 0x0000800000000000ul,
-
- /* memory mapped registers */
- IOC_PT_CACHE_DIR = 0x000,
- IOC_ST_CACHE_DIR = 0x800,
- IOC_PT_CACHE_REG = 0x910,
- IOC_ST_ORIGIN = 0x918,
- IOC_CONF = 0x930,
-
- /* The high bit needs to be set on every DMA address,
- only 2GB are addressable */
- CELL_DMA_VALID = 0x80000000,
- CELL_DMA_MASK = 0x7fffffff,
-};
-
-
-void cell_init_iommu(void);
-
-#endif
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9f2e4ed20a5..8c20f0fb865 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -38,32 +38,25 @@
#include "pervasive.h"
#include "cbe_regs.h"
-static DEFINE_SPINLOCK(cbe_pervasive_lock);
-
-static void __init cbe_enable_pause_zero(void)
+static void cbe_power_save(void)
{
- unsigned long thread_switch_control;
- unsigned long temp_register;
- struct cbe_pmd_regs __iomem *pregs;
-
- spin_lock_irq(&cbe_pervasive_lock);
- pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
- if (pregs == NULL)
- goto out;
+ unsigned long ctrl, thread_switch_control;
- pr_debug("Power Management: CPU %d\n", smp_processor_id());
-
- /* Enable Pause(0) control bit */
- temp_register = in_be64(&pregs->pm_control);
+ /*
+ * We need to hard disable interrupts, but we also need to mark them
+ * hard disabled in the PACA so that the local_irq_enable() done by
+ * our caller upon return properly hard-enables them.
+ */
+ hard_irq_disable();
+ get_paca()->hard_enabled = 0;
- out_be64(&pregs->pm_control,
- temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
+ ctrl = mfspr(SPRN_CTRLF);
/* Enable DEC and EE interrupt request */
thread_switch_control = mfspr(SPRN_TSC_CELL);
thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
- switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+ switch (ctrl & CTRL_CT) {
case CTRL_CT0:
thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
break;
@@ -75,58 +68,21 @@ static void __init cbe_enable_pause_zero(void)
__FUNCTION__);
break;
}
-
mtspr(SPRN_TSC_CELL, thread_switch_control);
-out:
- spin_unlock_irq(&cbe_pervasive_lock);
-}
-
-static void cbe_idle(void)
-{
- unsigned long ctrl;
+ /*
+ * go into low thread priority, medium priority will be
+ * restored for us after wake-up.
+ */
+ HMT_low();
- /* Why do we do that on every idle ? Couldn't that be done once for
- * all or do we lose the state some way ? Also, the pm_control
- * register setting, that can't be set once at boot ? We really want
- * to move that away in order to implement a simple powersave
+ /*
+ * atomically disable thread execution and runlatch.
+ * External and Decrementer exceptions are still handled when the
+ * thread is disabled but now enter in cbe_system_reset_exception()
*/
- cbe_enable_pause_zero();
-
- while (1) {
- if (!need_resched()) {
- local_irq_disable();
- while (!need_resched()) {
- /* go into low thread priority */
- HMT_low();
-
- /*
- * atomically disable thread execution
- * and runlatch.
- * External and Decrementer exceptions
- * are still handled when the thread
- * is disabled but now enter in
- * cbe_system_reset_exception()
- */
- ctrl = mfspr(SPRN_CTRLF);
- ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
- mtspr(SPRN_CTRLT, ctrl);
- }
- /* restore thread prio */
- HMT_medium();
- local_irq_enable();
- }
-
- /*
- * turn runlatch on again before scheduling the
- * process we just woke up
- */
- ppc64_runlatch_on();
-
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+ mtspr(SPRN_CTRLT, ctrl);
}
static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +114,20 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
void __init cbe_pervasive_init(void)
{
+ int cpu;
if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
return;
- ppc_md.idle_loop = cbe_idle;
+ for_each_possible_cpu(cpu) {
+ struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
+ if (!regs)
+ continue;
+
+ /* Enable Pause(0) control bit */
+ out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
+ CBE_PMD_PAUSE_ZERO_CONTROL);
+ }
+
+ ppc_md.power_save = cbe_power_save;
ppc_md.system_reset_exception = cbe_system_reset_exception;
}
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644
index 00000000000..99c612025e8
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -0,0 +1,429 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Author:
+ * David Erb (djerb@us.ibm.com)
+ * Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+#include <asm/machdep.h>
+#include <asm/pmc.h>
+#include <asm/reg.h>
+#include <asm/spu.h>
+
+#include "cbe_regs.h"
+#include "interrupt.h"
+
+/*
+ * When writing to write-only mmio addresses, save a shadow copy. All of the
+ * registers are 32-bit, but stored in the upper-half of a 64-bit field in
+ * pmd_regs.
+ */
+
+#define WRITE_WO_MMIO(reg, x) \
+ do { \
+ u32 _x = (x); \
+ struct cbe_pmd_regs __iomem *pmd_regs; \
+ struct cbe_pmd_shadow_regs *shadow_regs; \
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+ out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \
+ shadow_regs->reg = _x; \
+ } while (0)
+
+#define READ_SHADOW_REG(val, reg) \
+ do { \
+ struct cbe_pmd_shadow_regs *shadow_regs; \
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+ (val) = shadow_regs->reg; \
+ } while (0)
+
+#define READ_MMIO_UPPER32(val, reg) \
+ do { \
+ struct cbe_pmd_regs __iomem *pmd_regs; \
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
+ (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \
+ } while (0)
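+
+/* Note: the three helpers above reference a local variable named "cpu" in
+ * the calling function rather than taking it as a macro argument. */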
+
+/*
+ * Physical counter registers.
+ * Each physical counter can act as one 32-bit counter or two 16-bit counters.
+ */
+
+u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
+{
+ u32 val_in_latch, val = 0;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
+
+ /* Read the latch or the actual counter, whichever is newer. */
+ if (val_in_latch & (1 << phys_ctr)) {
+ READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
+ } else {
+ READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
+ }
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
+
+void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
+{
+ struct cbe_pmd_shadow_regs *shadow_regs;
+ u32 pm_ctrl;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ /* Writing to a counter only writes to a hardware latch.
+ * The new value is not propagated to the actual counter
+ * until the performance monitor is enabled.
+ */
+ WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
+
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
+ /* The counters are already active, so we need to
+ * rewrite the pm_control register to "re-enable"
+ * the PMU.
+ */
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+ } else {
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+ shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
+
+/*
+ * "Logical" counter registers.
+ * These will read/write 16-bits or 32-bits depending on the
+ * current size of the counter. Counters 4 - 7 are always 16-bit.
+ */
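+/* For example (assuming four physical counters): in 16-bit mode, logical
+ * counter 2 lives in the high half of physical counter 2 while logical
+ * counter 6 uses its low half, as the masking below shows. */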
+
+u32 cbe_read_ctr(u32 cpu, u32 ctr)
+{
+ u32 val;
+ u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+ val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+ if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
+ val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_ctr);
+
+void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
+{
+ u32 phys_ctr;
+ u32 phys_val;
+
+ phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+ if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
+ phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+ if (ctr < NR_PHYS_CTRS)
+ val = (val << 16) | (phys_val & 0xffff);
+ else
+ val = (val & 0xffff) | (phys_val & 0xffff0000);
+ }
+
+ cbe_write_phys_ctr(cpu, phys_ctr, val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_ctr);
+
+/*
+ * Counter-control registers.
+ * Each "logical" counter has a corresponding control register.
+ */
+
+u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
+{
+ u32 pm07_control = 0;
+
+ if (ctr < NR_CTRS)
+ READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
+
+ return pm07_control;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
+
+void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
+{
+ if (ctr < NR_CTRS)
+ WRITE_WO_MMIO(pm07_control[ctr], val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
+
+/*
+ * Other PMU control registers. Most of these are write-only.
+ */
+
+u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
+{
+ u32 val = 0;
+
+ switch (reg) {
+ case group_control:
+ READ_SHADOW_REG(val, group_control);
+ break;
+
+ case debug_bus_control:
+ READ_SHADOW_REG(val, debug_bus_control);
+ break;
+
+ case trace_address:
+ READ_MMIO_UPPER32(val, trace_address);
+ break;
+
+ case ext_tr_timer:
+ READ_SHADOW_REG(val, ext_tr_timer);
+ break;
+
+ case pm_status:
+ READ_MMIO_UPPER32(val, pm_status);
+ break;
+
+ case pm_control:
+ READ_SHADOW_REG(val, pm_control);
+ break;
+
+ case pm_interval:
+ READ_SHADOW_REG(val, pm_interval);
+ break;
+
+ case pm_start_stop:
+ READ_SHADOW_REG(val, pm_start_stop);
+ break;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm);
+
+void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
+{
+ switch (reg) {
+ case group_control:
+ WRITE_WO_MMIO(group_control, val);
+ break;
+
+ case debug_bus_control:
+ WRITE_WO_MMIO(debug_bus_control, val);
+ break;
+
+ case trace_address:
+ WRITE_WO_MMIO(trace_address, val);
+ break;
+
+ case ext_tr_timer:
+ WRITE_WO_MMIO(ext_tr_timer, val);
+ break;
+
+ case pm_status:
+ WRITE_WO_MMIO(pm_status, val);
+ break;
+
+ case pm_control:
+ WRITE_WO_MMIO(pm_control, val);
+ break;
+
+ case pm_interval:
+ WRITE_WO_MMIO(pm_interval, val);
+ break;
+
+ case pm_start_stop:
+ WRITE_WO_MMIO(pm_start_stop, val);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm);
+
+/*
+ * Get/set the size of a physical counter to either 16 or 32 bits.
+ */
+
+u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
+{
+ u32 pm_ctrl, size = 0;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
+
+void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
+{
+ u32 pm_ctrl;
+
+ if (phys_ctr < NR_PHYS_CTRS) {
+ pm_ctrl = cbe_read_pm(cpu, pm_control);
+ switch (ctr_size) {
+ case 16:
+ pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
+ break;
+
+ case 32:
+ pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
+ break;
+ }
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+ }
+}
+EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
+
+/*
+ * Enable/disable the entire performance monitoring unit.
+ * When we enable the PMU, all pending writes to counters get committed.
+ */
+
+void cbe_enable_pm(u32 cpu)
+{
+ struct cbe_pmd_shadow_regs *shadow_regs;
+ u32 pm_ctrl;
+
+ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+ shadow_regs->counter_value_in_latch = 0;
+
+ pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm);
+
+void cbe_disable_pm(u32 cpu)
+{
+ u32 pm_ctrl;
+ pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
+ cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm);
+
+/*
+ * Reading from the trace_buffer.
+ * The trace buffer is two 64-bit registers. Reading from
+ * the second half automatically increments the trace_address.
+ */
+
+void cbe_read_trace_buffer(u32 cpu, u64 *buf)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+ *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
+ *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
+}
+EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
+
+/*
+ * Enabling/disabling interrupts for the entire performance monitoring unit.
+ */
+
+u32 cbe_query_pm_interrupts(u32 cpu)
+{
+ return cbe_read_pm(cpu, pm_status);
+}
+EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
+
+u32 cbe_clear_pm_interrupts(u32 cpu)
+{
+ /* Reading pm_status clears the interrupt bits. */
+ return cbe_query_pm_interrupts(cpu);
+}
+EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+
+void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
+{
+ /* Set which node and thread will handle the next interrupt. */
+ iic_set_interrupt_routing(cpu, thread, 0);
+
+ /* Enable the interrupt bits in the pm_status register. */
+ if (mask)
+ cbe_write_pm(cpu, pm_status, mask);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
+
+void cbe_disable_pm_interrupts(u32 cpu)
+{
+ cbe_clear_pm_interrupts(cpu);
+ cbe_write_pm(cpu, pm_status, 0);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
+
+static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
+{
+ perf_irq(get_irq_regs());
+ return IRQ_HANDLED;
+}
+
+int __init cbe_init_pm_irq(void)
+{
+ unsigned int irq;
+ int rc, node;
+
+ for_each_node(node) {
+ irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
+ (node << IIC_IRQ_NODE_SHIFT));
+ if (irq == NO_IRQ) {
+ printk(KERN_ERR "ERROR: Unable to allocate irq for node %d\n",
+ node);
+ return -EINVAL;
+ }
+
+ rc = request_irq(irq, cbe_pm_irq,
+ IRQF_DISABLED, "cbe-pmu-0", NULL);
+ if (rc) {
+ printk(KERN_ERR "ERROR: Request for irq on node %d failed\n",
+ node);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+arch_initcall(cbe_init_pm_irq);
+
+void cbe_sync_irq(int node)
+{
+ unsigned int irq;
+
+ irq = irq_find_mapping(NULL,
+ IIC_IRQ_IOEX_PMI
+ | (node << IIC_IRQ_NODE_SHIFT));
+
+ if (irq == NO_IRQ) {
+ printk(KERN_WARNING "ERROR, unable to get existing irq %d "
+ "for node %d\n", irq, node);
+ return;
+ }
+
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(cbe_sync_irq);
+
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 22c228a49c3..36989c2eee6 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -50,9 +50,10 @@
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/of_platform.h>
#include "interrupt.h"
-#include "iommu.h"
#include "cbe_regs.h"
#include "pervasive.h"
#include "ras.h"
@@ -80,24 +81,72 @@ static void cell_progress(char *s, unsigned short hex)
printk("*** %04x : %s\n", hex, s ? s : "");
}
-static void __init cell_pcibios_fixup(void)
+static int __init cell_publish_devices(void)
{
- struct pci_dev *dev = NULL;
+ if (!machine_is(cell))
+ return 0;
+
+ /* Publish OF platform devices for southbridge IOs */
+ of_platform_bus_probe(NULL, NULL, NULL);
+
+ return 0;
+}
+device_initcall(cell_publish_devices);
+
+static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct mpic *mpic = desc->handler_data;
+ unsigned int virq;
+
+ virq = mpic_get_one_irq(mpic);
+ if (virq != NO_IRQ)
+ generic_handle_irq(virq);
+ desc->chip->eoi(irq);
+}
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
+static void __init mpic_init_IRQ(void)
+{
+ struct device_node *dn;
+ struct mpic *mpic;
+ unsigned int virq;
+
+ for (dn = NULL;
+ (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
+ if (!device_is_compatible(dn, "CBEA,platform-open-pic"))
+ continue;
+
+ /* The MPIC driver will get everything it needs from the
+ * device-tree, so just pass 0 for all arguments.
+ */
+ mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC ");
+ if (mpic == NULL)
+ continue;
+ mpic_init(mpic);
+
+ virq = irq_of_parse_and_map(dn, 0);
+ if (virq == NO_IRQ)
+ continue;
+
+ printk(KERN_INFO "%s : hooking up to IRQ %d\n",
+ dn->full_name, virq);
+ set_irq_data(virq, mpic);
+ set_irq_chained_handler(virq, cell_mpic_cascade);
+ }
}
+
static void __init cell_init_irq(void)
{
iic_init_IRQ();
spider_init_IRQ();
+ mpic_init_IRQ();
}
static void __init cell_setup_arch(void)
{
#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_management_ops = &spu_management_of_ops;
#endif
cbe_regs_init();
@@ -109,7 +158,6 @@ static void __init cell_setup_arch(void)
#ifdef CONFIG_SMP
smp_init_cell();
#endif
-
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
@@ -129,19 +177,6 @@ static void __init cell_setup_arch(void)
mmio_nvram_init();
}
-/*
- * Early initialization. Relocation is on but do not reference unbolted pages
- */
-static void __init cell_init_early(void)
-{
- DBG(" -> cell_init_early()\n");
-
- cell_init_iommu();
-
- DBG(" <- cell_init_early()\n");
-}
-
-
static int __init cell_probe(void)
{
unsigned long root = of_get_flat_dt_root();
@@ -168,7 +203,6 @@ define_machine(cell) {
.name = "Cell",
.probe = cell_probe,
.setup_arch = cell_setup_arch,
- .init_early = cell_init_early,
.show_cpuinfo = cell_show_cpuinfo,
.restart = rtas_restart,
.power_off = rtas_power_off,
@@ -180,7 +214,7 @@ define_machine(cell) {
.check_legacy_ioport = cell_check_legacy_ioport,
.progress = cell_progress,
.init_IRQ = cell_init_irq,
- .pcibios_fixup = cell_pcibios_fixup,
+ .pci_setup_phb = rtas_setup_phb,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
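
cell_mpic_cascade() above is a standard chained handler: the parent interrupt fires, the handler asks the secondary controller (the MPIC) which source is pending, dispatches it through the generic IRQ layer, and only then EOIs the parent. The toy dispatcher below mimics that flow in plain C; child_get_one_irq, handlers and parent_eoi are made-up stand-ins for mpic_get_one_irq(), generic_handle_irq() and desc->chip->eoi().

#include <stdio.h>

#define NO_IRQ (-1)

static int child_pending = 2;			/* pretend source 2 is raised */
static void (*handlers[4])(int irq);

static int child_get_one_irq(void)		/* mpic_get_one_irq() stand-in */
{
	int pending = child_pending;

	child_pending = NO_IRQ;
	return pending;
}

static void handle_irq(int irq)			/* generic_handle_irq() stand-in */
{
	if (irq >= 0 && irq < 4 && handlers[irq])
		handlers[irq](irq);
}

static void parent_eoi(int irq)			/* desc->chip->eoi() stand-in */
{
	printf("EOI on parent irq %d\n", irq);
}

static void cascade(int parent_irq)
{
	int virq = child_get_one_irq();		/* ask the child controller */

	if (virq != NO_IRQ)
		handle_irq(virq);		/* forward to the real handler */
	parent_eoi(parent_irq);			/* always ack the parent */
}

static void spu_handler(int irq)
{
	printf("handled cascaded irq %d\n", irq);
}

int main(void)
{
	handlers[2] = spu_handler;
	cascade(0);
	return 0;
}
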
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7aa809d5a24..bd7bffc3ddd 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,22 +25,17 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
-
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/prom.h>
+#include <linux/mm.h>
+#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
-#include <asm/mmu_context.h>
-
-#include "interrupt.h"
+#include <asm/xmon.h>
+const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -89,7 +84,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
printk("%s: invalid access during switch!\n", __func__);
return 1;
}
- if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+ esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+ switch(REGION_ID(ea)) {
+ case USER_REGION_ID:
+#ifdef CONFIG_HUGETLB_PAGE
+ if (in_hugepage_area(mm->context, ea))
+ llp = mmu_psize_defs[mmu_huge_psize].sllp;
+ else
+#endif
+ llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+ SLB_VSID_USER | llp;
+ break;
+ case VMALLOC_REGION_ID:
+ llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+ SLB_VSID_KERNEL | llp;
+ break;
+ case KERNEL_REGION_ID:
+ llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+ SLB_VSID_KERNEL | llp;
+ break;
+ default:
/* Future: support kernel segments so that drivers
* can use SPUs.
*/
@@ -97,16 +115,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
return 1;
}
- esid = (ea & ESID_MASK) | SLB_ESID_V;
-#ifdef CONFIG_HUGETLB_PAGE
- if (in_hugepage_area(mm->context, ea))
- llp = mmu_psize_defs[mmu_huge_psize].sllp;
- else
-#endif
- llp = mmu_psize_defs[mmu_virtual_psize].sllp;
- vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
- SLB_VSID_USER | llp;
-
out_be64(&priv2->slb_index_W, spu->slb_replace);
out_be64(&priv2->slb_vsid_RW, vsid);
out_be64(&priv2->slb_esid_RW, esid);
@@ -320,6 +328,7 @@ static void spu_free_irqs(struct spu *spu)
}
static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static void spu_init_channels(struct spu *spu)
@@ -364,8 +373,7 @@ struct spu *spu_alloc_node(int node)
if (!list_empty(&spu_list[node])) {
spu = list_entry(spu_list[node].next, struct spu, list);
list_del_init(&spu->list);
- pr_debug("Got SPU %x %d %d\n",
- spu->isrc, spu->number, spu->node);
+ pr_debug("Got SPU %d %d\n", spu->number, spu->node);
spu_init_channels(spu);
}
mutex_unlock(&spu_mutex);
@@ -493,280 +501,65 @@ int spu_irq_class_1_bottom(struct spu *spu)
if (!error) {
spu_restart_dma(spu);
} else {
- __spu_trap_invalid_dma(spu);
+ spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
}
return ret;
}
-static int __init find_spu_node_id(struct device_node *spe)
-{
- const unsigned int *id;
- struct device_node *cpu;
- cpu = spe->parent->parent;
- id = get_property(cpu, "node-id", NULL);
- return id ? *id : 0;
-}
-
-static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
- const char *prop)
-{
- static DEFINE_MUTEX(add_spumem_mutex);
-
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *p;
- int proplen;
-
- unsigned long start_pfn, nr_pages;
- struct pglist_data *pgdata;
- struct zone *zone;
- int ret;
-
- p = get_property(spe, prop, &proplen);
- WARN_ON(proplen != sizeof (*p));
-
- start_pfn = p->address >> PAGE_SHIFT;
- nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- pgdata = NODE_DATA(spu->nid);
- zone = pgdata->node_zones;
-
- /* XXX rethink locking here */
- mutex_lock(&add_spumem_mutex);
- ret = __add_pages(zone, start_pfn, nr_pages);
- mutex_unlock(&add_spumem_mutex);
-
- return ret;
-}
+struct sysdev_class spu_sysdev_class = {
+ set_kset_name("spu")
+};
-static void __iomem * __init map_spe_prop(struct spu *spu,
- struct device_node *n, const char *name)
+int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *prop;
-
- const void *p;
- int proplen;
- void __iomem *ret = NULL;
- int err = 0;
-
- p = get_property(n, name, &proplen);
- if (proplen != sizeof (struct address_prop))
- return NULL;
-
- prop = p;
-
- err = cell_spuprop_present(spu, n, name);
- if (err && (err != -EEXIST))
- goto out;
-
- ret = ioremap(prop->address, prop->len);
-
- out:
- return ret;
-}
+ struct spu *spu;
+ mutex_lock(&spu_mutex);
-static void spu_unmap(struct spu *spu)
-{
- iounmap(spu->priv2);
- iounmap(spu->priv1);
- iounmap(spu->problem);
- iounmap((__force u8 __iomem *)spu->local_store);
-}
+ list_for_each_entry(spu, &spu_full_list, full_list)
+ sysdev_create_file(&spu->sysdev, attr);
-/* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
-{
- unsigned int isrc;
- const u32 *tmp;
-
- /* Get the interrupt source unit from the device-tree */
- tmp = get_property(np, "isrc", NULL);
- if (!tmp)
- return -ENODEV;
- isrc = tmp[0];
-
- /* Add the node number */
- isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
- spu->isrc = isrc;
-
- /* Now map interrupts of all 3 classes */
- spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
- spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
- spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
-
- /* Right now, we only fail if class 2 failed */
- return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+ mutex_unlock(&spu_mutex);
+ return 0;
}
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
-static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
- const char *prop;
- int ret;
-
- ret = -ENODEV;
- spu->name = get_property(node, "name", NULL);
- if (!spu->name)
- goto out;
-
- prop = get_property(node, "local-store", NULL);
- if (!prop)
- goto out;
- spu->local_store_phys = *(unsigned long *)prop;
-
- /* we use local store as ram, not io memory */
- spu->local_store = (void __force *)
- map_spe_prop(spu, node, "local-store");
- if (!spu->local_store)
- goto out;
-
- prop = get_property(node, "problem", NULL);
- if (!prop)
- goto out_unmap;
- spu->problem_phys = *(unsigned long *)prop;
-
- spu->problem= map_spe_prop(spu, node, "problem");
- if (!spu->problem)
- goto out_unmap;
-
- spu->priv1= map_spe_prop(spu, node, "priv1");
- /* priv1 is not available on a hypervisor */
-
- spu->priv2= map_spe_prop(spu, node, "priv2");
- if (!spu->priv2)
- goto out_unmap;
- ret = 0;
- goto out;
-
-out_unmap:
- spu_unmap(spu);
-out:
- return ret;
-}
+ struct spu *spu;
+ mutex_lock(&spu_mutex);
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
-{
- struct of_irq oirq;
- int ret;
- int i;
+ list_for_each_entry(spu, &spu_full_list, full_list)
+ sysfs_create_group(&spu->sysdev.kobj, attrs);
- for (i=0; i < 3; i++) {
- ret = of_irq_map_one(np, i, &oirq);
- if (ret) {
- pr_debug("spu_new: failed to get irq %d\n", i);
- goto err;
- }
- ret = -EINVAL;
- pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
- oirq.controller->full_name);
- spu->irqs[i] = irq_create_of_mapping(oirq.controller,
- oirq.specifier, oirq.size);
- if (spu->irqs[i] == NO_IRQ) {
- pr_debug("spu_new: failed to map it !\n");
- goto err;
- }
- }
+ mutex_unlock(&spu_mutex);
return 0;
-
-err:
- pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
- for (; i >= 0; i--) {
- if (spu->irqs[i] != NO_IRQ)
- irq_dispose_mapping(spu->irqs[i]);
- }
- return ret;
}
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
-static int spu_map_resource(struct device_node *node, int nr,
- void __iomem** virt, unsigned long *phys)
-{
- struct resource resource = { };
- int ret;
-
- ret = of_address_to_resource(node, nr, &resource);
- if (ret)
- goto out;
- if (phys)
- *phys = resource.start;
- *virt = ioremap(resource.start, resource.end - resource.start);
- if (!*virt)
- ret = -EINVAL;
-
-out:
- return ret;
-}
-
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
- int ret = -ENODEV;
- spu->name = get_property(node, "name", NULL);
- if (!spu->name)
- goto out;
-
- ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
- &spu->local_store_phys);
- if (ret) {
- pr_debug("spu_new: failed to map %s resource 0\n",
- node->full_name);
- goto out;
- }
- ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
- &spu->problem_phys);
- if (ret) {
- pr_debug("spu_new: failed to map %s resource 1\n",
- node->full_name);
- goto out_unmap;
- }
- ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
- NULL);
- if (ret) {
- pr_debug("spu_new: failed to map %s resource 2\n",
- node->full_name);
- goto out_unmap;
- }
-
- if (!firmware_has_feature(FW_FEATURE_LPAR))
- ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
- NULL);
- if (ret) {
- pr_debug("spu_new: failed to map %s resource 3\n",
- node->full_name);
- goto out_unmap;
- }
- pr_debug("spu_new: %s maps:\n", node->full_name);
- pr_debug(" local store : 0x%016lx -> 0x%p\n",
- spu->local_store_phys, spu->local_store);
- pr_debug(" problem state : 0x%016lx -> 0x%p\n",
- spu->problem_phys, spu->problem);
- pr_debug(" priv2 : 0x%p\n", spu->priv2);
- pr_debug(" priv1 : 0x%p\n", spu->priv1);
+ struct spu *spu;
+ mutex_lock(&spu_mutex);
- return 0;
+ list_for_each_entry(spu, &spu_full_list, full_list)
+ sysdev_remove_file(&spu->sysdev, attr);
-out_unmap:
- spu_unmap(spu);
-out:
- pr_debug("failed to map spe %s: %d\n", spu->name, ret);
- return ret;
+ mutex_unlock(&spu_mutex);
}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
-struct sysdev_class spu_sysdev_class = {
- set_kset_name("spu")
-};
-
-static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
- struct spu *spu = container_of(sysdev, struct spu, sysdev);
- return sprintf(buf, "%d\n", spu->isrc);
+ struct spu *spu;
+ mutex_lock(&spu_mutex);
-}
-static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
+ list_for_each_entry(spu, &spu_full_list, full_list)
+ sysfs_remove_group(&spu->sysdev.kobj, attrs);
-extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
+ mutex_unlock(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
static int spu_create_sysdev(struct spu *spu)
{
@@ -781,21 +574,18 @@ static int spu_create_sysdev(struct spu *spu)
return ret;
}
- if (spu->isrc != 0)
- sysdev_create_file(&spu->sysdev, &attr_isrc);
- sysfs_add_device_to_node(&spu->sysdev, spu->nid);
+ sysfs_add_device_to_node(&spu->sysdev, spu->node);
return 0;
}
static void spu_destroy_sysdev(struct spu *spu)
{
- sysdev_remove_file(&spu->sysdev, &attr_isrc);
- sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
+ sysfs_remove_device_from_node(&spu->sysdev, spu->node);
sysdev_unregister(&spu->sysdev);
}
-static int __init create_spu(struct device_node *spe)
+static int __init create_spu(void *data)
{
struct spu *spu;
int ret;
@@ -806,57 +596,37 @@ static int __init create_spu(struct device_node *spe)
if (!spu)
goto out;
- spu->node = find_spu_node_id(spe);
- if (spu->node >= MAX_NUMNODES) {
- printk(KERN_WARNING "SPE %s on node %d ignored,"
- " node number too big\n", spe->full_name, spu->node);
- printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
- return -ENODEV;
- }
- spu->nid = of_node_to_nid(spe);
- if (spu->nid == -1)
- spu->nid = 0;
+ spin_lock_init(&spu->register_lock);
+ mutex_lock(&spu_mutex);
+ spu->number = number++;
+ mutex_unlock(&spu_mutex);
+
+ ret = spu_create_spu(spu, data);
- ret = spu_map_device(spu, spe);
- /* try old method */
- if (ret)
- ret = spu_map_device_old(spu, spe);
if (ret)
goto out_free;
- ret = spu_map_interrupts(spu, spe);
- if (ret)
- ret = spu_map_interrupts_old(spu, spe);
- if (ret)
- goto out_unmap;
- spin_lock_init(&spu->register_lock);
- spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+ spu_mfc_sdr_setup(spu);
spu_mfc_sr1_set(spu, 0x33);
- mutex_lock(&spu_mutex);
-
- spu->number = number++;
ret = spu_request_irqs(spu);
if (ret)
- goto out_unlock;
+ goto out_destroy;
ret = spu_create_sysdev(spu);
if (ret)
goto out_free_irqs;
+ mutex_lock(&spu_mutex);
list_add(&spu->list, &spu_list[spu->node]);
+ list_add(&spu->full_list, &spu_full_list);
mutex_unlock(&spu_mutex);
- pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
- spu->name, spu->isrc, spu->local_store,
- spu->problem, spu->priv1, spu->priv2, spu->number);
goto out;
out_free_irqs:
spu_free_irqs(spu);
-out_unlock:
- mutex_unlock(&spu_mutex);
-out_unmap:
- spu_unmap(spu);
+out_destroy:
+ spu_destroy_spu(spu);
out_free:
kfree(spu);
out:
@@ -866,10 +636,11 @@ out:
static void destroy_spu(struct spu *spu)
{
list_del_init(&spu->list);
+ list_del_init(&spu->full_list);
spu_destroy_sysdev(spu);
spu_free_irqs(spu);
- spu_unmap(spu);
+ spu_destroy_spu(spu);
kfree(spu);
}
@@ -890,9 +661,11 @@ module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
- struct device_node *node;
int i, ret;
+ if (!spu_management_ops)
+ return 0;
+
/* create sysdev class for spus */
ret = sysdev_class_register(&spu_sysdev_class);
if (ret)
@@ -901,17 +674,17 @@ static int __init init_spu_base(void)
for (i = 0; i < MAX_NUMNODES; i++)
INIT_LIST_HEAD(&spu_list[i]);
- ret = -ENODEV;
- for (node = of_find_node_by_type(NULL, "spe");
- node; node = of_find_node_by_type(node, "spe")) {
- ret = create_spu(node);
- if (ret) {
- printk(KERN_WARNING "%s: Error initializing %s\n",
- __FUNCTION__, node->name);
- cleanup_spu_base();
- break;
- }
+ ret = spu_enumerate_spus(create_spu);
+
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing spus\n",
+ __FUNCTION__);
+ cleanup_spu_base();
+ return ret;
}
+
+ xmon_register_spus(&spu_full_list);
+
return ret;
}
module_init(init_spu_base);
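
The spu_base.c rework above replaces direct device-tree walking with an ops table (spu_management_ops) that a platform backend installs before the initcall runs; init_spu_base() simply bails out if no backend registered. Below is a compressed model of that indirection, with invented names (mgmt_ops, of_enumerate, of_create), not the kernel structures themselves.

#include <stdio.h>

struct mgmt_ops {
	int (*enumerate)(int (*fn)(void *data));
	int (*create)(void *data);
};

static const struct mgmt_ops *mgmt_ops;	/* installed by the platform backend */

/* a fake "device tree" backend */
static int of_create(void *data)
{
	printf("creating %s\n", (char *)data);
	return 0;
}

static int of_enumerate(int (*fn)(void *data))
{
	char *nodes[] = { "spe@0", "spe@1" };
	int i, ret = 0;

	for (i = 0; i < 2 && !ret; i++)
		ret = fn(nodes[i]);
	return ret;
}

static const struct mgmt_ops of_ops = {
	.enumerate	= of_enumerate,
	.create		= of_create,
};

int main(void)
{
	mgmt_ops = &of_ops;		/* done from platform setup in the kernel */

	if (!mgmt_ops)			/* init_spu_base() bails out like this */
		return 0;
	return mgmt_ops->enumerate(mgmt_ops->create);
}
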
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
new file mode 100644
index 00000000000..6915b418ee7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -0,0 +1,81 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+static struct spu_coredump_calls spu_coredump_calls;
+static DEFINE_MUTEX(spu_coredump_mutex);
+
+int arch_notes_size(void)
+{
+ long ret;
+ struct module *owner = spu_coredump_calls.owner;
+
+ ret = -ENOSYS;
+ mutex_lock(&spu_coredump_mutex);
+ if (owner && try_module_get(owner)) {
+ ret = spu_coredump_calls.arch_notes_size();
+ module_put(owner);
+ }
+ mutex_unlock(&spu_coredump_mutex);
+ return ret;
+}
+
+void arch_write_notes(struct file *file)
+{
+ struct module *owner = spu_coredump_calls.owner;
+
+ mutex_lock(&spu_coredump_mutex);
+ if (owner && try_module_get(owner)) {
+ spu_coredump_calls.arch_write_notes(file);
+ module_put(owner);
+ }
+ mutex_unlock(&spu_coredump_mutex);
+}
+
+int register_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+ if (spu_coredump_calls.owner)
+ return -EBUSY;
+
+ mutex_lock(&spu_coredump_mutex);
+ spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
+ spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
+ spu_coredump_calls.owner = calls->owner;
+ mutex_unlock(&spu_coredump_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
+
+void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+ BUG_ON(spu_coredump_calls.owner != calls->owner);
+
+ mutex_lock(&spu_coredump_mutex);
+ spu_coredump_calls.owner = NULL;
+ mutex_unlock(&spu_coredump_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
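
spu_coredump.c registers a single provider and pins its module with try_module_get() for the duration of each callback, so the provider cannot be unloaded mid-call. The sketch below approximates that contract in userspace, using a plain reference count and a pthread mutex in place of the module refcount and spu_coredump_mutex; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

struct coredump_hook {
	int (*notes_size)(void);
	int refs;			/* stands in for the module refcount */
};

static struct coredump_hook *registered;
static pthread_mutex_t hook_lock = PTHREAD_MUTEX_INITIALIZER;

static int call_notes_size(void)
{
	int ret = -1;			/* -ENOSYS in the real code */

	pthread_mutex_lock(&hook_lock);
	if (registered) {
		registered->refs++;	/* pin the provider (try_module_get) */
		ret = registered->notes_size();
		registered->refs--;	/* drop the pin (module_put) */
	}
	pthread_mutex_unlock(&hook_lock);
	return ret;
}

static int spufs_notes_size(void)
{
	return 128;			/* pretend the notes need 128 bytes */
}

static struct coredump_hook spufs_hook = { spufs_notes_size, 0 };

int main(void)
{
	printf("before register: %d\n", call_notes_size());
	registered = &spufs_hook;	/* register_arch_coredump_calls() analogue */
	printf("after register : %d\n", call_notes_size());
	return 0;
}
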
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 71b69f0a1a4..a5de0430c56 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,498 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
-#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
+#include <asm/prom.h>
#include "interrupt.h"
+#include "spu_priv1_mmio.h"
+
+struct spu_pdata {
+ int nid;
+ struct device_node *devnode;
+ struct spu_priv1 __iomem *priv1;
+};
+
+static struct spu_pdata *spu_get_pdata(struct spu *spu)
+{
+ BUG_ON(!spu->pdata);
+ return spu->pdata;
+}
+
+struct device_node *spu_devnode(struct spu *spu)
+{
+ return spu_get_pdata(spu)->devnode;
+}
+
+EXPORT_SYMBOL_GPL(spu_devnode);
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+ const unsigned int *id;
+ struct device_node *cpu;
+ cpu = spe->parent->parent;
+ id = get_property(cpu, "node-id", NULL);
+ return id ? *id : 0;
+}
+
+static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
+ const char *prop)
+{
+ static DEFINE_MUTEX(add_spumem_mutex);
+
+ const struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *p;
+ int proplen;
+
+ unsigned long start_pfn, nr_pages;
+ struct pglist_data *pgdata;
+ struct zone *zone;
+ int ret;
+
+ p = get_property(spe, prop, &proplen);
+ WARN_ON(proplen != sizeof (*p));
+
+ start_pfn = p->address >> PAGE_SHIFT;
+ nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
+ zone = pgdata->node_zones;
+
+ /* XXX rethink locking here */
+ mutex_lock(&add_spumem_mutex);
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ mutex_unlock(&add_spumem_mutex);
+
+ return ret;
+}
+
+static void __iomem * __init map_spe_prop(struct spu *spu,
+ struct device_node *n, const char *name)
+{
+ const struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *prop;
+
+ const void *p;
+ int proplen;
+ void __iomem *ret = NULL;
+ int err = 0;
+
+ p = get_property(n, name, &proplen);
+ if (proplen != sizeof (struct address_prop))
+ return NULL;
+
+ prop = p;
+
+ err = cell_spuprop_present(spu, n, name);
+ if (err && (err != -EEXIST))
+ goto out;
+
+ ret = ioremap(prop->address, prop->len);
+
+ out:
+ return ret;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+ iounmap(spu->priv2);
+ iounmap(spu_get_pdata(spu)->priv1);
+ iounmap(spu->problem);
+ iounmap((__force u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_interrupts_old(struct spu *spu,
+ struct device_node *np)
+{
+ unsigned int isrc;
+ const u32 *tmp;
+
+ /* Get the interrupt source unit from the device-tree */
+ tmp = get_property(np, "isrc", NULL);
+ if (!tmp)
+ return -ENODEV;
+ isrc = tmp[0];
+
+ /* Add the node number */
+ isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
+
+ /* Now map interrupts of all 3 classes */
+ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
+ spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
+ spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
+
+ /* Right now, we only fail if class 2 failed */
+ return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+{
+ const char *prop;
+ int ret;
+
+ ret = -ENODEV;
+ spu->name = get_property(node, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ prop = get_property(node, "local-store", NULL);
+ if (!prop)
+ goto out;
+ spu->local_store_phys = *(unsigned long *)prop;
+
+ /* we use local store as ram, not io memory */
+ spu->local_store = (void __force *)
+ map_spe_prop(spu, node, "local-store");
+ if (!spu->local_store)
+ goto out;
+
+ prop = get_property(node, "problem", NULL);
+ if (!prop)
+ goto out_unmap;
+ spu->problem_phys = *(unsigned long *)prop;
+
+ spu->problem= map_spe_prop(spu, node, "problem");
+ if (!spu->problem)
+ goto out_unmap;
+
+ spu_get_pdata(spu)->priv1= map_spe_prop(spu, node, "priv1");
+
+ spu->priv2= map_spe_prop(spu, node, "priv2");
+ if (!spu->priv2)
+ goto out_unmap;
+ ret = 0;
+ goto out;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ return ret;
+}
+
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+ struct of_irq oirq;
+ int ret;
+ int i;
+
+ for (i=0; i < 3; i++) {
+ ret = of_irq_map_one(np, i, &oirq);
+ if (ret) {
+ pr_debug("spu_new: failed to get irq %d\n", i);
+ goto err;
+ }
+ ret = -EINVAL;
+ pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
+ oirq.controller->full_name);
+ spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+ oirq.specifier, oirq.size);
+ if (spu->irqs[i] == NO_IRQ) {
+ pr_debug("spu_new: failed to map it !\n");
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+ spu->name);
+ for (; i >= 0; i--) {
+ if (spu->irqs[i] != NO_IRQ)
+ irq_dispose_mapping(spu->irqs[i]);
+ }
+ return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+ void __iomem** virt, unsigned long *phys)
+{
+ struct resource resource = { };
+ int ret;
+
+ ret = of_address_to_resource(node, nr, &resource);
+ if (ret)
+ goto out;
+
+ if (phys)
+ *phys = resource.start;
+ *virt = ioremap(resource.start, resource.end - resource.start);
+ if (!*virt)
+ ret = -EINVAL;
+
+out:
+ return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+ int ret = -ENODEV;
+ spu->name = get_property(node, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+ &spu->local_store_phys);
+ if (ret) {
+ pr_debug("spu_new: failed to map %s resource 0\n",
+ node->full_name);
+ goto out;
+ }
+ ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+ &spu->problem_phys);
+ if (ret) {
+ pr_debug("spu_new: failed to map %s resource 1\n",
+ node->full_name);
+ goto out_unmap;
+ }
+ ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+ NULL);
+ if (ret) {
+ pr_debug("spu_new: failed to map %s resource 2\n",
+ node->full_name);
+ goto out_unmap;
+ }
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ ret = spu_map_resource(node, 3,
+ (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
+ if (ret) {
+ pr_debug("spu_new: failed to map %s resource 3\n",
+ node->full_name);
+ goto out_unmap;
+ }
+ pr_debug("spu_new: %s maps:\n", node->full_name);
+ pr_debug(" local store : 0x%016lx -> 0x%p\n",
+ spu->local_store_phys, spu->local_store);
+ pr_debug(" problem state : 0x%016lx -> 0x%p\n",
+ spu->problem_phys, spu->problem);
+ pr_debug(" priv2 : 0x%p\n", spu->priv2);
+ pr_debug(" priv1 : 0x%p\n",
+ spu_get_pdata(spu)->priv1);
+
+ return 0;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+ return ret;
+}
+
+static int __init of_enumerate_spus(int (*fn)(void *data))
+{
+ int ret;
+ struct device_node *node;
+
+ ret = -ENODEV;
+ for (node = of_find_node_by_type(NULL, "spe");
+ node; node = of_find_node_by_type(node, "spe")) {
+ ret = fn(node);
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing %s\n",
+ __FUNCTION__, node->name);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int __init of_create_spu(struct spu *spu, void *data)
+{
+ int ret;
+ struct device_node *spe = (struct device_node *)data;
+
+ spu->pdata = kzalloc(sizeof(struct spu_pdata),
+ GFP_KERNEL);
+ if (!spu->pdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spu->node = find_spu_node_id(spe);
+ if (spu->node >= MAX_NUMNODES) {
+ printk(KERN_WARNING "SPE %s on node %d ignored,"
+ " node number too big\n", spe->full_name, spu->node);
+ printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ spu_get_pdata(spu)->nid = of_node_to_nid(spe);
+ if (spu_get_pdata(spu)->nid == -1)
+ spu_get_pdata(spu)->nid = 0;
+
+ ret = spu_map_device(spu, spe);
+ /* try old method */
+ if (ret)
+ ret = spu_map_device_old(spu, spe);
+ if (ret)
+ goto out_free;
+
+ ret = spu_map_interrupts(spu, spe);
+ if (ret)
+ ret = spu_map_interrupts_old(spu, spe);
+ if (ret)
+ goto out_unmap;
+
+ spu_get_pdata(spu)->devnode = of_node_get(spe);
+
+ pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n", spu->name,
+ spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
+ spu->priv2, spu->number);
+ goto out;
+
+out_unmap:
+ spu_unmap(spu);
+out_free:
+ kfree(spu->pdata);
+ spu->pdata = NULL;
+out:
+ return ret;
+}
+
+static int of_destroy_spu(struct spu *spu)
+{
+ spu_unmap(spu);
+ of_node_put(spu_get_pdata(spu)->devnode);
+ kfree(spu->pdata);
+ spu->pdata = NULL;
+ return 0;
+}
+
+const struct spu_management_ops spu_management_of_ops = {
+ .enumerate_spus = of_enumerate_spus,
+ .create_spu = of_create_spu,
+ .destroy_spu = of_destroy_spu,
+};
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+ old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+ old_mask & mask);
}
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+ old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+ old_mask | mask);
}
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
- out_be64(&spu->priv1->int_mask_RW[class], mask);
+ out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
}
static u64 int_mask_get(struct spu *spu, int class)
{
- return in_be64(&spu->priv1->int_mask_RW[class]);
+ return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
}
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
- out_be64(&spu->priv1->int_stat_RW[class], stat);
+ out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
}
static u64 int_stat_get(struct spu *spu, int class)
{
- return in_be64(&spu->priv1->int_stat_RW[class]);
+ return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
}
static void cpu_affinity_set(struct spu *spu, int cpu)
{
u64 target = iic_get_target_id(cpu);
u64 route = target << 48 | target << 32 | target << 16;
- out_be64(&spu->priv1->int_route_RW, route);
+ out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
}
static u64 mfc_dar_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_dar_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
}
static u64 mfc_dsisr_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_dsisr_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
}
static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
- out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
}
-static void mfc_sdr_set(struct spu *spu, u64 sdr)
+static void mfc_sdr_setup(struct spu *spu)
{
- out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
}
static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
- out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
}
static u64 mfc_sr1_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_sr1_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
}
static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
- out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+ out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
}
static u64 mfc_tclass_id_get(struct spu *spu)
{
- return in_be64(&spu->priv1->mfc_tclass_id_RW);
+ return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
}
static void tlb_invalidate(struct spu *spu)
{
- out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+ out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
}
static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
- out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+ out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
+ id);
}
static u64 resource_allocation_groupID_get(struct spu *spu)
{
- return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+ return in_be64(
+ &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
}
static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
- out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+ out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
+ enable);
}
static u64 resource_allocation_enable_get(struct spu *spu)
{
- return in_be64(&spu->priv1->resource_allocation_enable_RW);
+ return in_be64(
+ &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
}
const struct spu_priv1_ops spu_priv1_mmio_ops =
@@ -146,7 +524,7 @@ const struct spu_priv1_ops spu_priv1_mmio_ops =
.mfc_dar_get = mfc_dar_get,
.mfc_dsisr_get = mfc_dsisr_get,
.mfc_dsisr_set = mfc_dsisr_set,
- .mfc_sdr_set = mfc_sdr_set,
+ .mfc_sdr_setup = mfc_sdr_setup,
.mfc_sr1_set = mfc_sr1_set,
.mfc_sr1_get = mfc_sr1_get,
.mfc_tclass_id_set = mfc_tclass_id_set,
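
The MMIO backend above moves its platform-private fields (NUMA node id, the priv1 mapping, the device node) into an spu_pdata blob reached through a checked accessor, keeping struct spu itself backend-agnostic. A minimal sketch of that accessor pattern follows; the struct and field names are invented for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct spu_like {
	int number;
	void *pdata;			/* opaque to the generic code */
};

struct spu_pdata_like {
	int nid;			/* NUMA node the memory sits on */
	unsigned long priv1_base;	/* where the priv1 area was mapped */
};

static struct spu_pdata_like *get_pdata(struct spu_like *spu)
{
	assert(spu->pdata);		/* BUG_ON(!spu->pdata) in the kernel */
	return spu->pdata;
}

int main(void)
{
	struct spu_like spu = { .number = 0, .pdata = NULL };
	struct spu_pdata_like *pd = calloc(1, sizeof(*pd));

	if (!pd)
		return 1;
	pd->nid = 1;
	pd->priv1_base = 0x400000UL;
	spu.pdata = pd;

	printf("spu %d: nid %d, priv1 @ %#lx\n",
	       spu.number, get_pdata(&spu)->nid, get_pdata(&spu)->priv1_base);
	free(pd);
	return 0;
}
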
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644
index 00000000000..7b62bd1cc25
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
+/*
+ * spu hypervisor abstraction for direct hardware access.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SPU_PRIV1_MMIO_H
+#define SPU_PRIV1_MMIO_H
+
+struct device_node *spu_devnode(struct spu *spu);
+
+#endif /* SPU_PRIV1_MMIO_H */
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index ecdfbb35f82..472217d19fa 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,7 +1,7 @@
obj-y += switch.o
obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o
+spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
# Rules to build switch.o with the help of SPU tool chain
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 2d22cd59d6f..1898f0d3a8b 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -36,6 +36,7 @@
#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
+#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"
@@ -267,6 +268,11 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
return ctx->csa.lscsa->ls;
}
+static u32 spu_backing_runcntl_read(struct spu_context *ctx)
+{
+ return ctx->csa.prob.spu_runcntl_RW;
+}
+
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
spin_lock(&ctx->csa.register_lock);
@@ -279,9 +285,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
spin_unlock(&ctx->csa.register_lock);
}
-static void spu_backing_runcntl_stop(struct spu_context *ctx)
+static void spu_backing_master_start(struct spu_context *ctx)
+{
+ struct spu_state *csa = &ctx->csa;
+ u64 sr1;
+
+ spin_lock(&csa->register_lock);
+ sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ csa->priv1.mfc_sr1_RW = sr1;
+ spin_unlock(&csa->register_lock);
+}
+
+static void spu_backing_master_stop(struct spu_context *ctx)
{
- spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+ struct spu_state *csa = &ctx->csa;
+ u64 sr1;
+
+ spin_lock(&csa->register_lock);
+ sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ csa->priv1.mfc_sr1_RW = sr1;
+ spin_unlock(&csa->register_lock);
}
static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -345,8 +368,10 @@ struct spu_context_ops spu_backing_ops = {
.npc_write = spu_backing_npc_write,
.status_read = spu_backing_status_read,
.get_ls = spu_backing_get_ls,
+ .runcntl_read = spu_backing_runcntl_read,
.runcntl_write = spu_backing_runcntl_write,
- .runcntl_stop = spu_backing_runcntl_stop,
+ .master_start = spu_backing_master_start,
+ .master_stop = spu_backing_master_stop,
.set_mfc_query = spu_backing_set_mfc_query,
.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
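
spu_backing_master_start/stop() above toggle one run-control bit in the saved MFC_SR1 image under the context's register lock, rather than poking live hardware. Below is a toy read-modify-write of the same shape; the bit position and names are invented, not the real MFC_STATE1 mask.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MASTER_RUN_CONTROL_MASK (1ULL << 12)	/* made-up bit position */

struct saved_state {
	pthread_mutex_t lock;
	uint64_t mfc_sr1;		/* saved copy of the register */
};

static void master_set(struct saved_state *csa, int run)
{
	pthread_mutex_lock(&csa->lock);
	if (run)
		csa->mfc_sr1 |= MASTER_RUN_CONTROL_MASK;
	else
		csa->mfc_sr1 &= ~MASTER_RUN_CONTROL_MASK;
	pthread_mutex_unlock(&csa->lock);
}

int main(void)
{
	struct saved_state csa = { PTHREAD_MUTEX_INITIALIZER, 0 };

	master_set(&csa, 1);
	printf("sr1 after start: %#llx\n", (unsigned long long)csa.mfc_sr1);
	master_set(&csa, 0);
	printf("sr1 after stop : %#llx\n", (unsigned long long)csa.mfc_sr1);
	return 0;
}
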
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 034cf6af53a..0870009f56d 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx)
unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
}
+int spu_acquire_exclusive(struct spu_context *ctx)
+{
+ int ret = 0;
+
+ down_write(&ctx->state_sema);
+ /* ctx is about to be freed, can't acquire any more */
+ if (!ctx->owner) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ctx->state == SPU_STATE_SAVED) {
+ ret = spu_activate(ctx, 0);
+ if (ret)
+ goto out;
+ ctx->state = SPU_STATE_RUNNABLE;
+ } else {
+ /* We need to exclude userspace access to the context. */
+ spu_unmap_mappings(ctx);
+ }
+
+out:
+ if (ret)
+ up_write(&ctx->state_sema);
+ return ret;
+}
+
int spu_acquire_runnable(struct spu_context *ctx)
{
int ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
new file mode 100644
index 00000000000..26945c491f6
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -0,0 +1,238 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/elf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+struct spufs_ctx_info {
+ struct list_head list;
+ int dfd;
+ int memsize; /* in bytes */
+ struct spu_context *ctx;
+};
+
+static LIST_HEAD(ctx_info_list);
+
+static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
+ size_t size, loff_t *off)
+{
+ u64 data;
+ int ret;
+
+ if (spufs_coredump_read[num].read)
+ return spufs_coredump_read[num].read(ctx, buffer, size, off);
+
+ data = spufs_coredump_read[num].get(ctx);
+ ret = copy_to_user(buffer, &data, 8);
+ return ret ? -EFAULT : 8;
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+static int spufs_dump_write(struct file *file, const void *addr, int nr)
+{
+ return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+static int spufs_dump_seek(struct file *file, loff_t off)
+{
+ if (file->f_op->llseek) {
+ if (file->f_op->llseek(file, off, 0) != off)
+ return 0;
+ } else
+ file->f_pos = off;
+ return 1;
+}
+
+static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info)
+{
+ struct spu_context *ctx;
+ unsigned long long lslr;
+
+ ctx = ctx_info->ctx;
+ lslr = ctx->csa.priv2.spu_lslr_RW;
+ ctx_info->memsize = lslr + 1;
+}
+
+static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
+{
+ int dfd, memsize, i, sz, total = 0;
+ char *name;
+ char fullname[80];
+
+ dfd = ctx_info->dfd;
+ memsize = ctx_info->memsize;
+
+ for (i = 0; spufs_coredump_read[i].name; i++) {
+ name = spufs_coredump_read[i].name;
+ sz = spufs_coredump_read[i].size;
+
+ sprintf(fullname, "SPU/%d/%s", dfd, name);
+
+ total += sizeof(struct elf_note);
+ total += roundup(strlen(fullname) + 1, 4);
+ if (!strcmp(name, "mem"))
+ total += roundup(memsize, 4);
+ else
+ total += roundup(sz, 4);
+ }
+
+ return total;
+}
+
+static int spufs_add_one_context(struct file *file, int dfd)
+{
+ struct spu_context *ctx;
+ struct spufs_ctx_info *ctx_info;
+ int size;
+
+ ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+ if (ctx->flags & SPU_CREATE_NOSCHED)
+ return 0;
+
+ ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
+ if (unlikely(!ctx_info))
+ return -ENOMEM;
+
+ ctx_info->dfd = dfd;
+ ctx_info->ctx = ctx;
+
+ spufs_fill_memsize(ctx_info);
+
+ size = spufs_ctx_note_size(ctx_info);
+ list_add(&ctx_info->list, &ctx_info_list);
+ return size;
+}
+
+/*
+ * The additional architecture-specific notes for Cell are various
+ * context files in the spu context.
+ *
+ * This function iterates over all open file descriptors and sees
+ * if they are a directory in spufs. In that case we use spufs
+ * internal functionality to dump them without needing to actually
+ * open the files.
+ */
+static int spufs_arch_notes_size(void)
+{
+ struct fdtable *fdt = files_fdtable(current->files);
+ int size = 0, fd;
+
+ for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
+ if (FD_ISSET(fd, fdt->open_fds)) {
+ struct file *file = fcheck(fd);
+
+ if (file && file->f_op == &spufs_context_fops) {
+ int rval = spufs_add_one_context(file, fd);
+ if (rval < 0)
+ break;
+ size += rval;
+ }
+ }
+ }
+
+ return size;
+}
+
+static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
+ struct file *file)
+{
+ struct spu_context *ctx;
+ loff_t pos = 0;
+ int sz, dfd, rc, total = 0;
+ const int bufsz = 4096;
+ char *name;
+ char fullname[80], *buf;
+ struct elf_note en;
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ dfd = ctx_info->dfd;
+ name = spufs_coredump_read[i].name;
+
+ if (!strcmp(name, "mem"))
+ sz = ctx_info->memsize;
+ else
+ sz = spufs_coredump_read[i].size;
+
+ ctx = ctx_info->ctx;
+ if (!ctx) {
+ return;
+ }
+
+ sprintf(fullname, "SPU/%d/%s", dfd, name);
+ en.n_namesz = strlen(fullname) + 1;
+ en.n_descsz = sz;
+ en.n_type = NT_SPU;
+
+ if (!spufs_dump_write(file, &en, sizeof(en)))
+ return;
+ if (!spufs_dump_write(file, fullname, en.n_namesz))
+ return;
+ if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
+ return;
+
+ do {
+ rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
+ if (rc > 0) {
+ if (!spufs_dump_write(file, buf, rc))
+ return;
+ total += rc;
+ }
+ } while (rc == bufsz && total < sz);
+
+ spufs_dump_seek(file, roundup((unsigned long)file->f_pos
+ - total + sz, 4));
+}
+
+static void spufs_arch_write_notes(struct file *file)
+{
+ int j;
+ struct spufs_ctx_info *ctx_info, *next;
+
+ list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) {
+ spu_acquire_saved(ctx_info->ctx);
+ for (j = 0; j < spufs_coredump_num_notes; j++)
+ spufs_arch_write_note(ctx_info, j, file);
+ spu_release(ctx_info->ctx);
+ list_del(&ctx_info->list);
+ kfree(ctx_info);
+ }
+}
+
+struct spu_coredump_calls spufs_coredump_calls = {
+ .arch_notes_size = spufs_arch_notes_size,
+ .arch_write_notes = spufs_arch_write_notes,
+ .owner = THIS_MODULE,
+};
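
spufs_ctx_note_size() above sizes each ELF note as a fixed header plus the 4-byte-rounded name and payload; spufs_arch_write_note() then emits the notes in the same order. The standalone calculation below reproduces that arithmetic with example payload sizes; the note names and sizes are hypothetical, not the actual spufs files.

#include <stdio.h>
#include <string.h>

struct elf_note_hdr {
	unsigned int n_namesz;
	unsigned int n_descsz;
	unsigned int n_type;
};

static size_t roundup4(size_t x)
{
	return (x + 3) & ~(size_t)3;
}

static size_t note_size(const char *name, size_t payload)
{
	return sizeof(struct elf_note_hdr)
		+ roundup4(strlen(name) + 1)	/* name incl. NUL, padded */
		+ roundup4(payload);		/* payload, padded */
}

int main(void)
{
	size_t total = 0;

	total += note_size("SPU/3/regs", 2048);		/* example size */
	total += note_size("SPU/3/mem", 256 * 1024);	/* example LSLR + 1 */

	printf("total note bytes: %zu\n", total);
	return 0;
}
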
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 533e2723e18..347eff56fcb 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,13 +32,13 @@
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
+#include <asm/spu_info.h>
#include <asm/uaccess.h>
#include "spufs.h"
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
-
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
@@ -51,18 +51,23 @@ spufs_mem_open(struct inode *inode, struct file *file)
}
static ssize_t
+__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ char *local_store = ctx->ops->get_ls(ctx);
+ return simple_read_from_buffer(buffer, size, pos, local_store,
+ LS_SIZE);
+}
+
+static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
size_t size, loff_t *pos)
{
- struct spu_context *ctx = file->private_data;
- char *local_store;
int ret;
+ struct spu_context *ctx = file->private_data;
spu_acquire(ctx);
-
- local_store = ctx->ops->get_ls(ctx);
- ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
-
+ ret = __spufs_mem_read(ctx, buffer, size, pos);
spu_release(ctx);
return ret;
}
@@ -104,11 +109,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
if (ctx->state == SPU_STATE_SAVED) {
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- & ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
+ & ~_PAGE_NO_CACHE);
page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
} else {
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ | _PAGE_NO_CACHE);
page = pfn_to_page((ctx->spu->local_store_phys + offset)
>> PAGE_SHIFT);
}
@@ -131,7 +136,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- /* FIXME: */
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE);
@@ -200,7 +205,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -261,18 +266,23 @@ spufs_regs_open(struct inode *inode, struct file *file)
}
static ssize_t
+__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ return simple_read_from_buffer(buffer, size, pos,
+ lscsa->gprs, sizeof lscsa->gprs);
+}
+
+static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
size_t size, loff_t *pos)
{
- struct spu_context *ctx = file->private_data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
int ret;
+ struct spu_context *ctx = file->private_data;
spu_acquire_saved(ctx);
-
- ret = simple_read_from_buffer(buffer, size, pos,
- lscsa->gprs, sizeof lscsa->gprs);
-
+ ret = __spufs_regs_read(ctx, buffer, size, pos);
spu_release(ctx);
return ret;
}
@@ -307,18 +317,23 @@ static struct file_operations spufs_regs_fops = {
};
static ssize_t
+__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
+ size_t size, loff_t * pos)
+{
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ return simple_read_from_buffer(buffer, size, pos,
+ &lscsa->fpcr, sizeof(lscsa->fpcr));
+}
+
+static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
size_t size, loff_t * pos)
{
- struct spu_context *ctx = file->private_data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
int ret;
+ struct spu_context *ctx = file->private_data;
spu_acquire_saved(ctx);
-
- ret = simple_read_from_buffer(buffer, size, pos,
- &lscsa->fpcr, sizeof(lscsa->fpcr));
-
+ ret = __spufs_fpcr_read(ctx, buffer, size, pos);
spu_release(ctx);
return ret;
}
@@ -718,23 +733,41 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
size_t len, loff_t *pos)
{
- struct spu_context *ctx = file->private_data;
+ int ret = 0;
u32 data;
if (len < 4)
return -EINVAL;
- spu_acquire(ctx);
- data = ctx->ops->signal1_read(ctx);
- spu_release(ctx);
+ if (ctx->csa.spu_chnlcnt_RW[3]) {
+ data = ctx->csa.spu_chnldata_RW[3];
+ ret = 4;
+ }
+
+ if (!ret)
+ goto out;
if (copy_to_user(buf, &data, 4))
return -EFAULT;
- return 4;
+out:
+ return ret;
+}
+
+static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ int ret;
+ struct spu_context *ctx = file->private_data;
+
+ spu_acquire_saved(ctx);
+ ret = __spufs_signal1_read(ctx, buf, len, pos);
+ spu_release(ctx);
+
+ return ret;
}
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
@@ -782,7 +815,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -807,25 +840,41 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
size_t len, loff_t *pos)
{
- struct spu_context *ctx;
+ int ret = 0;
u32 data;
- ctx = file->private_data;
-
if (len < 4)
return -EINVAL;
- spu_acquire(ctx);
- data = ctx->ops->signal2_read(ctx);
- spu_release(ctx);
+ if (ctx->csa.spu_chnlcnt_RW[4]) {
+ data = ctx->csa.spu_chnldata_RW[4];
+ ret = 4;
+ }
+
+ if (!ret)
+ goto out;
if (copy_to_user(buf, &data, 4))
return -EFAULT;
- return 4;
+out:
+ return ret;
+}
+
+static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ spu_acquire_saved(ctx);
+ ret = __spufs_signal2_read(ctx, buf, len, pos);
+ spu_release(ctx);
+
+ return ret;
}
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
@@ -874,8 +923,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- /* FIXME: */
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -902,13 +950,19 @@ static void spufs_signal1_type_set(void *data, u64 val)
spu_release(ctx);
}
+static u64 __spufs_signal1_type_get(void *data)
+{
+ struct spu_context *ctx = data;
+ return ctx->ops->signal1_type_get(ctx);
+}
+
static u64 spufs_signal1_type_get(void *data)
{
struct spu_context *ctx = data;
u64 ret;
spu_acquire(ctx);
- ret = ctx->ops->signal1_type_get(ctx);
+ ret = __spufs_signal1_type_get(data);
spu_release(ctx);
return ret;
@@ -925,13 +979,19 @@ static void spufs_signal2_type_set(void *data, u64 val)
spu_release(ctx);
}
+static u64 __spufs_signal2_type_get(void *data)
+{
+ struct spu_context *ctx = data;
+ return ctx->ops->signal2_type_get(ctx);
+}
+
static u64 spufs_signal2_type_get(void *data)
{
struct spu_context *ctx = data;
u64 ret;
spu_acquire(ctx);
- ret = ctx->ops->signal2_type_get(ctx);
+ ret = __spufs_signal2_type_get(data);
spu_release(ctx);
return ret;
@@ -958,7 +1018,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -1000,7 +1060,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -1041,7 +1101,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -1265,6 +1325,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
goto out;
ctx->tagwait |= 1 << cmd.tag;
+ ret = size;
out:
return ret;
@@ -1360,7 +1421,8 @@ static u64 spufs_npc_get(void *data)
spu_release(ctx);
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
+ "0x%llx\n")
static void spufs_decr_set(void *data, u64 val)
{
@@ -1371,18 +1433,24 @@ static void spufs_decr_set(void *data, u64 val)
spu_release(ctx);
}
-static u64 spufs_decr_get(void *data)
+static u64 __spufs_decr_get(void *data)
{
struct spu_context *ctx = data;
struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ return lscsa->decr.slot[0];
+}
+
+static u64 spufs_decr_get(void *data)
+{
+ struct spu_context *ctx = data;
u64 ret;
spu_acquire_saved(ctx);
- ret = lscsa->decr.slot[0];
+ ret = __spufs_decr_get(data);
spu_release(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
- "%llx\n")
+ "0x%llx\n")
static void spufs_decr_status_set(void *data, u64 val)
{
@@ -1393,62 +1461,76 @@ static void spufs_decr_status_set(void *data, u64 val)
spu_release(ctx);
}
-static u64 spufs_decr_status_get(void *data)
+static u64 __spufs_decr_status_get(void *data)
{
struct spu_context *ctx = data;
struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ return lscsa->decr_status.slot[0];
+}
+
+static u64 spufs_decr_status_get(void *data)
+{
+ struct spu_context *ctx = data;
u64 ret;
spu_acquire_saved(ctx);
- ret = lscsa->decr_status.slot[0];
+ ret = __spufs_decr_status_get(data);
spu_release(ctx);
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
- spufs_decr_status_set, "%llx\n")
+ spufs_decr_status_set, "0x%llx\n")
-static void spufs_spu_tag_mask_set(void *data, u64 val)
+static void spufs_event_mask_set(void *data, u64 val)
{
struct spu_context *ctx = data;
struct spu_lscsa *lscsa = ctx->csa.lscsa;
spu_acquire_saved(ctx);
- lscsa->tag_mask.slot[0] = (u32) val;
+ lscsa->event_mask.slot[0] = (u32) val;
spu_release(ctx);
}
-static u64 spufs_spu_tag_mask_get(void *data)
+static u64 __spufs_event_mask_get(void *data)
{
struct spu_context *ctx = data;
struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ return lscsa->event_mask.slot[0];
+}
+
+static u64 spufs_event_mask_get(void *data)
+{
+ struct spu_context *ctx = data;
u64 ret;
spu_acquire_saved(ctx);
- ret = lscsa->tag_mask.slot[0];
+ ret = __spufs_event_mask_get(data);
spu_release(ctx);
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
- spufs_spu_tag_mask_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
+ spufs_event_mask_set, "0x%llx\n")
-static void spufs_event_mask_set(void *data, u64 val)
+static u64 __spufs_event_status_get(void *data)
{
struct spu_context *ctx = data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
- spu_acquire_saved(ctx);
- lscsa->event_mask.slot[0] = (u32) val;
- spu_release(ctx);
+ struct spu_state *state = &ctx->csa;
+ u64 stat;
+ stat = state->spu_chnlcnt_RW[0];
+ if (stat)
+ return state->spu_chnldata_RW[0];
+ return 0;
}
-static u64 spufs_event_mask_get(void *data)
+static u64 spufs_event_status_get(void *data)
{
struct spu_context *ctx = data;
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
- u64 ret;
+ u64 ret = 0;
+
spu_acquire_saved(ctx);
- ret = lscsa->event_mask.slot[0];
+ ret = __spufs_event_status_get(data);
spu_release(ctx);
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
- spufs_event_mask_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
+ NULL, "0x%llx\n")
static void spufs_srr0_set(void *data, u64 val)
{
@@ -1470,7 +1552,7 @@ static u64 spufs_srr0_get(void *data)
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
- "%llx\n")
+ "0x%llx\n")
static u64 spufs_id_get(void *data)
{
@@ -1488,12 +1570,18 @@ static u64 spufs_id_get(void *data)
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
-static u64 spufs_object_id_get(void *data)
+static u64 __spufs_object_id_get(void *data)
{
struct spu_context *ctx = data;
return ctx->object_id;
}
+static u64 spufs_object_id_get(void *data)
+{
+ /* FIXME: Should there really be no locking here? */
+ return __spufs_object_id_get(data);
+}
+
static void spufs_object_id_set(void *data, u64 id)
{
struct spu_context *ctx = data;
@@ -1503,6 +1591,250 @@ static void spufs_object_id_set(void *data, u64 id)
DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
spufs_object_id_set, "0x%llx\n");
+static u64 __spufs_lslr_get(void *data)
+{
+ struct spu_context *ctx = data;
+ return ctx->csa.priv2.spu_lslr_RW;
+}
+
+static u64 spufs_lslr_get(void *data)
+{
+ struct spu_context *ctx = data;
+ u64 ret;
+
+ spu_acquire_saved(ctx);
+ ret = __spufs_lslr_get(data);
+ spu_release(ctx);
+
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
+
+static int spufs_info_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ struct spu_context *ctx = i->i_ctx;
+ file->private_data = ctx;
+ return 0;
+}
+
+static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ u32 mbox_stat;
+ u32 data;
+
+ mbox_stat = ctx->csa.prob.mb_stat_R;
+ if (mbox_stat & 0x0000ff) {
+ data = ctx->csa.prob.pu_mb_R;
+ }
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ int ret;
+ struct spu_context *ctx = file->private_data;
+
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ spu_acquire_saved(ctx);
+ spin_lock(&ctx->csa.register_lock);
+ ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+ spin_unlock(&ctx->csa.register_lock);
+ spu_release(ctx);
+
+ return ret;
+}
+
+static struct file_operations spufs_mbox_info_fops = {
+ .open = spufs_info_open,
+ .read = spufs_mbox_info_read,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ u32 ibox_stat;
+ u32 data;
+
+ ibox_stat = ctx->csa.prob.mb_stat_R;
+ if (ibox_stat & 0xff0000) {
+ data = ctx->csa.priv2.puint_mb_R;
+ }
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ spu_acquire_saved(ctx);
+ spin_lock(&ctx->csa.register_lock);
+ ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+ spin_unlock(&ctx->csa.register_lock);
+ spu_release(ctx);
+
+ return ret;
+}
+
+static struct file_operations spufs_ibox_info_fops = {
+ .open = spufs_info_open,
+ .read = spufs_ibox_info_read,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ int i, cnt;
+ u32 data[4];
+ u32 wbox_stat;
+
+ wbox_stat = ctx->csa.prob.mb_stat_R;
+ cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+ for (i = 0; i < cnt; i++) {
+ data[i] = ctx->csa.spu_mailbox_data[i];
+ }
+
+ return simple_read_from_buffer(buf, len, pos, &data,
+ cnt * sizeof(u32));
+}
+
+static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ spu_acquire_saved(ctx);
+ spin_lock(&ctx->csa.register_lock);
+ ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+ spin_unlock(&ctx->csa.register_lock);
+ spu_release(ctx);
+
+ return ret;
+}
+
+static struct file_operations spufs_wbox_info_fops = {
+ .open = spufs_info_open,
+ .read = spufs_wbox_info_read,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ struct spu_dma_info info;
+ struct mfc_cq_sr *qp, *spuqp;
+ int i;
+
+ info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+ info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+ info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
+ info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+ info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+ for (i = 0; i < 16; i++) {
+ qp = &info.dma_info_command_data[i];
+ spuqp = &ctx->csa.priv2.spuq[i];
+
+ qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
+ qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
+ qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
+ qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
+ }
+
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof info);
+}
+
+static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ spu_acquire_saved(ctx);
+ spin_lock(&ctx->csa.register_lock);
+ ret = __spufs_dma_info_read(ctx, buf, len, pos);
+ spin_unlock(&ctx->csa.register_lock);
+ spu_release(ctx);
+
+ return ret;
+}
+
+static struct file_operations spufs_dma_info_fops = {
+ .open = spufs_info_open,
+ .read = spufs_dma_info_read,
+};
+
+static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
+ char __user *buf, size_t len, loff_t *pos)
+{
+ struct spu_proxydma_info info;
+ struct mfc_cq_sr *qp, *puqp;
+ int ret = sizeof info;
+ int i;
+
+ if (len < ret)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+ info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+ info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+ for (i = 0; i < 8; i++) {
+ qp = &info.proxydma_info_command_data[i];
+ puqp = &ctx->csa.priv2.puq[i];
+
+ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+ qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+ }
+
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof info);
+}
+
+static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ spu_acquire_saved(ctx);
+ spin_lock(&ctx->csa.register_lock);
+ ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+ spin_unlock(&ctx->csa.register_lock);
+ spu_release(ctx);
+
+ return ret;
+}
+
+static struct file_operations spufs_proxydma_info_fops = {
+ .open = spufs_info_open,
+ .read = spufs_proxydma_info_read,
+};
+
struct tree_descr spufs_dir_contents[] = {
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
@@ -1516,18 +1848,70 @@ struct tree_descr spufs_dir_contents[] = {
{ "signal2", &spufs_signal2_fops, 0666, },
{ "signal1_type", &spufs_signal1_type, 0666, },
{ "signal2_type", &spufs_signal2_type, 0666, },
- { "mss", &spufs_mss_fops, 0666, },
- { "mfc", &spufs_mfc_fops, 0666, },
{ "cntl", &spufs_cntl_fops, 0666, },
- { "npc", &spufs_npc_ops, 0666, },
{ "fpcr", &spufs_fpcr_fops, 0666, },
+ { "lslr", &spufs_lslr_ops, 0444, },
+ { "mfc", &spufs_mfc_fops, 0666, },
+ { "mss", &spufs_mss_fops, 0666, },
+ { "npc", &spufs_npc_ops, 0666, },
+ { "srr0", &spufs_srr0_ops, 0666, },
{ "decr", &spufs_decr_ops, 0666, },
{ "decr_status", &spufs_decr_status_ops, 0666, },
- { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
{ "event_mask", &spufs_event_mask_ops, 0666, },
- { "srr0", &spufs_srr0_ops, 0666, },
+ { "event_status", &spufs_event_status_ops, 0444, },
+ { "psmap", &spufs_psmap_fops, 0666, },
+ { "phys-id", &spufs_id_ops, 0666, },
+ { "object-id", &spufs_object_id_ops, 0666, },
+ { "mbox_info", &spufs_mbox_info_fops, 0444, },
+ { "ibox_info", &spufs_ibox_info_fops, 0444, },
+ { "wbox_info", &spufs_wbox_info_fops, 0444, },
+ { "dma_info", &spufs_dma_info_fops, 0444, },
+ { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+ {},
+};
+
+struct tree_descr spufs_dir_nosched_contents[] = {
+ { "mem", &spufs_mem_fops, 0666, },
+ { "mbox", &spufs_mbox_fops, 0444, },
+ { "ibox", &spufs_ibox_fops, 0444, },
+ { "wbox", &spufs_wbox_fops, 0222, },
+ { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+ { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+ { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+ { "signal1", &spufs_signal1_fops, 0666, },
+ { "signal2", &spufs_signal2_fops, 0666, },
+ { "signal1_type", &spufs_signal1_type, 0666, },
+ { "signal2_type", &spufs_signal2_type, 0666, },
+ { "mss", &spufs_mss_fops, 0666, },
+ { "mfc", &spufs_mfc_fops, 0666, },
+ { "cntl", &spufs_cntl_fops, 0666, },
+ { "npc", &spufs_npc_ops, 0666, },
{ "psmap", &spufs_psmap_fops, 0666, },
{ "phys-id", &spufs_id_ops, 0666, },
{ "object-id", &spufs_object_id_ops, 0666, },
{},
};
+
+struct spufs_coredump_reader spufs_coredump_read[] = {
+ { "regs", __spufs_regs_read, NULL, 128 * 16 },
+ { "fpcr", __spufs_fpcr_read, NULL, 16 },
+ { "lslr", NULL, __spufs_lslr_get, 11 },
+ { "decr", NULL, __spufs_decr_get, 11 },
+ { "decr_status", NULL, __spufs_decr_status_get, 11 },
+ { "mem", __spufs_mem_read, NULL, 256 * 1024, },
+ { "signal1", __spufs_signal1_read, NULL, 4 },
+ { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
+ { "signal2", __spufs_signal2_read, NULL, 4 },
+ { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
+ { "event_mask", NULL, __spufs_event_mask_get, 8 },
+ { "event_status", NULL, __spufs_event_status_get, 8 },
+ { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
+ { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
+ { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
+ { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
+ { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
+ { "object-id", NULL, __spufs_object_id_get, 19 },
+ { },
+};
+int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
+
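For illustration only, a minimal sketch of how a core-dump writer might walk the spufs_coredump_read[] table above; spufs_emit_note() is a hypothetical sink that is not part of this patch, and the __ helpers are assumed to be called with the context already acquired, as in the file handlers above.

/* Sketch only -- not part of this patch.  Assumes the declarations
 * from spufs.h; spufs_emit_note() is a hypothetical sink. */
extern void spufs_emit_note(const char *name, const void *data, size_t size);

static void spufs_dump_context_sketch(struct spu_context *ctx)
{
	struct spufs_coredump_reader *r;
	u64 val;

	for (r = spufs_coredump_read; r->name; r++) {
		if (r->get) {
			/* the __ getters expect the context to be acquired already */
			val = r->get(ctx);
			spufs_emit_note(r->name, &val, sizeof(val));
		}
		/* entries with a ->read callback instead stream up to
		 * ->size bytes into a user (or KERNEL_DS) buffer */
	}
}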
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index d805ffed892..ae42e03b8c8 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -135,21 +135,11 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
return ret;
}
-static u32 spu_hw_signal1_read(struct spu_context *ctx)
-{
- return in_be32(&ctx->spu->problem->signal_notify1);
-}
-
static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
out_be32(&ctx->spu->problem->signal_notify1, data);
}
-static u32 spu_hw_signal2_read(struct spu_context *ctx)
-{
- return in_be32(&ctx->spu->problem->signal_notify2);
-}
-
static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
out_be32(&ctx->spu->problem->signal_notify2, data);
@@ -217,21 +207,42 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
return ctx->spu->local_store;
}
-static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
- eieio();
- out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+ return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}
-static void spu_hw_runcntl_stop(struct spu_context *ctx)
+static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
spin_lock_irq(&ctx->spu->register_lock);
- out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
- while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
- cpu_relax();
+ if (val & SPU_RUNCNTL_ISOLATE)
+ out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
+ out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
spin_unlock_irq(&ctx->spu->register_lock);
}
+static void spu_hw_master_start(struct spu_context *ctx)
+{
+ struct spu *spu = ctx->spu;
+ u64 sr1;
+
+ spin_lock_irq(&spu->register_lock);
+ sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ spu_mfc_sr1_set(spu, sr1);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static void spu_hw_master_stop(struct spu_context *ctx)
+{
+ struct spu *spu = ctx->spu;
+ u64 sr1;
+
+ spin_lock_irq(&spu->register_lock);
+ sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ spu_mfc_sr1_set(spu, sr1);
+ spin_unlock_irq(&spu->register_lock);
+}
+
static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
{
struct spu_problem __iomem *prob = ctx->spu->problem;
@@ -291,9 +302,7 @@ struct spu_context_ops spu_hw_ops = {
.mbox_stat_poll = spu_hw_mbox_stat_poll,
.ibox_read = spu_hw_ibox_read,
.wbox_write = spu_hw_wbox_write,
- .signal1_read = spu_hw_signal1_read,
.signal1_write = spu_hw_signal1_write,
- .signal2_read = spu_hw_signal2_read,
.signal2_write = spu_hw_signal2_write,
.signal1_type_set = spu_hw_signal1_type_set,
.signal1_type_get = spu_hw_signal1_type_get,
@@ -303,8 +312,10 @@ struct spu_context_ops spu_hw_ops = {
.npc_write = spu_hw_npc_write,
.status_read = spu_hw_status_read,
.get_ls = spu_hw_get_ls,
+ .runcntl_read = spu_hw_runcntl_read,
.runcntl_write = spu_hw_runcntl_write,
- .runcntl_stop = spu_hw_runcntl_stop,
+ .master_start = spu_hw_master_start,
+ .master_stop = spu_hw_master_stop,
.set_mfc_query = spu_hw_set_mfc_query,
.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 427d00a4f6a..e3af9112c02 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -33,21 +33,22 @@
#include <linux/slab.h>
#include <linux/parser.h>
-#include <asm/io.h>
+#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/uaccess.h>
#include "spufs.h"
-static kmem_cache_t *spufs_inode_cache;
+static struct kmem_cache *spufs_inode_cache;
+char *isolated_loader;
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
struct spufs_inode_info *ei;
- ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+ ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
if (!ei)
return NULL;
@@ -64,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
}
static void
-spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
{
struct spufs_inode_info *ei = p;
@@ -231,6 +232,7 @@ struct file_operations spufs_context_fops = {
.readdir = dcache_readdir,
.fsync = simple_sync_file,
};
+EXPORT_SYMBOL_GPL(spufs_context_fops);
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
@@ -255,10 +257,14 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
goto out_iput;
ctx->flags = flags;
-
inode->i_op = &spufs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
- ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+ if (flags & SPU_CREATE_NOSCHED)
+ ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
+ mode, ctx);
+ else
+ ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+
if (ret)
goto out_free_ctx;
@@ -307,6 +313,20 @@ static int spufs_create_context(struct inode *inode,
{
int ret;
+ ret = -EPERM;
+ if ((flags & SPU_CREATE_NOSCHED) &&
+ !capable(CAP_SYS_NICE))
+ goto out_unlock;
+
+ ret = -EINVAL;
+ if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
+ == SPU_CREATE_ISOLATE)
+ goto out_unlock;
+
+ ret = -ENODEV;
+ if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
+ goto out_unlock;
+
ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
if (ret)
goto out_unlock;
@@ -540,6 +560,30 @@ spufs_parse_options(char *options, struct inode *root)
return 1;
}
+static void
+spufs_init_isolated_loader(void)
+{
+ struct device_node *dn;
+ const char *loader;
+ int size;
+
+ dn = of_find_node_by_path("/spu-isolation");
+ if (!dn)
+ return;
+
+ loader = get_property(dn, "loader", &size);
+ if (!loader)
+ return;
+
+	/* kmalloc should align on a 16-byte boundary */
+ isolated_loader = kmalloc(size, GFP_KERNEL);
+ if (!isolated_loader)
+ return;
+
+ memcpy(isolated_loader, loader, size);
+ printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
+}
+
static int
spufs_create_root(struct super_block *sb, void *data)
{
@@ -608,6 +652,7 @@ static struct file_system_type spufs_type = {
static int __init spufs_init(void)
{
int ret;
+
ret = -ENOMEM;
spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
sizeof(struct spufs_inode_info), 0,
@@ -625,6 +670,12 @@ static int __init spufs_init(void)
ret = register_spu_syscalls(&spufs_calls);
if (ret)
goto out_fs;
+ ret = register_arch_coredump_calls(&spufs_coredump_calls);
+ if (ret)
+ goto out_fs;
+
+ spufs_init_isolated_loader();
+
return 0;
out_fs:
unregister_filesystem(&spufs_type);
@@ -638,6 +689,7 @@ module_init(spufs_init);
static void __exit spufs_exit(void)
{
spu_sched_exit();
+ unregister_arch_coredump_calls(&spufs_coredump_calls);
unregister_spu_syscalls(&spufs_calls);
unregister_filesystem(&spufs_type);
kmem_cache_destroy(spufs_inode_cache);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 63df8cf4ba1..1acc2ffef8c 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -1,7 +1,11 @@
+#define DEBUG
+
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/io.h>
#include <asm/unistd.h>
#include "spufs.h"
@@ -24,6 +28,7 @@ void spufs_dma_callback(struct spu *spu, int type)
} else {
switch (type) {
case SPE_EVENT_DMA_ALIGNMENT:
+ case SPE_EVENT_SPE_DATA_STORAGE:
case SPE_EVENT_INVALID_DMA:
force_sig(SIGBUS, /* info, */ current);
break;
@@ -48,15 +53,122 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
}
+static int spu_setup_isolated(struct spu_context *ctx)
+{
+ int ret;
+ u64 __iomem *mfc_cntl;
+ u64 sr1;
+ u32 status;
+ unsigned long timeout;
+ const u32 status_loading = SPU_STATUS_RUNNING
+ | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
+
+ if (!isolated_loader)
+ return -ENODEV;
+
+ ret = spu_acquire_exclusive(ctx);
+ if (ret)
+ goto out;
+
+ mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
+
+ /* purge the MFC DMA queue to ensure no spurious accesses before we
+ * enter kernel mode */
+ timeout = jiffies + HZ;
+ out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
+ while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
+ != MFC_CNTL_PURGE_DMA_COMPLETE) {
+ if (time_after(jiffies, timeout)) {
+ printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
+ __FUNCTION__);
+ ret = -EIO;
+ goto out_unlock;
+ }
+ cond_resched();
+ }
+
+ /* put the SPE in kernel mode to allow access to the loader */
+ sr1 = spu_mfc_sr1_get(ctx->spu);
+ sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
+ spu_mfc_sr1_set(ctx->spu, sr1);
+
+ /* start the loader */
+ ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
+ ctx->ops->signal2_write(ctx,
+ (unsigned long)isolated_loader & 0xffffffff);
+
+ ctx->ops->runcntl_write(ctx,
+ SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+
+ ret = 0;
+ timeout = jiffies + HZ;
+ while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
+ status_loading) {
+ if (time_after(jiffies, timeout)) {
+ printk(KERN_ERR "%s: timeout waiting for loader\n",
+ __FUNCTION__);
+ ret = -EIO;
+ goto out_drop_priv;
+ }
+ cond_resched();
+ }
+
+ if (!(status & SPU_STATUS_RUNNING)) {
+		/* If isolated LOAD has failed: run SPU, we will get a
+		 * stop-and-signal later. */
+ pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+ ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+ ret = -EACCES;
+
+ } else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+ /* This isn't allowed by the CBEA, but check anyway */
+ pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+ ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
+ ret = -EINVAL;
+ }
+
+out_drop_priv:
+ /* Finished accessing the loader. Drop kernel mode */
+ sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
+ spu_mfc_sr1_set(ctx->spu, sr1);
+
+out_unlock:
+ spu_release_exclusive(ctx);
+out:
+ return ret;
+}
+
static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
{
int ret;
+ unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
- if ((ret = spu_acquire_runnable(ctx)) != 0)
+ ret = spu_acquire_runnable(ctx);
+ if (ret)
return ret;
- ctx->ops->npc_write(ctx, *npc);
- ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
- return 0;
+
+ if (ctx->flags & SPU_CREATE_ISOLATE) {
+ if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
+ /* Need to release ctx, because spu_setup_isolated will
+ * acquire it exclusively.
+ */
+ spu_release(ctx);
+ ret = spu_setup_isolated(ctx);
+ if (!ret)
+ ret = spu_acquire_runnable(ctx);
+ }
+
+	/* if userspace has set the runcntl register (e.g., to issue an
+ * isolated exit), we need to re-set it here */
+ runcntl = ctx->ops->runcntl_read(ctx) &
+ (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+ if (runcntl == 0)
+ runcntl = SPU_RUNCNTL_RUNNABLE;
+ } else
+ ctx->ops->npc_write(ctx, *npc);
+
+ ctx->ops->runcntl_write(ctx, runcntl);
+ return ret;
}
static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
@@ -70,13 +182,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
if (signal_pending(current))
ret = -ERESTARTSYS;
- if (unlikely(current->ptrace & PT_PTRACED)) {
- if ((*status & SPU_STATUS_STOPPED_BY_STOP)
- && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
- force_sig(SIGTRAP, current);
- ret = -ERESTARTSYS;
- }
- }
+
return ret;
}
@@ -204,6 +310,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
if (down_interruptible(&ctx->run_sema))
return -ERESTARTSYS;
+ ctx->ops->master_start(ctx);
ctx->event_return = 0;
ret = spu_run_init(ctx, npc);
if (ret)
@@ -223,7 +330,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
if (ret)
- goto out;
+ goto out2;
continue;
}
ret = spu_process_events(ctx);
@@ -231,12 +338,24 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)));
- ctx->ops->runcntl_stop(ctx);
+ ctx->ops->master_stop(ctx);
ret = spu_run_fini(ctx, npc, &status);
- if (!ret)
- ret = status;
spu_yield(ctx);
+out2:
+ if ((ret == 0) ||
+ ((ret == -ERESTARTSYS) &&
+ ((status & SPU_STATUS_STOPPED_BY_HALT) ||
+ ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+ (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
+ ret = status;
+
+ if ((status & SPU_STATUS_STOPPED_BY_STOP)
+ && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+ force_sig(SIGTRAP, current);
+ ret = -ERESTARTSYS;
+ }
+
out:
*event = ctx->event_return;
up(&ctx->run_sema);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index a0f55ca2d48..70fb13395c0 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -29,6 +29,7 @@
#include <asm/spu.h>
#include <asm/spu_csa.h>
+#include <asm/spu_info.h>
/* The magic number for our file system */
enum {
@@ -114,13 +115,19 @@ struct spu_context_ops {
void (*npc_write) (struct spu_context * ctx, u32 data);
u32(*status_read) (struct spu_context * ctx);
char*(*get_ls) (struct spu_context * ctx);
+ u32 (*runcntl_read) (struct spu_context * ctx);
void (*runcntl_write) (struct spu_context * ctx, u32 data);
- void (*runcntl_stop) (struct spu_context * ctx);
+ void (*master_start) (struct spu_context * ctx);
+ void (*master_stop) (struct spu_context * ctx);
int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
u32 (*get_mfc_free_elements)(struct spu_context *ctx);
- int (*send_mfc_command)(struct spu_context *ctx,
- struct mfc_dma_command *cmd);
+ int (*send_mfc_command)(struct spu_context * ctx,
+ struct mfc_dma_command * cmd);
+ void (*dma_info_read) (struct spu_context * ctx,
+ struct spu_dma_info * info);
+ void (*proxydma_info_read) (struct spu_context * ctx,
+ struct spu_proxydma_info * info);
};
extern struct spu_context_ops spu_hw_ops;
@@ -135,6 +142,7 @@ struct spufs_inode_info {
container_of(inode, struct spufs_inode_info, vfs_inode)
extern struct tree_descr spufs_dir_contents[];
+extern struct tree_descr spufs_dir_nosched_contents[];
/* system call implementation */
long spufs_run_spu(struct file *file,
@@ -162,6 +170,12 @@ void spu_acquire(struct spu_context *ctx);
void spu_release(struct spu_context *ctx);
int spu_acquire_runnable(struct spu_context *ctx);
void spu_acquire_saved(struct spu_context *ctx);
+int spu_acquire_exclusive(struct spu_context *ctx);
+
+static inline void spu_release_exclusive(struct spu_context *ctx)
+{
+ up_write(&ctx->state_sema);
+}
int spu_activate(struct spu_context *ctx, u64 flags);
void spu_deactivate(struct spu_context *ctx);
@@ -169,6 +183,8 @@ void spu_yield(struct spu_context *ctx);
int __init spu_sched_init(void);
void __exit spu_sched_exit(void);
+extern char *isolated_loader;
+
/*
* spufs_wait
* Same as wait_event_interruptible(), except that here
@@ -207,4 +223,15 @@ void spufs_stop_callback(struct spu *spu);
void spufs_mfc_callback(struct spu *spu);
void spufs_dma_callback(struct spu *spu, int type);
+extern struct spu_coredump_calls spufs_coredump_calls;
+struct spufs_coredump_reader {
+ char *name;
+ ssize_t (*read)(struct spu_context *ctx,
+ char __user *buffer, size_t size, loff_t *pos);
+ u64 (*get)(void *data);
+ size_t size;
+};
+extern struct spufs_coredump_reader spufs_coredump_read[];
+extern int spufs_coredump_num_notes;
+
#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 0f782ca662b..c08981ff7fc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -102,7 +102,7 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
* saved at this time.
*/
isolate_state = SPU_STATUS_ISOLATED_STATE |
- SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
+ SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
@@ -1046,12 +1046,12 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
*/
if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
if (in_be32(&prob->spu_status_R) &
- SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+ SPU_STATUS_ISOLATED_EXIT_STATUS) {
POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
- SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+ SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -1085,7 +1085,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
*/
if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
if (in_be32(&prob->spu_status_R) &
- SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+ SPU_STATUS_ISOLATED_EXIT_STATUS) {
spu_mfc_sr1_set(spu,
MFC_STATE1_MASTER_RUN_CONTROL_MASK);
eieio();
@@ -1095,7 +1095,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
SPU_STATUS_RUNNING);
}
if ((in_be32(&prob->spu_status_R) &
- SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+ SPU_STATUS_ISOLATED_LOAD_STATUS)
|| (in_be32(&prob->spu_status_R) &
SPU_STATUS_ISOLATED_STATE)) {
spu_mfc_sr1_set(spu,
@@ -1916,6 +1916,51 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
wait_spu_stopped(prev, spu); /* Step 57. */
}
+static void force_spu_isolate_exit(struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Stop SPE execution and wait for completion. */
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ iobarrier_rw();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+
+ /* Restart SPE master runcntl. */
+ spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+ iobarrier_w();
+
+ /* Initiate isolate exit request and wait for completion. */
+ out_be64(&priv2->spu_privcntl_RW, 4LL);
+ iobarrier_w();
+ out_be32(&prob->spu_runcntl_RW, 2);
+ iobarrier_rw();
+ POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
+ & SPU_STATUS_STOPPED_BY_STOP));
+
+ /* Reset load request to normal. */
+ out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
+ iobarrier_w();
+}
+
+/**
+ * stop_spu_isolate
+ *	Check SPU run-control state and force an isolated
+ *	exit as necessary.
+ */
+static void stop_spu_isolate(struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
+ /* The SPU is in isolated state; the only way
+ * to get it out is to perform an isolated
+ * exit (clean) operation.
+ */
+ force_spu_isolate_exit(spu);
+ }
+}
+
static void harvest(struct spu_state *prev, struct spu *spu)
{
/*
@@ -1928,6 +1973,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
inhibit_user_access(prev, spu); /* Step 3. */
terminate_spu_app(prev, spu); /* Step 4. */
set_switch_pending(prev, spu); /* Step 5. */
+ stop_spu_isolate(spu); /* NEW. */
remove_other_spu_access(prev, spu); /* Step 6. */
suspend_mfc(prev, spu); /* Step 7. */
wait_suspend_mfc_complete(prev, spu); /* Step 8. */
@@ -2096,11 +2142,11 @@ int spu_save(struct spu_state *prev, struct spu *spu)
acquire_spu_lock(spu); /* Step 1. */
rc = __do_spu_save(prev, spu); /* Steps 2-53. */
release_spu_lock(spu);
- if (rc) {
+ if (rc != 0 && rc != 2 && rc != 6) {
panic("%s failed on SPU[%d], rc=%d.\n",
__func__, spu->number, rc);
}
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(spu_save);
@@ -2165,9 +2211,6 @@ static void init_priv1(struct spu_state *csa)
MFC_STATE1_PROBLEM_STATE_MASK |
MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
- /* Set storage description. */
- csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
-
/* Enable OS-specific set of interrupts. */
csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
index 996c28744e9..63f0aee4c15 100644
--- a/arch/powerpc/platforms/chrp/chrp.h
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -9,4 +9,3 @@ extern long chrp_time_init(void);
extern void chrp_find_bridges(void);
extern void chrp_event_scan(unsigned long);
-extern void chrp_pcibios_fixup(void);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 0f4340506c7..ddb4a116ea8 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -156,15 +156,6 @@ hydra_init(void)
return 1;
}
-void __init
-chrp_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
-}
-
#define PRG_CL_RESET_VALID 0x00010000
static void __init
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 49b8dabcbc9..e1f51d45598 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -588,7 +588,6 @@ static int __init chrp_probe(void)
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
- isa_io_base = CHRP_ISA_IO_BASE; /* default value */
return 1;
}
@@ -600,7 +599,6 @@ define_machine(chrp) {
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
- .pcibios_fixup = chrp_pcibios_fixup,
.restart = rtas_restart,
.power_off = rtas_power_off,
.halt = rtas_halt,
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 234a861870a..ddbe398fbd4 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -74,6 +74,18 @@ config SANDPOINT
Select SANDPOINT if configuring for a Motorola Sandpoint X3
(any flavor).
+config LINKSTATION
+ bool "Linkstation / Kurobox(HG) from Buffalo"
+ select MPIC
+ select FSL_SOC
+ select PPC_UDBG_16550 if SERIAL_8250
+ help
+	  Select LINKSTATION if configuring for one of the PPC-based
+	  (MPC8241) NAS systems from Buffalo Technology. So far only the
+	  KuroboxHG has been tested. In the future the classic Kurobox,
+	  the Linkstation-I HD-HLAN and HD-HGLAN versions, and PPC-based
+	  Terastation systems should be supported too.
+
config MPC7448HPC2
bool "Freescale MPC7448HPC2(Taiga)"
select TSI108_BRIDGE
@@ -146,15 +158,6 @@ config PQ2FADS
Select PQ2FADS if you wish to configure for a Freescale
PQ2FADS board (-VR or -ZU).
-config LITE5200
- bool "Freescale LITE5200 / (IceCube)"
- select PPC_MPC52xx
- help
- Support for the LITE5200 dev board for the MPC5200 from Freescale.
- This is for the LITE5200 version 2.0 board. Don't know if it changes
- much but it's only been tested on this board version. I think this
- board is also known as IceCube.
-
config EV64360
bool "Marvell-EV64360BP"
help
@@ -172,9 +175,6 @@ config TQM8xxL
depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
default y
-config PPC_MPC52xx
- bool
-
config 8260
bool "CPM2 Support" if WILLOW
depends on 6xx
@@ -208,7 +208,7 @@ config PPC_GEN550
depends on SANDPOINT || SPRUCE || PPLUS || \
PRPMC750 || PRPMC800 || LOPEC || \
(EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
- 83xx
+ 83xx || LINKSTATION
default y
config FORCE
@@ -282,13 +282,13 @@ config EPIC_SERIAL_MODE
config MPC10X_BRIDGE
bool
- depends on POWERPMC250 || LOPEC || SANDPOINT
+ depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
select PPC_INDIRECT_PCI
default y
config MPC10X_OPENPIC
bool
- depends on POWERPMC250 || LOPEC || SANDPOINT
+ depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
default y
config MPC10X_STORE_GATHERING
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index fa499fe5929..d3d11a3cd65 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -2,3 +2,4 @@
# Makefile for the 6xx/7xx/7xxxx linux kernel.
#
obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o
+obj-$(CONFIG_LINKSTATION) += linkstation.o ls_uart.o
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
new file mode 100644
index 00000000000..61599d919ea
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -0,0 +1,211 @@
+/*
+ * Board setup routines for the Buffalo Linkstation / Kurobox Platform.
+ *
+ * Copyright (C) 2006 G. Liakhovetski (g.liakhovetski@gmx.de)
+ *
+ * Based on sandpoint.c by Mark A. Greer
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/time.h>
+#include <asm/prom.h>
+#include <asm/mpic.h>
+#include <asm/mpc10x.h>
+#include <asm/pci-bridge.h>
+
+static struct mtd_partition linkstation_physmap_partitions[] = {
+ {
+ .name = "mtd_firmimg",
+ .offset = 0x000000,
+ .size = 0x300000,
+ },
+ {
+ .name = "mtd_bootcode",
+ .offset = 0x300000,
+ .size = 0x070000,
+ },
+ {
+ .name = "mtd_status",
+ .offset = 0x370000,
+ .size = 0x010000,
+ },
+ {
+ .name = "mtd_conf",
+ .offset = 0x380000,
+ .size = 0x080000,
+ },
+ {
+ .name = "mtd_allflash",
+ .offset = 0x000000,
+ .size = 0x400000,
+ },
+ {
+ .name = "mtd_data",
+ .offset = 0x310000,
+ .size = 0x0f0000,
+ },
+};
+
+static int __init add_bridge(struct device_node *dev)
+{
+ int len;
+ struct pci_controller *hose;
+ int *bus_range;
+
+ printk("Adding PCI host bridge %s\n", dev->full_name);
+
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int))
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", dev->full_name);
+
+ hose = pcibios_alloc_controller();
+ if (hose == NULL)
+ return -ENOMEM;
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->arch_data = dev;
+ setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(hose, dev, 1);
+
+ return 0;
+}
+
+static void __init linkstation_setup_arch(void)
+{
+ struct device_node *np;
+#ifdef CONFIG_MTD_PHYSMAP
+ physmap_set_partitions(linkstation_physmap_partitions,
+ ARRAY_SIZE(linkstation_physmap_partitions));
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+
+ /* Lookup PCI host bridges */
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
+ printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
+}
+
+/*
+ * Interrupt setup and service. Interrupts on the linkstation come
+ * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
+ */
+static void __init linkstation_init_IRQ(void)
+{
+ struct mpic *mpic;
+ struct device_node *dnp;
+ void *prop;
+ int size;
+ phys_addr_t paddr;
+
+ dnp = of_find_node_by_type(NULL, "open-pic");
+ if (dnp == NULL)
+ return;
+
+ prop = (struct device_node *)get_property(dnp, "reg", &size);
+ paddr = (phys_addr_t)of_translate_address(dnp, prop);
+
+ mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC ");
+ BUG_ON(mpic == NULL);
+
+ /* PCI IRQs */
+ mpic_assign_isu(mpic, 0, paddr + 0x10200);
+
+ /* I2C */
+ mpic_assign_isu(mpic, 1, paddr + 0x11000);
+
+ /* ttyS0, ttyS1 */
+ mpic_assign_isu(mpic, 2, paddr + 0x11100);
+
+ mpic_init(mpic);
+}
+
+extern void avr_uart_configure(void);
+extern void avr_uart_send(const char);
+
+static void linkstation_restart(char *cmd)
+{
+ local_irq_disable();
+
+ /* Reset system via AVR */
+ avr_uart_configure();
+ /* Send reboot command */
+ avr_uart_send('C');
+
+ for(;;) /* Spin until reset happens */
+ avr_uart_send('G'); /* "kick" */
+}
+
+static void linkstation_power_off(void)
+{
+ local_irq_disable();
+
+ /* Power down system via AVR */
+ avr_uart_configure();
+ /* send shutdown command */
+ avr_uart_send('E');
+
+ for(;;) /* Spin until power-off happens */
+ avr_uart_send('G'); /* "kick" */
+ /* NOTREACHED */
+}
+
+static void linkstation_halt(void)
+{
+ linkstation_power_off();
+ /* NOTREACHED */
+}
+
+static void linkstation_show_cpuinfo(struct seq_file *m)
+{
+ seq_printf(m, "vendor\t\t: Buffalo Technology\n");
+ seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n");
+}
+
+static int __init linkstation_probe(void)
+{
+ unsigned long root;
+
+ root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "linkstation"))
+ return 0;
+ return 1;
+}
+
+define_machine(linkstation){
+ .name = "Buffalo Linkstation",
+ .probe = linkstation_probe,
+ .setup_arch = linkstation_setup_arch,
+ .init_IRQ = linkstation_init_IRQ,
+ .show_cpuinfo = linkstation_show_cpuinfo,
+ .get_irq = mpic_get_irq,
+ .restart = linkstation_restart,
+ .power_off = linkstation_power_off,
+ .halt = linkstation_halt,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
new file mode 100644
index 00000000000..0e837762cc5
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -0,0 +1,131 @@
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <asm/io.h>
+#include <asm/mpc10x.h>
+#include <asm/ppc_sys.h>
+#include <asm/prom.h>
+#include <asm/termbits.h>
+
+static void __iomem *avr_addr;
+static unsigned long avr_clock;
+
+static struct work_struct wd_work;
+
+static void wd_stop(struct work_struct *unused)
+{
+ const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
+ int i = 0, rescue = 8;
+ int len = strlen(string);
+
+ while (rescue--) {
+ int j;
+ char lsr = in_8(avr_addr + UART_LSR);
+
+ if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
+ for (j = 0; j < 16 && i < len; j++, i++)
+ out_8(avr_addr + UART_TX, string[i]);
+ if (i == len) {
+ /* Read "OK" back: 4ms for the last "KKKK"
+ plus a couple bytes back */
+ msleep(7);
+ printk("linkstation: disarming the AVR watchdog: ");
+ while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
+ printk("%c", in_8(avr_addr + UART_RX));
+ break;
+ }
+ }
+ msleep(17);
+ }
+ printk("\n");
+}
+
+#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))
+
+void avr_uart_configure(void)
+{
+ unsigned char cval = UART_LCR_WLEN8;
+ unsigned int quot = AVR_QUOT(avr_clock);
+
+ if (!avr_addr || !avr_clock)
+ return;
+
+ out_8(avr_addr + UART_LCR, cval); /* initialise UART */
+ out_8(avr_addr + UART_MCR, 0);
+ out_8(avr_addr + UART_IER, 0);
+
+ cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
+
+ out_8(avr_addr + UART_LCR, cval); /* Set character format */
+
+ out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
+ out_8(avr_addr + UART_DLL, quot & 0xff); /* LS of divisor */
+ out_8(avr_addr + UART_DLM, quot >> 8); /* MS of divisor */
+ out_8(avr_addr + UART_LCR, cval); /* reset DLAB */
+ out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
+}
+
+void avr_uart_send(const char c)
+{
+ if (!avr_addr || !avr_clock)
+ return;
+
+ out_8(avr_addr + UART_TX, c);
+ out_8(avr_addr + UART_TX, c);
+ out_8(avr_addr + UART_TX, c);
+ out_8(avr_addr + UART_TX, c);
+}
+
+static void __init ls_uart_init(void)
+{
+ local_irq_disable();
+
+#ifndef CONFIG_SERIAL_8250
+ out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
+ out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); /* clear FIFOs */
+ out_8(avr_addr + UART_FCR, 0);
+ out_8(avr_addr + UART_IER, 0);
+
+ /* Clear up interrupts */
+ (void) in_8(avr_addr + UART_LSR);
+ (void) in_8(avr_addr + UART_RX);
+ (void) in_8(avr_addr + UART_IIR);
+ (void) in_8(avr_addr + UART_MSR);
+#endif
+ avr_uart_configure();
+
+ local_irq_enable();
+}
+
+static int __init ls_uarts_init(void)
+{
+ struct device_node *avr;
+ phys_addr_t phys_addr;
+ int len;
+
+ avr = of_find_node_by_path("/soc10x/serial@80004500");
+ if (!avr)
+ return -EINVAL;
+
+ avr_clock = *(u32*)get_property(avr, "clock-frequency", &len);
+ phys_addr = ((u32*)get_property(avr, "reg", &len))[0];
+
+ if (!avr_clock || !phys_addr)
+ return -EINVAL;
+
+ avr_addr = ioremap(phys_addr, 32);
+ if (!avr_addr)
+ return -EFAULT;
+
+ ls_uart_init();
+
+ INIT_WORK(&wd_work, wd_stop);
+ schedule_work(&wd_work);
+
+ return 0;
+}
+
+late_initcall(ls_uarts_init);
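For reference, a standalone sketch of the AVR_QUOT divisor calculation used above; the clock value is made up for the example, the real one comes from the device tree's clock-frequency property.

/* Sketch only: AVR_QUOT rounds the 16x-oversampling divisor for a
 * 9600 baud link to the nearest integer. */
#include <stdio.h>

#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))

int main(void)
{
	unsigned int clock = 32521600;		/* example value only */

	/* (32521600 + 76800) / 153600 = 212, the nearest integer */
	printf("divisor = %u\n", AVR_QUOT(clock));
	return 0;
}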
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index bdb475c65cb..3fcc85f60fb 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -60,7 +60,7 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
extern int tsi108_setup_pci(struct device_node *dev);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-extern void tsi108_pci_int_init(void);
+extern void tsi108_pci_int_init(struct device_node *node);
extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
@@ -71,65 +71,6 @@ int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
return PCIBIOS_SUCCESSFUL;
}
-/*
- * find pci slot by devfn in interrupt map of OF tree
- */
-u8 find_slot_by_devfn(unsigned int *interrupt_map, unsigned int devfn)
-{
- int i;
- unsigned int tmp;
- for (i = 0; i < 4; i++){
- tmp = interrupt_map[i*4*7];
- if ((tmp >> 11) == (devfn >> 3))
- return i;
- }
- return i;
-}
-
-/*
- * Scans the interrupt map for pci device
- */
-void mpc7448_hpc2_fixup_irq(struct pci_dev *dev)
-{
- struct pci_controller *hose;
- struct device_node *node;
- const unsigned int *interrupt;
- int busnr;
- int len;
- u8 slot;
- u8 pin;
-
- /* Lookup the hose */
- busnr = dev->bus->number;
- hose = pci_bus_to_hose(busnr);
- if (!hose)
- printk(KERN_ERR "No pci hose found\n");
-
- /* Check it has an OF node associated */
- node = (struct device_node *) hose->arch_data;
- if (!node)
- printk(KERN_ERR "No pci node found\n");
-
- interrupt = get_property(node, "interrupt-map", &len);
- slot = find_slot_by_devfn(interrupt, dev->devfn);
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- if (pin == 0 || pin > 4)
- pin = 1;
- pin--;
- dev->irq = interrupt[slot*4*7 + pin*7 + 5];
- DBG("TSI_PCI: dev->irq = 0x%x\n", dev->irq);
-}
-/* temporary pci irq map fixup*/
-
-void __init mpc7448_hpc2_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
- for_each_pci_dev(dev) {
- mpc7448_hpc2_fixup_irq(dev);
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
- }
-}
-
static void __init mpc7448_hpc2_setup_arch(void)
{
struct device_node *cpu;
@@ -192,9 +133,12 @@ static void __init mpc7448_hpc2_init_IRQ(void)
{
struct mpic *mpic;
phys_addr_t mpic_paddr = 0;
+ struct device_node *tsi_pic;
+#ifdef CONFIG_PCI
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
- struct device_node *tsi_pic;
+ struct device_node *cascade_node = NULL;
+#endif
tsi_pic = of_find_node_by_type(NULL, "open-pic");
if (tsi_pic) {
@@ -208,31 +152,41 @@ static void __init mpc7448_hpc2_init_IRQ(void)
return;
}
- DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
+ DBG("%s: tsi108 pic phys_addr = 0x%x\n", __FUNCTION__,
(u32) mpic_paddr);
mpic = mpic_alloc(tsi_pic, mpic_paddr,
MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
- 0, /* num_sources used */
- 0, /* num_sources used */
+ 24,
+ NR_IRQS-4, /* num_sources used */
"Tsi108_PIC");
- BUG_ON(mpic == NULL); /* XXXX */
+ BUG_ON(mpic == NULL);
+
+ mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+
mpic_init(mpic);
+#ifdef CONFIG_PCI
tsi_pci = of_find_node_by_type(NULL, "pci");
- if (tsi_pci == 0) {
+ if (tsi_pci == NULL) {
printk("%s: No tsi108 pci node found !\n", __FUNCTION__);
return;
}
+ cascade_node = of_find_node_by_type(NULL, "pic-router");
+ if (cascade_node == NULL) {
+ printk("%s: No tsi108 pci cascade node found !\n", __FUNCTION__);
+ return;
+ }
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
+ DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __FUNCTION__,
+ (u32) cascade_pci_irq);
+ tsi108_pci_int_init(cascade_node);
set_irq_data(cascade_pci_irq, mpic);
set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
-
- tsi108_pci_int_init();
-
+#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
of_node_put(tsi_pic);
@@ -290,7 +244,6 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
return 1;
}
return 0;
-
}
define_machine(mpc7448_hpc2){
@@ -300,7 +253,6 @@ define_machine(mpc7448_hpc2){
.init_IRQ = mpc7448_hpc2_init_IRQ,
.show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
.get_irq = mpic_get_irq,
- .pcibios_fixup = mpc7448_hpc2_pcibios_fixup,
.restart = mpc7448_hpc2_restart,
.calibrate_decr = generic_calibrate_decr,
.machine_check_exception= mpc7448_machine_check_exception,
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index dee4eb4d8be..13ac3015d91 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,5 +1,7 @@
EXTRA_CFLAGS += -mno-minimal-toc
+extra-y += dt.o
+
obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
hvcall.o proc.o htab.o iommu.o misc.o irq.o
obj-$(CONFIG_PCI) += pci.o vpdinfo.o
@@ -7,5 +9,9 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_VIOPATH) += viopath.o
obj-$(CONFIG_MODULES) += ksyms.o
+quiet_cmd_dt_strings = DT_STR $@
+ cmd_dt_strings = $(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings \
+ $< $@
+
$(obj)/dt_mod.o: $(obj)/dt.o
- @$(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings $(obj)/dt.o $(obj)/dt_mod.o
+ $(call if_changed,dt_strings)
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index e305deee7f4..9e8a334a518 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -41,6 +41,7 @@
#include "call_pci.h"
#include "pci.h"
#include "it_exp_vpd_panel.h"
+#include "naca.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -205,13 +206,11 @@ static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
dt_prop(dt, name, &data, sizeof(u32));
}
-#ifdef notyet
static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
u64 data)
{
dt_prop(dt, name, &data, sizeof(u64));
}
-#endif
static void __init dt_prop_u64_list(struct iseries_flat_dt *dt,
const char *name, u64 *data, int n)
@@ -306,6 +305,17 @@ static void __init dt_model(struct iseries_flat_dt *dt)
dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
}
+static void __init dt_initrd(struct iseries_flat_dt *dt)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (naca.xRamDisk) {
+ dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
+ dt_prop_u64(dt, "linux,initrd-end",
+ (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
+ }
+#endif
+}
+
static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
const char *name, u32 reg, int unit,
const char *type, const char *compat, int end)
@@ -641,6 +651,7 @@ void * __init build_flat_dt(unsigned long phys_mem_size)
/* /chosen */
dt_start_node(iseries_dt, "chosen");
dt_prop_str(iseries_dt, "bootargs", cmd_line);
+ dt_initrd(iseries_dt);
dt_end_node(iseries_dt);
dt_cpus(iseries_dt);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 218817d13c5..d7a756d5135 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/tce.h>
@@ -114,12 +115,10 @@ void iommu_table_getparms_iSeries(unsigned long busno,
{
struct iommu_table_cb *parms;
- parms = kmalloc(sizeof(*parms), GFP_KERNEL);
+ parms = kzalloc(sizeof(*parms), GFP_KERNEL);
if (parms == NULL)
panic("PCI_DMA: TCE Table Allocation failed.");
- memset(parms, 0, sizeof(*parms));
-
parms->itc_busno = busno;
parms->itc_slotno = slotno;
parms->itc_virtbus = virtbus;
@@ -168,7 +167,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
}
-void iommu_devnode_init_iSeries(struct device_node *dn)
+void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn)
{
struct iommu_table *tbl;
struct pci_dn *pdn = PCI_DN(dn);
@@ -186,19 +185,14 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
pdn->iommu_table = iommu_init_table(tbl, -1);
else
kfree(tbl);
+ pdev->dev.archdata.dma_data = pdn->iommu_table;
}
#endif
-static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
-static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
-
void iommu_init_early_iSeries(void)
{
ppc_md.tce_build = tce_build_iSeries;
ppc_md.tce_free = tce_free_iSeries;
- ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
- ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
-
- pci_iommu_init();
+ pci_dma_ops = &dma_iommu_ops;
}
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
index a2200842f4e..2430848b98e 100644
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -19,9 +19,3 @@ EXPORT_SYMBOL(HvCall4);
EXPORT_SYMBOL(HvCall5);
EXPORT_SYMBOL(HvCall6);
EXPORT_SYMBOL(HvCall7);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 7641fc7e550..2c6ff0fdac9 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -19,39 +19,8 @@
.text
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
- lbz r3,PACAPROCENABLED(r13)
- blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
- lbz r3,PACAPROCENABLED(r13)
- li r4,0
- stb r4,PACAPROCENABLED(r13)
- blr /* Done */
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
- lbz r5,PACAPROCENABLED(r13)
- /* Check if things are setup the way we want _already_. */
- cmpw 0,r3,r5
- beqlr
- /* are we enabling interrupts? */
- cmpdi 0,r3,0
- stb r3,PACAPROCENABLED(r13)
- beqlr
- /* Check pending interrupts */
- /* A decrementer, IPI or PMC interrupt may have occurred
- * while we were in the hypervisor (which enables) */
- ld r4,PACALPPACAPTR(r13)
- ld r4,LPPACAANYINT(r4)
- cmpdi r4,0
- beqlr
-
- /*
- * Handle pending interrupts in interrupt context
- */
+/* Handle pending interrupts in interrupt context */
+_GLOBAL(iseries_handle_interrupts)
li r0,0x5555
sc
blr
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 4aa165e010d..4a06d9c3498 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -156,53 +156,6 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
}
/*
- * iSeries_pcibios_init
- *
- * Description:
- * This function checks for all possible system PCI host bridges that connect
- * PCI buses. The system hypervisor is queried as to the guest partition
- * ownership status. A pci_controller is built for any bus which is partially
- * owned or fully owned by this guest partition.
- */
-void iSeries_pcibios_init(void)
-{
- struct pci_controller *phb;
- struct device_node *root = of_find_node_by_path("/");
- struct device_node *node = NULL;
-
- if (root == NULL) {
- printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
- "of device tree\n");
- return;
- }
- while ((node = of_get_next_child(root, node)) != NULL) {
- HvBusNumber bus;
- const u32 *busp;
-
- if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
- continue;
-
- busp = get_property(node, "bus-range", NULL);
- if (busp == NULL)
- continue;
- bus = *busp;
- printk("bus %d appears to exist\n", bus);
- phb = pcibios_alloc_controller(node);
- if (phb == NULL)
- continue;
-
- phb->pci_mem_offset = phb->local_number = bus;
- phb->first_busno = bus;
- phb->last_busno = bus;
- phb->ops = &iSeries_pci_ops;
- }
-
- of_node_put(root);
-
- pci_devs_phb_init();
-}
-
-/*
* iSeries_pci_final_fixup(void)
*/
void __init iSeries_pci_final_fixup(void)
@@ -253,7 +206,7 @@ void __init iSeries_pci_final_fixup(void)
PCI_DN(node)->pcidev = pdev;
allocate_device_bars(pdev);
iSeries_Device_Information(pdev, DeviceCount);
- iommu_devnode_init_iSeries(node);
+ iommu_devnode_init_iSeries(pdev, node);
} else
printk("PCI: Device Tree not found for 0x%016lX\n",
(unsigned long)pdev);
@@ -438,11 +391,7 @@ static inline struct device_node *xlate_iomm_address(
/*
* Read MM I/O Instructions for the iSeries
* On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
- * else, data is returned in big Endian format.
- *
- * iSeries_Read_Byte = Read Byte ( 8 bit)
- * iSeries_Read_Word = Read Word (16 bit)
- * iSeries_Read_Long = Read Long (32 bit)
+ * else, data is returned in Big Endian format.
*/
static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
@@ -462,14 +411,15 @@ static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
num_printed = 0;
}
if (num_printed++ < 10)
- printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
+ printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+ IoAddress);
return 0xff;
}
do {
HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
- return (u8)ret.value;
+ return ret.value;
}
static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
@@ -490,7 +440,8 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
num_printed = 0;
}
if (num_printed++ < 10)
- printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
+ printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+ IoAddress);
return 0xffff;
}
do {
@@ -498,7 +449,7 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
BarOffset, 0);
} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
- return swab16((u16)ret.value);
+ return ret.value;
}
static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
@@ -519,7 +470,8 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
num_printed = 0;
}
if (num_printed++ < 10)
- printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
+ printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+ IoAddress);
return 0xffffffff;
}
do {
@@ -527,15 +479,12 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
BarOffset, 0);
} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
- return swab32((u32)ret.value);
+ return ret.value;
}
/*
* Write MM I/O Instructions for the iSeries
*
- * iSeries_Write_Byte = Write Byte (8 bit)
- * iSeries_Write_Word = Write Word(16 bit)
- * iSeries_Write_Long = Write Long(32 bit)
*/
static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
@@ -581,11 +530,12 @@ static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
num_printed = 0;
}
if (num_printed++ < 10)
- printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
+ printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+ IoAddress);
return;
}
do {
- rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
+ rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
@@ -607,231 +557,221 @@ static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
num_printed = 0;
}
if (num_printed++ < 10)
- printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
+ printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+ IoAddress);
return;
}
do {
- rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
+ rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
-extern unsigned char __raw_readb(const volatile void __iomem *addr)
+static u8 iseries_readb(const volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return *(volatile unsigned char __force *)addr;
+ return iSeries_Read_Byte(addr);
}
-EXPORT_SYMBOL(__raw_readb);
-extern unsigned short __raw_readw(const volatile void __iomem *addr)
+static u16 iseries_readw(const volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return *(volatile unsigned short __force *)addr;
+ return le16_to_cpu(iSeries_Read_Word(addr));
}
-EXPORT_SYMBOL(__raw_readw);
-extern unsigned int __raw_readl(const volatile void __iomem *addr)
+static u32 iseries_readl(const volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return *(volatile unsigned int __force *)addr;
+ return le32_to_cpu(iSeries_Read_Long(addr));
}
-EXPORT_SYMBOL(__raw_readl);
-extern unsigned long __raw_readq(const volatile void __iomem *addr)
+static u16 iseries_readw_be(const volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return *(volatile unsigned long __force *)addr;
+ return iSeries_Read_Word(addr);
}
-EXPORT_SYMBOL(__raw_readq);
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+static u32 iseries_readl_be(const volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- *(volatile unsigned char __force *)addr = v;
+ return iSeries_Read_Long(addr);
}
-EXPORT_SYMBOL(__raw_writeb);
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
+static void iseries_writeb(u8 data, volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- *(volatile unsigned short __force *)addr = v;
+ iSeries_Write_Byte(data, addr);
}
-EXPORT_SYMBOL(__raw_writew);
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
+static void iseries_writew(u16 data, volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- *(volatile unsigned int __force *)addr = v;
+ iSeries_Write_Word(cpu_to_le16(data), addr);
}
-EXPORT_SYMBOL(__raw_writel);
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+static void iseries_writel(u32 data, volatile void __iomem *addr)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- *(volatile unsigned long __force *)addr = v;
+ iSeries_Write_Long(cpu_to_le32(data), addr);
}
-EXPORT_SYMBOL(__raw_writeq);
-int in_8(const volatile unsigned char __iomem *addr)
+static void iseries_writew_be(u16 data, volatile void __iomem *addr)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return iSeries_Read_Byte(addr);
- return __in_8(addr);
+ iSeries_Write_Word(data, addr);
}
-EXPORT_SYMBOL(in_8);
-void out_8(volatile unsigned char __iomem *addr, int val)
+static void iseries_writel_be(u32 data, volatile void __iomem *addr)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- iSeries_Write_Byte(val, addr);
- else
- __out_8(addr, val);
+ iSeries_Write_Long(data, addr);
}
-EXPORT_SYMBOL(out_8);
-int in_le16(const volatile unsigned short __iomem *addr)
+static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return iSeries_Read_Word(addr);
- return __in_le16(addr);
+ u8 *dst = buf;
+ while(count-- > 0)
+ *(dst++) = iSeries_Read_Byte(addr);
}
-EXPORT_SYMBOL(in_le16);
-int in_be16(const volatile unsigned short __iomem *addr)
+static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return __in_be16(addr);
+ u16 *dst = buf;
+ while(count-- > 0)
+ *(dst++) = iSeries_Read_Word(addr);
}
-EXPORT_SYMBOL(in_be16);
-void out_le16(volatile unsigned short __iomem *addr, int val)
+static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+ unsigned long count)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- iSeries_Write_Word(val, addr);
- else
- __out_le16(addr, val);
+ u32 *dst = buf;
+ while(count-- > 0)
+ *(dst++) = iSeries_Read_Long(addr);
}
-EXPORT_SYMBOL(out_le16);
-void out_be16(volatile unsigned short __iomem *addr, int val)
+static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+ unsigned long count)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- __out_be16(addr, val);
+ const u8 *src = buf;
+ while(count-- > 0)
+ iSeries_Write_Byte(*(src++), addr);
}
-EXPORT_SYMBOL(out_be16);
-unsigned in_le32(const volatile unsigned __iomem *addr)
+static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+ unsigned long count)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return iSeries_Read_Long(addr);
- return __in_le32(addr);
+ const u16 *src = buf;
+ while(count-- > 0)
+ iSeries_Write_Word(*(src++), addr);
}
-EXPORT_SYMBOL(in_le32);
-unsigned in_be32(const volatile unsigned __iomem *addr)
+static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+ unsigned long count)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- return __in_be32(addr);
+ const u32 *src = buf;
+ while(count-- > 0)
+ iSeries_Write_Long(*(src++), addr);
}
-EXPORT_SYMBOL(in_be32);
-void out_le32(volatile unsigned __iomem *addr, int val)
+static void iseries_memset_io(volatile void __iomem *addr, int c,
+ unsigned long n)
{
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- iSeries_Write_Long(val, addr);
- else
- __out_le32(addr, val);
-}
-EXPORT_SYMBOL(out_le32);
+ volatile char __iomem *d = addr;
-void out_be32(volatile unsigned __iomem *addr, int val)
-{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
- __out_be32(addr, val);
+ while (n-- > 0)
+ iSeries_Write_Byte(c, d++);
}
-EXPORT_SYMBOL(out_be32);
-unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+ unsigned long n)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+ char *d = dest;
+ const volatile char __iomem *s = src;
- return __in_le64(addr);
+ while (n-- > 0)
+ *d++ = iSeries_Read_Byte(s++);
}
-EXPORT_SYMBOL(in_le64);
-unsigned long in_be64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+ unsigned long n)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+ const char *s = src;
+ volatile char __iomem *d = dest;
- return __in_be64(addr);
+ while (n-- > 0)
+ iSeries_Write_Byte(*s++, d++);
}
-EXPORT_SYMBOL(in_be64);
-
-void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
- __out_le64(addr, val);
-}
-EXPORT_SYMBOL(out_le64);
+/* We only set MMIO ops. The default PIO ops will default
+ * to the MMIO ops + pci_io_base, which is 0 on iSeries as
+ * expected, so both should work.
+ *
+ * Note that we don't implement the readq/writeq versions as
+ * I don't know of an HV call for doing so. Thus, the default
+ * operation will be used instead, which will fault, as the value
+ * returned by iSeries for MMIO addresses always hits a non-mapped
+ * area. This is as good as the BUG() we used to have there.
+ */
+static struct ppc_pci_io __initdata iseries_pci_io = {
+ .readb = iseries_readb,
+ .readw = iseries_readw,
+ .readl = iseries_readl,
+ .readw_be = iseries_readw_be,
+ .readl_be = iseries_readl_be,
+ .writeb = iseries_writeb,
+ .writew = iseries_writew,
+ .writel = iseries_writel,
+ .writew_be = iseries_writew_be,
+ .writel_be = iseries_writel_be,
+ .readsb = iseries_readsb,
+ .readsw = iseries_readsw,
+ .readsl = iseries_readsl,
+ .writesb = iseries_writesb,
+ .writesw = iseries_writesw,
+ .writesl = iseries_writesl,
+ .memset_io = iseries_memset_io,
+ .memcpy_fromio = iseries_memcpy_fromio,
+ .memcpy_toio = iseries_memcpy_toio,
+};
-void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
+/*
+ * iSeries_pcibios_init
+ *
+ * Description:
+ * This function checks for all possible system PCI host bridges that connect
+ * PCI buses. The system hypervisor is queried as to the guest partition
+ * ownership status. A pci_controller is built for any bus which is partially
+ * owned or fully owned by this guest partition.
+ */
+void __init iSeries_pcibios_init(void)
{
- BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+ struct pci_controller *phb;
+ struct device_node *root = of_find_node_by_path("/");
+ struct device_node *node = NULL;
- __out_be64(addr, val);
-}
-EXPORT_SYMBOL(out_be64);
+ /* Install IO hooks */
+ ppc_pci_io = iseries_pci_io;
-void memset_io(volatile void __iomem *addr, int c, unsigned long n)
-{
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- volatile char __iomem *d = addr;
+ if (root == NULL) {
+ printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+ "of device tree\n");
+ return;
+ }
+ while ((node = of_get_next_child(root, node)) != NULL) {
+ HvBusNumber bus;
+ const u32 *busp;
- while (n-- > 0) {
- iSeries_Write_Byte(c, d++);
- }
- } else
- eeh_memset_io(addr, c, n);
-}
-EXPORT_SYMBOL(memset_io);
+ if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+ continue;
-void memcpy_fromio(void *dest, const volatile void __iomem *src,
- unsigned long n)
-{
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- char *d = dest;
- const volatile char __iomem *s = src;
+ busp = get_property(node, "bus-range", NULL);
+ if (busp == NULL)
+ continue;
+ bus = *busp;
+ printk("bus %d appears to exist\n", bus);
+ phb = pcibios_alloc_controller(node);
+ if (phb == NULL)
+ continue;
- while (n-- > 0) {
- *d++ = iSeries_Read_Byte(s++);
- }
- } else
- eeh_memcpy_fromio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_fromio);
+ phb->pci_mem_offset = phb->local_number = bus;
+ phb->first_busno = bus;
+ phb->last_busno = bus;
+ phb->ops = &iSeries_pci_ops;
+ }
-void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
-{
- if (firmware_has_feature(FW_FEATURE_ISERIES)) {
- const char *s = src;
- volatile char __iomem *d = dest;
+ of_node_put(root);
- while (n-- > 0) {
- iSeries_Write_Byte(*s++, d++);
- }
- } else
- eeh_memcpy_toio(dest, src, n);
+ pci_devs_phb_init();
}
-EXPORT_SYMBOL(memcpy_toio);
+
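The bulk of this pci.c diff replaces the old accessors, which tested firmware_has_feature(FW_FEATURE_ISERIES) on every MMIO access, with a ppc_pci_io ops table installed once from iSeries_pcibios_init(). The self-contained sketch below (hypothetical names, not kernel code) shows the shape of that indirection:

#include <stdint.h>
#include <stdio.h>

struct pci_io_ops {
	uint8_t (*readb)(const volatile void *addr);
	void    (*writeb)(uint8_t v, volatile void *addr);
};

/* default: plain memory access */
static uint8_t plain_readb(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

static void plain_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}

/* a platform such as iSeries would install hypervisor-call hooks here,
 * exactly once, at init time */
static struct pci_io_ops io_ops = { plain_readb, plain_writeb };

static uint8_t demo_readb(const volatile void *addr)
{
	return io_ops.readb(addr);	/* one indirect call, no feature test */
}

int main(void)
{
	uint8_t reg = 0;

	io_ops.writeb(0x5a, &reg);
	printf("readb -> 0x%02x\n", demo_readb(&reg));
	return 0;
}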
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 6f73469fd3b..bdf2afbb60c 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -21,7 +21,6 @@
#include <linux/smp.h>
#include <linux/param.h>
#include <linux/string.h>
-#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
@@ -80,8 +79,6 @@ extern void iSeries_pci_final_fixup(void);
static void iSeries_pci_final_fixup(void) { }
#endif
-extern int rd_size; /* Defined in drivers/block/rd.c */
-
extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;
@@ -295,24 +292,6 @@ static void __init iSeries_init_early(void)
{
DBG(" -> iSeries_init_early()\n");
-#if defined(CONFIG_BLK_DEV_INITRD)
- /*
- * If the init RAM disk has been configured and there is
- * a non-zero starting address for it, set it up
- */
- if (naca.xRamDisk) {
- initrd_start = (unsigned long)__va(naca.xRamDisk);
- initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
- initrd_below_start_ok = 1; // ramdisk in kernel space
- ROOT_DEV = Root_RAM0;
- if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
- rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
- } else
-#endif /* CONFIG_BLK_DEV_INITRD */
- {
- /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
- }
-
iSeries_recal_tb = get_tb();
iSeries_recal_titan = HvCallXm_loadTod();
@@ -331,17 +310,6 @@ static void __init iSeries_init_early(void)
mf_init();
- /* If we were passed an initrd, set the ROOT_DEV properly if the values
- * look sensible. If not, clear initrd reference.
- */
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
- initrd_end > initrd_start)
- ROOT_DEV = Root_RAM0;
- else
- initrd_start = initrd_end = 0;
-#endif /* CONFIG_BLK_DEV_INITRD */
-
DBG(" <- iSeries_init_early()\n");
}
@@ -649,6 +617,16 @@ static void iseries_dedicated_idle(void)
void __init iSeries_init_IRQ(void) { }
#endif
+static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
+ unsigned long flags)
+{
+ return (void __iomem *)address;
+}
+
+static void iseries_iounmap(volatile void __iomem *token)
+{
+}
+
/*
* iSeries has no legacy IO, anything calling this function has to
* fail or bad things will happen
@@ -665,6 +643,8 @@ static int __init iseries_probe(void)
return 0;
hpte_init_iSeries();
+ /* iSeries does not support 16M pages */
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
return 1;
}
@@ -687,6 +667,8 @@ define_machine(iseries) {
.progress = iSeries_progress,
.probe = iseries_probe,
.check_legacy_ioport = iseries_check_legacy_ioport,
+ .ioremap = iseries_ioremap,
+ .iounmap = iseries_iounmap,
/* XXX Implement enable_pmcs for iSeries */
};
@@ -697,7 +679,7 @@ void * __init iSeries_early_setup(void)
/* Identify CPU type. This is done again by the common code later
* on but calling this function multiple times is fine.
*/
- identify_cpu(0);
+ identify_cpu(0, mfspr(SPRN_PVR));
powerpc_firmware_features |= FW_FEATURE_ISERIES;
powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 04e07e5da0c..84e7ee2c086 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -119,10 +119,9 @@ static int proc_viopath_show(struct seq_file *m, void *v)
struct device_node *node;
const char *sysid;
- buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
+ buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
if (!buf)
return 0;
- memset(buf, 0, HW_PAGE_SIZE);
handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
DMA_FROM_DEVICE);
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index 0657c579b84..c6911ddc479 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -8,5 +8,5 @@ extern void maple_get_rtc_time(struct rtc_time *tm);
extern unsigned long maple_get_boot_time(void);
extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
-extern void maple_pcibios_fixup(void);
+extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 63b4d1bff35..3a32deda765 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -502,38 +502,29 @@ static int __init add_bridge(struct device_node *dev)
}
-void __init maple_pcibios_fixup(void)
+void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
{
- struct pci_dev *dev = NULL;
-
- DBG(" -> maple_pcibios_fixup\n");
-
- for_each_pci_dev(dev) {
- /* Fixup IRQ for PCIe host */
- if (u4_pcie != NULL && dev->bus->number == 0 &&
- pci_bus_to_host(dev->bus) == u4_pcie) {
- printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
- dev->irq = irq_create_mapping(NULL, 1);
- if (dev->irq != NO_IRQ)
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
- continue;
- }
-
- /* Hide AMD8111 IDE interrupt when in legacy mode so
- * the driver calls pci_get_legacy_ide_irq()
- */
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
- dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
- (dev->class & 5) != 5) {
- dev->irq = NO_IRQ;
- continue;
- }
+ DBG(" -> maple_pci_irq_fixup\n");
+
+ /* Fixup IRQ for PCIe host */
+ if (u4_pcie != NULL && dev->bus->number == 0 &&
+ pci_bus_to_host(dev->bus) == u4_pcie) {
+ printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
+ dev->irq = irq_create_mapping(NULL, 1);
+ if (dev->irq != NO_IRQ)
+ set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ }
- /* For all others, map the interrupt from the device-tree */
- pci_read_irq_line(dev);
+ /* Hide AMD8111 IDE interrupt when in legacy mode so
+ * the driver calls pci_get_legacy_ide_irq()
+ */
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
+ (dev->class & 5) != 5) {
+ dev->irq = NO_IRQ;
}
- DBG(" <- maple_pcibios_fixup\n");
+ DBG(" <- maple_pci_irq_fixup\n");
}
static void __init maple_fixup_phb_resources(void)
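The conversion above drops the for_each_pci_dev() walk: generic code now reads each device's interrupt from the device tree itself and calls the new ppc_md.pci_irq_fixup hook once per device, so the platform only patches its exceptions (the U4 PCIe host and the legacy-mode AMD8111 IDE). A self-contained sketch of that shift from a global pass to a per-device callback, with made-up types:

#include <stdio.h>

struct fake_pci_dev {
	int bus;
	int irq;
};

/* new shape: generic code invokes this once for every device it sets up */
static void per_device_irq_fixup(struct fake_pci_dev *dev)
{
	if (dev->bus == 0)
		dev->irq = 1;	/* special-case the host bridge's bus */
}

int main(void)
{
	struct fake_pci_dev devs[] = { { 0, -1 }, { 1, 9 } };
	unsigned i;

	/* the old code owned this loop; now the core does */
	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		per_device_irq_fixup(&devs[i]);

	printf("bus0 irq=%d, bus1 irq=%d\n", devs[0].irq, devs[1].irq);
	return 0;
}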
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index fe6b9bff61b..094989d50ba 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -312,7 +312,7 @@ define_machine(maple_md) {
.setup_arch = maple_setup_arch,
.init_early = maple_init_early,
.init_IRQ = maple_init_IRQ,
- .pcibios_fixup = maple_pcibios_fixup,
+ .pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
.restart = maple_restart,
.power_off = maple_power_off,
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index fd71d72736b..51c2a2397ec 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -3,6 +3,5 @@
extern unsigned long pas_get_boot_time(void);
extern void pas_pci_init(void);
-extern void pas_pcibios_fixup(void);
#endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 39020c1fa13..faa618e0404 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -148,14 +148,6 @@ static int __init add_bridge(struct device_node *dev)
}
-void __init pas_pcibios_fixup(void)
-{
- struct pci_dev *dev = NULL;
-
- for_each_pci_dev(dev)
- pci_read_irq_line(dev);
-}
-
static void __init pas_fixup_phb_resources(void)
{
struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 106896c3b60..89d6e295dbf 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/console.h>
+#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/system.h>
@@ -71,6 +72,9 @@ void __init pas_setup_arch(void)
/* Setup SMP callback */
smp_ops = &pas_smp_ops;
#endif
+ /* no iommu yet */
+ pci_dma_ops = &dma_direct_ops;
+
/* Lookup PCI hosts */
pas_pci_init();
@@ -81,17 +85,6 @@ void __init pas_setup_arch(void)
printk(KERN_DEBUG "Using default idle loop\n");
}
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
-static void __init pas_init_early(void)
-{
- /* No iommu code yet */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
- pci_direct_iommu_init();
-}
-
/* No legacy IO on our parts */
static int pas_check_legacy_ioport(unsigned int baseport)
{
@@ -173,10 +166,8 @@ define_machine(pas) {
.name = "PA Semi PA6T-1682M",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
- .init_early = pas_init_early,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
- .pcibios_fixup = pas_pcibios_fixup,
.restart = pas_restart,
.power_off = pas_power_off,
.halt = pas_halt,
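With no IOMMU support written yet, pas_setup_arch() points pci_dma_ops at dma_direct_ops, meaning DMA (bus) addresses are simply the CPU physical addresses. A tiny illustration of what "direct" mapping amounts to, with a stand-in for virt_to_phys() since this is not kernel code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* stand-in for virt_to_phys(); a real platform might also add a fixed
 * DMA offset here, but there is no translation table to program */
static dma_addr_t direct_map_single(void *cpu_addr)
{
	return (dma_addr_t)(uintptr_t)cpu_addr;
}

int main(void)
{
	int buf;

	printf("bus address: 0x%llx\n",
	       (unsigned long long)direct_map_single(&buf));
	return 0;
}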
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544..c3a89414ddc 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
#define OLD_BACKLIGHT_MAX 15
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
/* Although these variables are used in interrupt context, it makes no sense to
* protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
return level;
}
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
return error;
}
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
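This is the mechanical part of the workqueue API conversion: work handlers now receive the struct work_struct pointer itself and DECLARE_WORK() loses its data argument. A hedged kernel-style sketch (illustrative names, not this file's code) of the two common shapes after the conversion:

/* 1) static work with no per-instance data, as in backlight.c: the handler
 *    reads file-scope state, so the lost void *data argument is not missed */
static void key_worker(struct work_struct *work)
{
	/* ... act on file-scope variables ... */
}
static DECLARE_WORK(key_work, key_worker);

/* 2) work embedded in a context structure: per-instance data is recovered
 *    from the work_struct with container_of() */
struct blreq {
	int level;
	struct work_struct work;
};

static void blreq_worker(struct work_struct *work)
{
	struct blreq *req = container_of(work, struct blreq, work);

	/* ... use req->level where the old API would have passed data ... */
}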
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index e49621be664..c29a6a064d2 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -486,10 +486,6 @@ static long heathrow_sound_enable(struct device_node *node, long param,
static u32 save_fcr[6];
static u32 save_mbcr;
-static u32 save_gpio_levels[2];
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
-static u32 save_unin_clock_ctl;
static struct dbdma_regs save_dbdma[13];
static struct dbdma_regs save_alt_dbdma[13];
@@ -1548,6 +1544,10 @@ void g5_phy_disable_cpu1(void)
#ifdef CONFIG_PM
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 9923adc5248..f42475b27c1 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -48,7 +48,6 @@ static struct pci_controller *u3_ht;
static int has_second_ohare;
#endif /* CONFIG_PPC64 */
-extern u8 pci_cache_line_size;
extern int pcibios_assign_bus_offset;
struct device_node *k2_skiplist[2];
@@ -985,30 +984,23 @@ static int __init add_bridge(struct device_node *dev)
return 0;
}
-void __init pmac_pcibios_fixup(void)
+void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
{
- struct pci_dev* dev = NULL;
-
- for_each_pci_dev(dev) {
- /* Read interrupt from the device-tree */
- pci_read_irq_line(dev);
-
#ifdef CONFIG_PPC32
- /* Fixup interrupt for the modem/ethernet combo controller.
- * on machines with a second ohare chip.
- * The number in the device tree (27) is bogus (correct for
- * the ethernet-only board but not the combo ethernet/modem
- * board). The real interrupt is 28 on the second controller
- * -> 28+32 = 60.
- */
- if (has_second_ohare &&
- dev->vendor == PCI_VENDOR_ID_DEC &&
- dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
- dev->irq = irq_create_mapping(NULL, 60);
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
- }
-#endif /* CONFIG_PPC32 */
+ /* Fixup interrupt for the modem/ethernet combo controller.
+ * on machines with a second ohare chip.
+ * The number in the device tree (27) is bogus (correct for
+ * the ethernet-only board but not the combo ethernet/modem
+ * board). The real interrupt is 28 on the second controller
+ * -> 28+32 = 60.
+ */
+ if (has_second_ohare &&
+ dev->vendor == PCI_VENDOR_ID_DEC &&
+ dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
+ dev->irq = irq_create_mapping(NULL, 60);
+ set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
+#endif /* CONFIG_PPC32 */
}
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 94e7b24b840..6e090a7dea8 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -20,7 +20,7 @@ extern void pmac_get_rtc_time(struct rtc_time *);
extern int pmac_set_rtc_time(struct rtc_time *);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
-extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_irq_fixup(struct pci_dev *);
extern void pmac_pci_init(void);
extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 824a618396a..d949e9df41e 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -70,6 +70,7 @@
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/of_device.h>
+#include <asm/of_platform.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/smu.h>
@@ -361,7 +362,7 @@ char *bootdevice;
void *boot_host;
int boot_target;
int boot_part;
-extern dev_t boot_dev;
+static dev_t boot_dev;
#ifdef CONFIG_SCSI
void __init note_scsi_host(struct device_node *node, void *host)
@@ -676,8 +677,6 @@ static int __init pmac_probe(void)
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
- isa_mem_base = PMAC_ISA_MEM_BASE;
- pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
@@ -727,7 +726,7 @@ define_machine(powermac) {
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
- .pcibios_fixup = pmac_pcibios_fixup,
+ .pci_irq_fixup = pmac_pci_irq_fixup,
.restart = pmac_restart,
.power_off = pmac_power_off,
.halt = pmac_halt,
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
new file mode 100644
index 00000000000..451bfcd5502
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -0,0 +1,43 @@
+menu "PS3 Platform Options"
+ depends on PPC_PS3
+
+config PS3_HTAB_SIZE
+ depends on PPC_PS3
+ int "PS3 Platform pagetable size"
+ range 18 20
+ default 20
+ help
+ This option is only for experts who may have the desire to fine
+ tune the pagetable size on their system. The value here is
+ expressed as the log2 of the page table size. Valid values are
+ 18, 19, and 20, corresponding to 256KB, 512KB and 1MB respectively.
+
+ If unsure, choose the default (20) with the confidence that your
+ system will have optimal runtime performance.
+
+config PS3_DYNAMIC_DMA
+ depends on PPC_PS3 && EXPERIMENTAL
+ bool "PS3 Platform dynamic DMA page table management"
+ default n
+ help
+ This option will enable kernel support to take advantage of the
+ per device dynamic DMA page table management provided by the Cell
+ processor's IO Controller. This support incurs some runtime
+ overhead and also slightly increases kernel memory usage. The
+ current implementation should be considered experimental.
+
+ This support is mainly for Linux kernel development. If unsure,
+ say N.
+
+config PS3_USE_LPAR_ADDR
+ depends on PPC_PS3 && EXPERIMENTAL
+ bool "PS3 use lpar address space"
+ default y
+ help
+ This option is solely for experimentation by experts. Disables
+ translation of lpar addresses. SPE support currently won't work
+ without this set to y.
+
+ If you have any doubt, choose the default y.
+
+endmenu
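CONFIG_PS3_HTAB_SIZE is the log2 of the hash page table size in bytes, which is why the allowed range 18..20 maps to 256KB, 512KB and 1MB. A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int log2_size;

	for (log2_size = 18; log2_size <= 20; log2_size++)
		printf("CONFIG_PS3_HTAB_SIZE=%d -> %lu KB\n",
		       log2_size, (1UL << log2_size) / 1024);
	return 0;
}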
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
new file mode 100644
index 00000000000..3757cfabc8c
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -0,0 +1,4 @@
+obj-y += setup.o mm.o smp.o time.o hvcall.o htab.o repository.o
+obj-y += interrupt.o exports.o os-area.o
+
+obj-$(CONFIG_SPU_BASE) += spu.o
diff --git a/arch/powerpc/platforms/ps3/exports.c b/arch/powerpc/platforms/ps3/exports.c
new file mode 100644
index 00000000000..a7e8ffd24a6
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/exports.c
@@ -0,0 +1,27 @@
+/*
+ * PS3 hvcall exports for modules.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+
+#define LV1_CALL(name, in, out, num) \
+ extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
+ EXPORT_SYMBOL(_lv1_##name);
+
+#include <asm/lv1call.h>
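exports.c relies on the usual "X macro" trick: <asm/lv1call.h> contains one LV1_CALL(name, in, out, num) line per hypervisor call, and each includer defines LV1_CALL to expand that list however it needs (here, an extern declaration plus EXPORT_SYMBOL). A self-contained illustration of the technique with made-up names:

#include <stdio.h>

/* stand-in for the shared header: the list of calls, defined once */
#define CALL_LIST \
	CALL(get_version) \
	CALL(open_device)

/* expansion 1: generate prototypes */
#define CALL(name) void demo_##name(void);
CALL_LIST
#undef CALL

/* expansion 2: generate a table of names from the same list */
#define CALL(name) #name,
static const char *call_names[] = { CALL_LIST };
#undef CALL

void demo_get_version(void) { }
void demo_open_device(void) { }

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(call_names) / sizeof(call_names[0]); i++)
		printf("lv1-style call: %s\n", call_names[i]);
	return 0;
}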
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
new file mode 100644
index 00000000000..8fe1769655a
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -0,0 +1,277 @@
+/*
+ * PS3 pagetable management routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/machdep.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static hpte_t *htab;
+static unsigned long htab_addr;
+static unsigned char *bolttab;
+static unsigned char *inusetab;
+
+static spinlock_t ps3_bolttab_lock = SPIN_LOCK_UNLOCKED;
+
+#define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
+ _debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _debug_dump_hpte(unsigned long pa, unsigned long va,
+ unsigned long group, unsigned long bitmap, hpte_t lhpte, int psize,
+ unsigned long slot, const char* func, int line)
+{
+ DBG("%s:%d: pa = %lxh\n", func, line, pa);
+ DBG("%s:%d: lpar = %lxh\n", func, line,
+ ps3_mm_phys_to_lpar(pa));
+ DBG("%s:%d: va = %lxh\n", func, line, va);
+ DBG("%s:%d: group = %lxh\n", func, line, group);
+ DBG("%s:%d: bitmap = %lxh\n", func, line, bitmap);
+ DBG("%s:%d: hpte.v = %lxh\n", func, line, lhpte.v);
+ DBG("%s:%d: hpte.r = %lxh\n", func, line, lhpte.r);
+ DBG("%s:%d: psize = %xh\n", func, line, psize);
+ DBG("%s:%d: slot = %lxh\n", func, line, slot);
+}
+
+static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
+ unsigned long pa, unsigned long rflags, unsigned long vflags, int psize)
+{
+ unsigned long slot;
+ hpte_t lhpte;
+ int secondary = 0;
+ unsigned long result;
+ unsigned long bitmap;
+ unsigned long flags;
+ unsigned long p_pteg, s_pteg, b_index, b_mask, cb, ci;
+
+ vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
+
+ lhpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+ lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+
+ p_pteg = hpte_group / HPTES_PER_GROUP;
+ s_pteg = ~p_pteg & htab_hash_mask;
+
+ spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+ BUG_ON(bolttab[p_pteg] == 0xff && bolttab[s_pteg] == 0xff);
+
+ bitmap = (inusetab[p_pteg] << 8) | inusetab[s_pteg];
+
+ if (bitmap == 0xffff) {
+ /*
+ * PTEG is full. Search for victim.
+ */
+ bitmap &= ~((bolttab[p_pteg] << 8) | bolttab[s_pteg]);
+ do {
+ ci = mftb() & 15;
+ cb = 0x8000UL >> ci;
+ } while ((cb & bitmap) == 0);
+ } else {
+ /*
+	 * search for a free slot in hardware order
+ * [primary] 0, 2, 4, 6, 1, 3, 5, 7
+ * [secondary] 0, 2, 4, 6, 1, 3, 5, 7
+ */
+ for (ci = 0; ci < HPTES_PER_GROUP; ci += 2) {
+ cb = 0x8000UL >> ci;
+ if ((cb & bitmap) == 0)
+ goto found;
+ }
+ for (ci = 1; ci < HPTES_PER_GROUP; ci += 2) {
+ cb = 0x8000UL >> ci;
+ if ((cb & bitmap) == 0)
+ goto found;
+ }
+ for (ci = HPTES_PER_GROUP; ci < HPTES_PER_GROUP*2; ci += 2) {
+ cb = 0x8000UL >> ci;
+ if ((cb & bitmap) == 0)
+ goto found;
+ }
+ for (ci = HPTES_PER_GROUP+1; ci < HPTES_PER_GROUP*2; ci += 2) {
+ cb = 0x8000UL >> ci;
+ if ((cb & bitmap) == 0)
+ goto found;
+ }
+ }
+
+found:
+ if (ci < HPTES_PER_GROUP) {
+ slot = p_pteg * HPTES_PER_GROUP + ci;
+ } else {
+ slot = s_pteg * HPTES_PER_GROUP + (ci & 7);
+ /* lhpte.dw0.dw0.h = 1; */
+ vflags |= HPTE_V_SECONDARY;
+ lhpte.v |= HPTE_V_SECONDARY;
+ }
+
+ result = lv1_write_htab_entry(0, slot, lhpte.v, lhpte.r);
+
+ if (result) {
+ debug_dump_hpte(pa, va, hpte_group, bitmap, lhpte, psize, slot);
+ BUG();
+ }
+
+ /*
+	 * If the used slot is not in the primary HPTE group,
+	 * it must be in the secondary HPTE group.
+ */
+
+ if ((hpte_group ^ slot) & ~(HPTES_PER_GROUP - 1)) {
+ secondary = 1;
+ b_index = s_pteg;
+ } else {
+ secondary = 0;
+ b_index = p_pteg;
+ }
+
+ b_mask = (lhpte.v & HPTE_V_BOLTED) ? 1 << 7 : 0 << 7;
+ bolttab[b_index] |= b_mask >> (slot & 7);
+ b_mask = 1 << 7;
+ inusetab[b_index] |= b_mask >> (slot & 7);
+ spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+ return (slot & 7) | (secondary << 3);
+}
+
+static long ps3_hpte_remove(unsigned long hpte_group)
+{
+ panic("ps3_hpte_remove() not implemented");
+ return 0;
+}
+
+static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
+ unsigned long va, int psize, int local)
+{
+ unsigned long flags;
+ unsigned long result;
+ unsigned long pteg, bit;
+ unsigned long hpte_v, want_v;
+
+ want_v = hpte_encode_v(va, psize);
+
+ spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+ hpte_v = htab[slot].v;
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+ spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+ /* ps3_hpte_insert() will be used to update PTE */
+ return -1;
+ }
+
+ result = lv1_write_htab_entry(0, slot, 0, 0);
+
+ if (result) {
+ DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+ __func__, va, slot, psize, result, result);
+ BUG();
+ }
+
+ pteg = slot / HPTES_PER_GROUP;
+ bit = slot % HPTES_PER_GROUP;
+ inusetab[pteg] &= ~(0x80 >> bit);
+
+ spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+ /* ps3_hpte_insert() will be used to update PTE */
+ return -1;
+}
+
+static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+ int psize)
+{
+ panic("ps3_hpte_updateboltedpp() not implemented");
+}
+
+static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
+ int psize, int local)
+{
+ unsigned long flags;
+ unsigned long result;
+ unsigned long pteg, bit;
+
+ spin_lock_irqsave(&ps3_bolttab_lock, flags);
+ result = lv1_write_htab_entry(0, slot, 0, 0);
+
+ if (result) {
+ DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+ __func__, va, slot, psize, result, result);
+ BUG();
+ }
+
+ pteg = slot / HPTES_PER_GROUP;
+ bit = slot % HPTES_PER_GROUP;
+ inusetab[pteg] &= ~(0x80 >> bit);
+ spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+}
+
+static void ps3_hpte_clear(void)
+{
+ lv1_unmap_htab(htab_addr);
+}
+
+void __init ps3_hpte_init(unsigned long htab_size)
+{
+ long bitmap_size;
+
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ ppc_md.hpte_invalidate = ps3_hpte_invalidate;
+ ppc_md.hpte_updatepp = ps3_hpte_updatepp;
+ ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
+ ppc_md.hpte_insert = ps3_hpte_insert;
+ ppc_md.hpte_remove = ps3_hpte_remove;
+ ppc_md.hpte_clear_all = ps3_hpte_clear;
+
+ ppc64_pft_size = __ilog2(htab_size);
+
+ bitmap_size = htab_size / sizeof(hpte_t) / 8;
+
+ bolttab = __va(lmb_alloc(bitmap_size, 1));
+ inusetab = __va(lmb_alloc(bitmap_size, 1));
+
+ memset(bolttab, 0, bitmap_size);
+ memset(inusetab, 0, bitmap_size);
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+void __init ps3_map_htab(void)
+{
+ long result;
+ unsigned long htab_size = (1UL << ppc64_pft_size);
+
+ result = lv1_map_htab(0, &htab_addr);
+
+ htab = (hpte_t *)__ioremap(htab_addr, htab_size, PAGE_READONLY_X);
+
+ DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
+ htab_addr, (unsigned long)htab);
+}
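ps3_hpte_insert() tracks slot usage with one byte per PTE group (bit 7 corresponds to slot 0) and, on insert, packs the primary and secondary group bytes into a 16-bit bitmap so a single search covers both groups, trying even slots before odd ones within each group. A self-contained model of that search order (victim selection on a full table is left out):

#include <stdio.h>

/* bit for slot index i (0..15) in the packed bitmap, matching the kernel's
 * "0x8000UL >> ci" convention: primary group in the high byte */
static int find_slot(unsigned bitmap)
{
	unsigned group, ci, base, cb;

	for (group = 0; group < 2; group++) {	/* primary, then secondary */
		base = group * 8;
		for (ci = 0; ci < 8; ci += 2) {	/* even slots first... */
			cb = 0x8000u >> (base + ci);
			if (!(cb & bitmap))
				return base + ci;
		}
		for (ci = 1; ci < 8; ci += 2) {	/* ...then odd slots */
			cb = 0x8000u >> (base + ci);
			if (!(cb & bitmap))
				return base + ci;
		}
	}
	return -1;	/* both groups full: the real code evicts a victim */
}

int main(void)
{
	unsigned primary = 0xff, secondary = 0x01;	/* primary group full */
	unsigned bitmap = (primary << 8) | secondary;

	printf("first free slot index: %d\n", find_slot(bitmap));
	return 0;
}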
diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S
new file mode 100644
index 00000000000..54be6523a0e
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/hvcall.S
@@ -0,0 +1,804 @@
+/*
+ * PS3 hvcall interface.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ * Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+#define lv1call .long 0x44000022; extsw r3, r3
+
+#define LV1_N_IN_0_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_0_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_1_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_2_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_3_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_4_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_5_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_6_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_7_IN_0_OUT LV1_N_IN_0_OUT
+
+#define LV1_0_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r3, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_0_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r3, -8(r1); \
+ stdu r4, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_0_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r3, -8(r1); \
+ std r4, -16(r1); \
+ stdu r5, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_0_IN_7_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r3, -8(r1); \
+ std r4, -16(r1); \
+ std r5, -24(r1); \
+ std r6, -32(r1); \
+ std r7, -40(r1); \
+ std r8, -48(r1); \
+ stdu r9, -56(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 56; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ ld r11, -40(r1); \
+ std r8, 0(r11); \
+ ld r11, -48(r1); \
+ std r9, 0(r11); \
+ ld r11, -56(r1); \
+ std r10, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r4, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ stdu r5, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ std r5, -16(r1); \
+ stdu r6, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_4_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ std r5, -16(r1); \
+ std r6, -24(r1); \
+ stdu r7, -32(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 32; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_5_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ std r5, -16(r1); \
+ std r6, -24(r1); \
+ std r7, -32(r1); \
+ stdu r8, -40(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 40; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ ld r11, -40(r1); \
+ std r8, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_6_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ std r5, -16(r1); \
+ std r6, -24(r1); \
+ std r7, -32(r1); \
+ std r8, -40(r1); \
+ stdu r9, -48(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 48; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ ld r11, -40(r1); \
+ std r8, 0(r11); \
+ ld r11, -48(r1); \
+ std r9, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_1_IN_7_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r4, -8(r1); \
+ std r5, -16(r1); \
+ std r6, -24(r1); \
+ std r7, -32(r1); \
+ std r8, -40(r1); \
+ std r9, -48(r1); \
+ stdu r10, -56(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 56; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ ld r11, -40(r1); \
+ std r8, 0(r11); \
+ ld r11, -48(r1); \
+ std r9, 0(r11); \
+ ld r11, -56(r1); \
+ std r10, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_2_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r5, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_2_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r5, -8(r1); \
+ stdu r6, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_2_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r5, -8(r1); \
+ std r6, -16(r1); \
+ stdu r7, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_2_IN_4_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r5, -8(r1); \
+ std r6, -16(r1); \
+ std r7, -24(r1); \
+ stdu r8, -32(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 32; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_2_IN_5_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r5, -8(r1); \
+ std r6, -16(r1); \
+ std r7, -24(r1); \
+ std r8, -32(r1); \
+ stdu r9, -40(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 40; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ ld r11, -32(r1); \
+ std r7, 0(r11); \
+ ld r11, -40(r1); \
+ std r8, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_3_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r6, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_3_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r6, -8(r1); \
+ stdu r7, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_3_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r6, -8(r1); \
+ std r7, -16(r1); \
+ stdu r8, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_4_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r7, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_4_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r7, -8(r1); \
+ stdu r8, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_4_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r7, -8(r1); \
+ std r8, -16(r1); \
+ stdu r9, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_5_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r8, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_5_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r8, -8(r1); \
+ stdu r9, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_5_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r8, -8(r1); \
+ std r9, -16(r1); \
+ stdu r10, -24(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 24; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, -24(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_6_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r9, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_6_IN_2_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r9, -8(r1); \
+ stdu r10, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_6_IN_3_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r9, -8(r1); \
+ stdu r10, -16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 16; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ ld r11, -16(r1); \
+ std r5, 0(r11); \
+ ld r11, 48+8*8(r1); \
+ std r6, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_7_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ stdu r10, -8(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ addi r1, r1, 8; \
+ ld r11, -8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_7_IN_6_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ std r10, 48+8*7(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ ld r11, 48+8*7(r1); \
+ std r4, 0(r11); \
+ ld r11, 48+8*8(r1); \
+ std r5, 0(r11); \
+ ld r11, 48+8*9(r1); \
+ std r6, 0(r11); \
+ ld r11, 48+8*10(r1); \
+ std r7, 0(r11); \
+ ld r11, 48+8*11(r1); \
+ std r8, 0(r11); \
+ ld r11, 48+8*12(r1); \
+ std r9, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+#define LV1_8_IN_1_OUT(API_NAME, API_NUMBER) \
+_GLOBAL(_##API_NAME) \
+ \
+ mflr r0; \
+ std r0, 16(r1); \
+ \
+ li r11, API_NUMBER; \
+ lv1call; \
+ \
+ ld r11, 48+8*8(r1); \
+ std r4, 0(r11); \
+ \
+ ld r0, 16(r1); \
+ mtlr r0; \
+ blr
+
+ .text
+
+/* the lv1 underscored call definitions expand here */
+
+#define LV1_CALL(name, in, out, num) LV1_##in##_IN_##out##_OUT(lv1_##name, num)
+#include <asm/lv1call.h>
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
new file mode 100644
index 00000000000..056c1e4141b
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -0,0 +1,575 @@
+/*
+ * PS3 interrupt routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+/**
+ * ps3_alloc_io_irq - Assign a virq to a system bus device.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An io irq represents a non-virtualized device interrupt. @interrupt_id
+ * corresponds to the interrupt number of the interrupt controller.
+ */
+
+int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq)
+{
+ int result;
+ unsigned long outlet;
+
+ result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return result;
+ }
+
+ *virq = irq_create_mapping(NULL, outlet);
+
+ pr_debug("%s:%d: interrupt_id %u => outlet %lu, virq %u\n",
+ __func__, __LINE__, interrupt_id, outlet, *virq);
+
+ return 0;
+}
+
+int ps3_free_io_irq(unsigned int virq)
+{
+ int result;
+
+ result = lv1_destruct_io_irq_outlet(virq_to_hw(virq));
+
+ if (result)
+ pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+
+ irq_dispose_mapping(virq);
+
+ return result;
+}
+
+/**
+ * ps3_alloc_event_irq - Allocate a virq for use with a system event.
+ * @virq: The assigned Linux virq.
+ *
+ * The virq can be used with lv1_connect_interrupt_event_receive_port() to
+ * arrange to receive events, or with ps3_send_event_locally() to signal
+ * events.
+ */
+
+int ps3_alloc_event_irq(unsigned int *virq)
+{
+ int result;
+ unsigned long outlet;
+
+ result = lv1_construct_event_receive_port(&outlet);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ *virq = NO_IRQ;
+ return result;
+ }
+
+ *virq = irq_create_mapping(NULL, outlet);
+
+ pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__, outlet,
+ *virq);
+
+ return 0;
+}
+
+int ps3_free_event_irq(unsigned int virq)
+{
+ int result;
+
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+ result = lv1_destruct_event_receive_port(virq_to_hw(virq));
+
+ if (result)
+ pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+
+ irq_dispose_mapping(virq);
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+int ps3_send_event_locally(unsigned int virq)
+{
+ return lv1_send_event_locally(virq_to_hw(virq));
+}
+
+/**
+ * ps3_connect_event_irq - Assign a virq to a system bus device.
+ * @did: The HV device identifier read from the system repository.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An event irq represents a virtual device interrupt. The interrupt_id
+ * corresponds to the software interrupt number.
+ */
+
+int ps3_connect_event_irq(const struct ps3_device_id *did,
+ unsigned int interrupt_id, unsigned int *virq)
+{
+ int result;
+
+ result = ps3_alloc_event_irq(virq);
+
+ if (result)
+ return result;
+
+ result = lv1_connect_interrupt_event_receive_port(did->bus_id,
+ did->dev_id, virq_to_hw(*virq), interrupt_id);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
+ " failed: %s\n", __func__, __LINE__,
+ ps3_result(result));
+ ps3_free_event_irq(*virq);
+ *virq = NO_IRQ;
+ return result;
+ }
+
+ pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+ interrupt_id, *virq);
+
+ return 0;
+}
+
+int ps3_disconnect_event_irq(const struct ps3_device_id *did,
+ unsigned int interrupt_id, unsigned int virq)
+{
+ int result;
+
+ pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+ interrupt_id, virq);
+
+ result = lv1_disconnect_interrupt_event_receive_port(did->bus_id,
+ did->dev_id, virq_to_hw(virq), interrupt_id);
+
+ if (result)
+ pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
+ " failed: %s\n", __func__, __LINE__,
+ ps3_result(result));
+
+ ps3_free_event_irq(virq);
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+/**
+ * ps3_alloc_vuart_irq - Configure the system virtual uart virq.
+ * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
+ * @virq: The assigned Linux virq.
+ *
+ * The system supports only a single virtual uart, so multiple calls without
+ * freeing the interrupt will return a wrong state error.
+ */
+
+int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq)
+{
+ int result;
+ unsigned long outlet;
+ unsigned long lpar_addr;
+
+ BUG_ON(!is_kernel_addr((unsigned long)virt_addr_bmp));
+
+ lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
+
+ result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return result;
+ }
+
+ *virq = irq_create_mapping(NULL, outlet);
+
+ pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__,
+ outlet, *virq);
+
+ return 0;
+}
+
+int ps3_free_vuart_irq(unsigned int virq)
+{
+ int result;
+
+ result = lv1_deconfigure_virtual_uart_irq();
+
+ if (result) {
+ pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return result;
+ }
+
+ irq_dispose_mapping(virq);
+
+ return result;
+}
+
+/**
+ * ps3_alloc_spe_irq - Configure an spe virq.
+ * @spe_id: The spe_id returned from lv1_construct_logical_spe().
+ * @class: The spe interrupt class {0,1,2}.
+ * @virq: The assigned Linux virq.
+ *
+ */
+
+int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
+ unsigned int *virq)
+{
+ int result;
+ unsigned long outlet;
+
+ BUG_ON(class > 2);
+
+ result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return result;
+ }
+
+ *virq = irq_create_mapping(NULL, outlet);
+
+ pr_debug("%s:%d: spe_id %lu, class %u, outlet %lu, virq %u\n",
+ __func__, __LINE__, spe_id, class, outlet, *virq);
+
+ return 0;
+}
+
+int ps3_free_spe_irq(unsigned int virq)
+{
+ irq_dispose_mapping(virq);
+ return 0;
+}
+
+#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
+#define PS3_PLUG_MAX 63
+
+/**
+ * struct bmp - a per cpu irq status and mask bitmap structure
+ * @status: 256 bit status bitmap indexed by plug
+ * @unused_1: Unused remainder of the 256 bit status bitmap.
+ * @mask: 256 bit mask bitmap indexed by plug
+ * @unused_2: Unused remainder of the 256 bit mask bitmap.
+ * @lock: Protects accesses to the status and mask bitmaps.
+ * @ipi_debug_brk_mask: Single bit mask of the cpu's ipi debug break plug.
+ *
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
+ * behalf of the guest. These mappings are implemented as 256 bit guest
+ * supplied bitmaps indexed by plug number. The addresses of the bitmaps
+ * are registered with the HV through lv1_configure_irq_state_bitmap().
+ *
+ * The HV supports 256 plugs per thread, assigned as {0..255}, for a total
+ * of 512 plugs supported on a processor. To simplify the logic this
+ * implementation equates HV plug value to linux virq value, constrains each
+ * interrupt to have a system wide unique plug number, and limits the range
+ * of the plug values to map into the first dword of the bitmaps. This
+ * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
+ * that there is no constraint on how many in this set an individual thread
+ * can acquire.
+ */
+
+struct bmp {
+ struct {
+ unsigned long status;
+ unsigned long unused_1[3];
+ unsigned long mask;
+ unsigned long unused_2[3];
+ } __attribute__ ((packed));
+ spinlock_t lock;
+ unsigned long ipi_debug_brk_mask;
+};
+
+/**
+ * struct private - a per cpu data structure
+ * @node: HV node id
+ * @cpu: HV thread id
+ * @bmp: an HV bmp structure
+ */
+
+struct private {
+ unsigned long node;
+ unsigned int cpu;
+ struct bmp bmp;
+};
+
+#if defined(DEBUG)
+static void _dump_64_bmp(const char *header, const unsigned long *p, unsigned cpu,
+ const char* func, int line)
+{
+ pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
+ func, line, header, cpu,
+ *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
+ *p & 0xffff);
+}
+
+static void __attribute__ ((unused)) _dump_256_bmp(const char *header,
+ const unsigned long *p, unsigned cpu, const char* func, int line)
+{
+ pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
+ func, line, header, cpu, p[0], p[1], p[2], p[3]);
+}
+
+#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
+static void _dump_bmp(struct private* pd, const char* func, int line)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd->bmp.lock, flags);
+ _dump_64_bmp("stat", &pd->bmp.status, pd->cpu, func, line);
+ _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+ spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+
+#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_mask(struct private* pd,
+ const char* func, int line)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pd->bmp.lock, flags);
+ _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+ spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+#else
+static void dump_bmp(struct private* pd) {}
+#endif /* defined(DEBUG) */
+
+static void chip_mask(unsigned int virq)
+{
+ unsigned long flags;
+ struct private *pd = get_irq_chip_data(virq);
+
+ pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+ BUG_ON(virq < NUM_ISA_INTERRUPTS);
+ BUG_ON(virq > PS3_PLUG_MAX);
+
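+ /* plug n is bit n counted from the msb of the first bitmap dword */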
+ spin_lock_irqsave(&pd->bmp.lock, flags);
+ pd->bmp.mask &= ~(0x8000000000000000UL >> virq);
+ spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+ lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_unmask(unsigned int virq)
+{
+ unsigned long flags;
+ struct private *pd = get_irq_chip_data(virq);
+
+ pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+ BUG_ON(virq < NUM_ISA_INTERRUPTS);
+ BUG_ON(virq > PS3_PLUG_MAX);
+
+ spin_lock_irqsave(&pd->bmp.lock, flags);
+ pd->bmp.mask |= (0x8000000000000000UL >> virq);
+ spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+ lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_eoi(unsigned int virq)
+{
+ lv1_end_of_interrupt(virq);
+}
+
+static struct irq_chip irq_chip = {
+ .typename = "ps3",
+ .mask = chip_mask,
+ .unmask = chip_unmask,
+ .eoi = chip_eoi,
+};
+
+static void host_unmap(struct irq_host *h, unsigned int virq)
+{
+ int result;
+
+ pr_debug("%s:%d: virq %d\n", __func__, __LINE__, virq);
+
+ lv1_disconnect_irq_plug(virq);
+
+ result = set_irq_chip_data(virq, NULL);
+ BUG_ON(result);
+}
+
+static DEFINE_PER_CPU(struct private, private);
+
+static int host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ int result;
+ unsigned int cpu;
+
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+ pr_debug("%s:%d: hwirq %lu => virq %u\n", __func__, __LINE__, hwirq,
+ virq);
+
+ /* bind this virq to a cpu */
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ result = lv1_connect_irq_plug(virq, hwirq);
+ preempt_enable();
+
+ if (result) {
+ pr_info("%s:%d: lv1_connect_irq_plug failed:"
+ " %s\n", __func__, __LINE__, ps3_result(result));
+ return -EPERM;
+ }
+
+ result = set_irq_chip_data(virq, &per_cpu(private, cpu));
+ BUG_ON(result);
+
+ set_irq_chip_and_handler(virq, &irq_chip, handle_fasteoi_irq);
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+static struct irq_host_ops host_ops = {
+ .map = host_map,
+ .unmap = host_unmap,
+};
+
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
+{
+ struct private *pd = &per_cpu(private, cpu);
+
+ pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
+
+ pr_debug("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+ cpu, virq, pd->bmp.ipi_debug_brk_mask);
+}
+
+static int bmp_get_and_clear_status_bit(struct bmp *m)
+{
+ unsigned long flags;
+ unsigned int bit;
+ unsigned long x;
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ /* check for ipi break first to stop this cpu ASAP */
+
+ if (m->status & m->ipi_debug_brk_mask) {
+ m->status &= ~m->ipi_debug_brk_mask;
+ spin_unlock_irqrestore(&m->lock, flags);
+ return __ilog2(m->ipi_debug_brk_mask);
+ }
+
+ x = (m->status & m->mask);
+
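+ /* find the lowest numbered pending, unmasked plug above the isa range */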
+ for (bit = NUM_ISA_INTERRUPTS, x <<= bit; x; bit++, x <<= 1)
+ if (x & 0x8000000000000000UL) {
+ m->status &= ~(0x8000000000000000UL >> bit);
+ spin_unlock_irqrestore(&m->lock, flags);
+ return bit;
+ }
+
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ pr_debug("%s:%d: not found\n", __func__, __LINE__);
+ return -1;
+}
+
+unsigned int ps3_get_irq(void)
+{
+ int plug;
+
+ struct private *pd = &__get_cpu_var(private);
+
+ plug = bmp_get_and_clear_status_bit(&pd->bmp);
+
+ if (plug < 1) {
+ pr_debug("%s:%d: no plug found: cpu %u\n", __func__, __LINE__,
+ pd->cpu);
+ dump_bmp(&per_cpu(private, 0));
+ dump_bmp(&per_cpu(private, 1));
+ return NO_IRQ;
+ }
+
+#if defined(DEBUG)
+ if (plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX) {
+ dump_bmp(&per_cpu(private, 0));
+ dump_bmp(&per_cpu(private, 1));
+ BUG();
+ }
+#endif
+ return plug;
+}
+
+void __init ps3_init_IRQ(void)
+{
+ int result;
+ unsigned long node;
+ unsigned cpu;
+ struct irq_host *host;
+
+ lv1_get_logical_ppe_id(&node);
+
+ host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &host_ops,
+ PS3_INVALID_OUTLET);
+ irq_set_default_host(host);
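+ /* limit virqs so that every plug maps into the first dword of the bitmaps */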
+ irq_set_virq_count(PS3_PLUG_MAX + 1);
+
+ for_each_possible_cpu(cpu) {
+ struct private *pd = &per_cpu(private, cpu);
+
+ pd->node = node;
+ pd->cpu = cpu;
+ spin_lock_init(&pd->bmp.lock);
+
+ result = lv1_configure_irq_state_bitmap(node, cpu,
+ ps3_mm_phys_to_lpar(__pa(&pd->bmp.status)));
+
+ if (result)
+ pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
+ " %s\n", __func__, __LINE__,
+ ps3_result(result));
+ }
+
+ ppc_md.get_irq = ps3_get_irq;
+}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
new file mode 100644
index 00000000000..49c0d010d49
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -0,0 +1,831 @@
+/*
+ * PS3 address space management.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memory_hotplug.h>
+
+#include <asm/firmware.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+enum {
+#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+ USE_LPAR_ADDR = 1,
+#else
+ USE_LPAR_ADDR = 0,
+#endif
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+ USE_DYNAMIC_DMA = 1,
+#else
+ USE_DYNAMIC_DMA = 0,
+#endif
+};
+
+enum {
+ PAGE_SHIFT_4K = 12U,
+ PAGE_SHIFT_64K = 16U,
+ PAGE_SHIFT_16M = 24U,
+};
+
+static unsigned long make_page_sizes(unsigned long a, unsigned long b)
+{
+ return (a << 56) | (b << 48);
+}
+
+enum {
+ ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
+ ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
+};
+
+/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
+
+enum {
+ HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
+ HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
+};
+
+/*============================================================================*/
+/* virtual address space routines */
+/*============================================================================*/
+
+/**
+ * struct mem_region - memory region structure
+ * @base: base address
+ * @size: size in bytes
+ * @offset: difference between base and rm.size
+ */
+
+struct mem_region {
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+};
+
+/**
+ * struct map - address space state variables holder
+ * @total: total memory available as reported by HV
+ * @vas_id: HV virtual address space id
+ * @htab_size: htab size in bytes
+ *
+ * The HV virtual address space (vas) allows for hotplug memory regions.
+ * Memory regions can be created and destroyed in the vas at runtime.
+ * @rm: real mode (bootmem) region
+ * @r1: hotplug memory region(s)
+ *
+ * ps3 addresses
+ * virt_addr: a cpu 'translated' effective address
+ * phys_addr: an address in what Linux thinks is the physical address space
+ * lpar_addr: an address in the HV virtual address space
+ * bus_addr: an io controller 'translated' address on a device bus
+ */
+
+struct map {
+ unsigned long total;
+ unsigned long vas_id;
+ unsigned long htab_size;
+ struct mem_region rm;
+ struct mem_region r1;
+};
+
+#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
+static void _debug_dump_map(const struct map* m, const char* func, int line)
+{
+ DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
+ DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
+ DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
+ DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
+ DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
+ DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
+ DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
+}
+
+static struct map map;
+
+/**
+ * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
+ * @phys_addr: linux physical address
+ */
+
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
+{
+ BUG_ON(is_kernel_addr(phys_addr));
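+ /* addresses in the r1 region are shifted up by the r1 offset */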
+ if (USE_LPAR_ADDR)
+ return phys_addr;
+ else
+ return (phys_addr < map.rm.size || phys_addr >= map.total)
+ ? phys_addr : phys_addr + map.r1.offset;
+}
+
+EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
+
+/**
+ * ps3_mm_vas_create - create the virtual address space
+ */
+
+void __init ps3_mm_vas_create(unsigned long* htab_size)
+{
+ int result;
+ unsigned long start_address;
+ unsigned long size;
+ unsigned long access_right;
+ unsigned long max_page_size;
+ unsigned long flags;
+
+ result = lv1_query_logical_partition_address_region_info(0,
+ &start_address, &size, &access_right, &max_page_size,
+ &flags);
+
+ if (result) {
+ DBG("%s:%d: lv1_query_logical_partition_address_region_info "
+ "failed: %s\n", __func__, __LINE__,
+ ps3_result(result));
+ goto fail;
+ }
+
+ if (max_page_size < PAGE_SHIFT_16M) {
+ DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
+ max_page_size);
+ goto fail;
+ }
+
+ BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
+ BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
+
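+ /* request a vas with two page sizes: 16M and 64K */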
+ result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
+ 2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
+ &map.vas_id, &map.htab_size);
+
+ if (result) {
+ DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ goto fail;
+ }
+
+ result = lv1_select_virtual_address_space(map.vas_id);
+
+ if (result) {
+ DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ goto fail;
+ }
+
+ *htab_size = map.htab_size;
+
+ debug_dump_map(&map);
+
+ return;
+
+fail:
+ panic("ps3_mm_vas_create failed");
+}
+
+/**
+ * ps3_mm_vas_destroy - Destroy the virtual address space created by ps3_mm_vas_create().
+ */
+
+void ps3_mm_vas_destroy(void)
+{
+ if (map.vas_id) {
+ lv1_select_virtual_address_space(0);
+ lv1_destruct_virtual_address_space(map.vas_id);
+ map.vas_id = 0;
+ }
+}
+
+/*============================================================================*/
+/* memory hotplug routines */
+/*============================================================================*/
+
+/**
+ * ps3_mm_region_create - create a memory region in the vas
+ * @r: pointer to a struct mem_region to accept initialized values
+ * @size: requested region size
+ *
+ * This implementation creates the region with the vas large page size.
+ * @size is rounded down to a multiple of the vas large page size.
+ */
+
+int ps3_mm_region_create(struct mem_region *r, unsigned long size)
+{
+ int result;
+ unsigned long muid;
+
+ r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
+
+ DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
+ DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
+ DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
+ (unsigned long)(size - r->size),
+ (size - r->size) / 1024 / 1024);
+
+ if (r->size == 0) {
+ DBG("%s:%d: size == 0\n", __func__, __LINE__);
+ result = -1;
+ goto zero_region;
+ }
+
+ result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
+ ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
+
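+ /* the new region must land above the boot (rm) region for r1.offset to be valid */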
+ if (result || r->base < map.rm.size) {
+ DBG("%s:%d: lv1_allocate_memory failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ goto zero_region;
+ }
+
+ r->offset = r->base - map.rm.size;
+ return result;
+
+zero_region:
+ r->size = r->base = r->offset = 0;
+ return result;
+}
+
+/**
+ * ps3_mm_region_destroy - destroy a memory region
+ * @r: pointer to struct mem_region
+ */
+
+void ps3_mm_region_destroy(struct mem_region *r)
+{
+ if (r->base) {
+ lv1_release_memory(r->base);
+ r->size = r->base = r->offset = 0;
+ map.total = map.rm.size;
+ }
+}
+
+/**
+ * ps3_mm_add_memory - hot add memory
+ */
+
+static int __init ps3_mm_add_memory(void)
+{
+ int result;
+ unsigned long start_addr;
+ unsigned long start_pfn;
+ unsigned long nr_pages;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return 0;
+
+ BUG_ON(!mem_init_done);
+
+ start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
+ start_pfn = start_addr >> PAGE_SHIFT;
+ nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
+ __func__, __LINE__, start_addr, start_pfn, nr_pages);
+
+ result = add_memory(0, start_addr, map.r1.size);
+
+ if (result) {
+ DBG("%s:%d: add_memory failed: (%d)\n",
+ __func__, __LINE__, result);
+ return result;
+ }
+
+ result = online_pages(start_pfn, nr_pages);
+
+ if (result)
+ DBG("%s:%d: online_pages failed: (%d)\n",
+ __func__, __LINE__, result);
+
+ return result;
+}
+
+core_initcall(ps3_mm_add_memory);
+
+/*============================================================================*/
+/* dma routines */
+/*============================================================================*/
+
+/**
+ * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
+ * @r: pointer to dma region structure
+ * @lpar_addr: HV lpar address
+ */
+
+static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
+ unsigned long lpar_addr)
+{
+ BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
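+ /* rm addresses map directly; r1 addresses are shifted down to keep the bus range contiguous */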
+ return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
+ : lpar_addr - map.r1.offset);
+}
+
+#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
+static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
+ int line)
+{
+ DBG("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
+ r->did.dev_id);
+ DBG("%s:%d: page_size %u\n", func, line, r->page_size);
+ DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
+ DBG("%s:%d: len %lxh\n", func, line, r->len);
+}
+
+/**
+ * dma_chunk - A chunk of dma pages mapped by the io controller.
+ * @region: The dma region that owns this chunk.
+ * @lpar_addr: Starting lpar address of the area to map.
+ * @bus_addr: Starting ioc bus address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
+ * list of all chunks owned by the region.
+ *
+ * This implementation uses a very simple dma page manager
+ * based on the dma_chunk structure. This scheme assumes
+ * that all drivers use very well behaved dma ops.
+ */
+
+struct dma_chunk {
+ struct ps3_dma_region *region;
+ unsigned long lpar_addr;
+ unsigned long bus_addr;
+ unsigned long len;
+ struct list_head link;
+ unsigned int usage_count;
+};
+
+#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
+static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
+ int line)
+{
+ DBG("%s:%d: r.dev %u:%u\n", func, line,
+ c->region->did.bus_id, c->region->did.dev_id);
+ DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
+ DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
+ DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
+ DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
+ DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
+ DBG("%s:%d: c.len %lxh\n", func, line, c->len);
+}
+
+static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
+ unsigned long bus_addr, unsigned long len)
+{
+ struct dma_chunk *c;
+ unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
+ unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+
+ list_for_each_entry(c, &r->chunk_list.head, link) {
+ /* intersection */
+ if (aligned_bus >= c->bus_addr
+ && aligned_bus < c->bus_addr + c->len
+ && aligned_bus + aligned_len <= c->bus_addr + c->len) {
+ return c;
+ }
+ /* below */
+ if (aligned_bus + aligned_len <= c->bus_addr) {
+ continue;
+ }
+ /* above */
+ if (aligned_bus >= c->bus_addr + c->len) {
+ continue;
+ }
+
+ /* we don't handle the multi-chunk case for now */
+
+ dma_dump_chunk(c);
+ BUG();
+ }
+ return NULL;
+}
+
+static int dma_free_chunk(struct dma_chunk *c)
+{
+ int result = 0;
+
+ if (c->bus_addr) {
+ result = lv1_unmap_device_dma_region(c->region->did.bus_id,
+ c->region->did.dev_id, c->bus_addr, c->len);
+ BUG_ON(result);
+ }
+
+ kfree(c);
+ return result;
+}
+
+/**
+ * dma_map_pages - Maps dma pages into the io controller bus address space.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @phys_addr: Starting physical address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
+ *
+ * This is the lowest level dma mapping routine, and is the one that will
+ * make the HV call to add the pages into the io controller address space.
+ */
+
+static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
+ unsigned long len, struct dma_chunk **c_out)
+{
+ int result;
+ struct dma_chunk *c;
+
+ c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
+
+ if (!c) {
+ result = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ c->region = r;
+ c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+ c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
+ c->len = len;
+
+ result = lv1_map_device_dma_region(c->region->did.bus_id,
+ c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
+ 0xf800000000000000UL);
+
+ if (result) {
+ DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ goto fail_map;
+ }
+
+ list_add(&c->link, &r->chunk_list.head);
+
+ *c_out = c;
+ return 0;
+
+fail_map:
+ kfree(c);
+fail_alloc:
+ *c_out = NULL;
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+/**
+ * dma_region_create - Create a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region create routine, and is the one that
+ * will make the HV call to create the region.
+ */
+
+static int dma_region_create(struct ps3_dma_region* r)
+{
+ int result;
+
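+ /* size the ioc region to cover all of ram, rounded up to the dma page size */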
+ r->len = _ALIGN_UP(map.total, 1 << r->page_size);
+ INIT_LIST_HEAD(&r->chunk_list.head);
+ spin_lock_init(&r->chunk_list.lock);
+
+ result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
+ r->len, r->page_size, r->region_type, &r->bus_addr);
+
+ dma_dump_region(r);
+
+ if (result) {
+ DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ r->len = r->bus_addr = 0;
+ }
+
+ return result;
+}
+
+/**
+ * dma_region_free - Free a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region free routine, and is the one that
+ * will make the HV call to free the region.
+ */
+
+static int dma_region_free(struct ps3_dma_region* r)
+{
+ int result;
+ struct dma_chunk *c;
+ struct dma_chunk *tmp;
+
+ list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
+ list_del(&c->link);
+ dma_free_chunk(c);
+ }
+
+ result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
+ r->bus_addr);
+
+ if (result)
+ DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+
+ r->len = r->bus_addr = 0;
+
+ return result;
+}
+
+/**
+ * dma_map_area - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This is the common dma mapping routine.
+ */
+
+static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
+ unsigned long len, unsigned long *bus_addr)
+{
+ int result;
+ unsigned long flags;
+ struct dma_chunk *c;
+ unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+ : virt_addr;
+
+ *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+
+ if (!USE_DYNAMIC_DMA) {
+ unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+ DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
+ virt_addr);
+ DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
+ phys_addr);
+ DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
+ lpar_addr);
+ DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
+ DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
+ *bus_addr, len);
+ }
+
+ spin_lock_irqsave(&r->chunk_list.lock, flags);
+ c = dma_find_chunk(r, *bus_addr, len);
+
+ if (c) {
+ c->usage_count++;
+ spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+ return 0;
+ }
+
+ result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
+ _ALIGN_UP(len, 1 << r->page_size), &c);
+
+ if (result) {
+ *bus_addr = 0;
+ DBG("%s:%d: dma_map_pages failed (%d)\n",
+ __func__, __LINE__, result);
+ spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+ return result;
+ }
+
+ c->usage_count = 1;
+
+ spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+ return result;
+}
+
+/**
+ * dma_unmap_area - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This is the common dma unmap routine.
+ */
+
+int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
+ unsigned long len)
+{
+ unsigned long flags;
+ struct dma_chunk *c;
+
+ spin_lock_irqsave(&r->chunk_list.lock, flags);
+ c = dma_find_chunk(r, bus_addr, len);
+
+ if (!c) {
+ unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+ 1 << r->page_size);
+ unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+ DBG("%s:%d: not found: bus_addr %lxh\n",
+ __func__, __LINE__, bus_addr);
+ DBG("%s:%d: not found: len %lxh\n",
+ __func__, __LINE__, len);
+ DBG("%s:%d: not found: aligned_bus %lxh\n",
+ __func__, __LINE__, aligned_bus);
+ DBG("%s:%d: not found: aligned_len %lxh\n",
+ __func__, __LINE__, aligned_len);
+ BUG();
+ }
+
+ c->usage_count--;
+
+ if (!c->usage_count) {
+ list_del(&c->link);
+ dma_free_chunk(c);
+ }
+
+ spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+ return 0;
+}
+
+/**
+ * dma_region_create_linear - Set up a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine creates an HV dma region for the device and maps all available
+ * ram into the io controller bus address space.
+ */
+
+static int dma_region_create_linear(struct ps3_dma_region *r)
+{
+ int result;
+ unsigned long tmp;
+
+ /* force 16M dma pages for linear mapping */
+
+ if (r->page_size != PS3_DMA_16M) {
+ pr_info("%s:%d: forcing 16M pages for linear map\n",
+ __func__, __LINE__);
+ r->page_size = PS3_DMA_16M;
+ }
+
+ result = dma_region_create(r);
+ BUG_ON(result);
+
+ result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
+ BUG_ON(result);
+
+ if (USE_LPAR_ADDR)
+ result = dma_map_area(r, map.r1.base, map.r1.size,
+ &tmp);
+ else
+ result = dma_map_area(r, map.rm.size, map.r1.size,
+ &tmp);
+
+ BUG_ON(result);
+
+ return result;
+}
+
+/**
+ * dma_region_free_linear - Free a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine will unmap all mapped areas and free the HV dma region.
+ */
+
+static int dma_region_free_linear(struct ps3_dma_region *r)
+{
+ int result;
+
+ result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
+ BUG_ON(result);
+
+ result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
+ map.r1.size);
+ BUG_ON(result);
+
+ result = dma_region_free(r);
+ BUG_ON(result);
+
+ return result;
+}
+
+/**
+ * dma_map_area_linear - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This routine just returns the corresponding bus address. Actual mapping
+ * occurs in dma_region_create_linear().
+ */
+
+static int dma_map_area_linear(struct ps3_dma_region *r,
+ unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
+{
+ unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+ : virt_addr;
+ *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+ return 0;
+}
+
+/**
+ * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This routine does nothing. Unmapping occurs in dma_region_free_linear().
+ */
+
+static int dma_unmap_area_linear(struct ps3_dma_region *r,
+ unsigned long bus_addr, unsigned long len)
+{
+ return 0;
+}
+
+int ps3_dma_region_create(struct ps3_dma_region *r)
+{
+ return (USE_DYNAMIC_DMA)
+ ? dma_region_create(r)
+ : dma_region_create_linear(r);
+}
+
+int ps3_dma_region_free(struct ps3_dma_region *r)
+{
+ return (USE_DYNAMIC_DMA)
+ ? dma_region_free(r)
+ : dma_region_free_linear(r);
+}
+
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+ unsigned long len, unsigned long *bus_addr)
+{
+ return (USE_DYNAMIC_DMA)
+ ? dma_map_area(r, virt_addr, len, bus_addr)
+ : dma_map_area_linear(r, virt_addr, len, bus_addr);
+}
+
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+ unsigned long len)
+{
+ return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
+ : dma_unmap_area_linear(r, bus_addr, len);
+}
+
+/*============================================================================*/
+/* system startup routines */
+/*============================================================================*/
+
+/**
+ * ps3_mm_init - initialize the address space state variables
+ */
+
+void __init ps3_mm_init(void)
+{
+ int result;
+
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
+ &map.total);
+
+ if (result)
+ panic("ps3_repository_read_mm_info() failed");
+
+ map.rm.offset = map.rm.base;
+ map.vas_id = map.htab_size = 0;
+
+ /* this implementation assumes map.rm.base is zero */
+
+ BUG_ON(map.rm.base);
+ BUG_ON(!map.rm.size);
+
+ lmb_add(map.rm.base, map.rm.size);
+ lmb_analyze();
+
+ /* arrange to do this in ps3_mm_add_memory */
+ ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+/**
+ * ps3_mm_shutdown - final cleanup of address space
+ */
+
+void ps3_mm_shutdown(void)
+{
+ ps3_mm_region_destroy(&map.r1);
+ map.total = map.rm.size;
+}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
new file mode 100644
index 00000000000..58358305dc1
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -0,0 +1,259 @@
+/*
+ * PS3 'Other OS' area data.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <asm/lmb.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+enum {
+ OS_AREA_SEGMENT_SIZE = 0x200,
+};
+
+enum {
+ HEADER_LDR_FORMAT_RAW = 0,
+ HEADER_LDR_FORMAT_GZIP = 1,
+};
+
+/**
+ * struct os_area_header - os area header segment.
+ * @magic_num: Always 'cell_ext_os_area'.
+ * @hdr_version: Header format version number.
+ * @os_area_offset: Starting segment number of os image area.
+ * @ldr_area_offset: Starting segment number of bootloader image area.
+ * @ldr_format: HEADER_LDR_FORMAT flag.
+ * @ldr_size: Size of bootloader image in bytes.
+ *
+ * Note that the docs refer to area offsets. These are offsets in units of
+ * segments from the start of the os area (top of the header). These are
+ * better thought of as segment numbers. The portion of the os area starting
+ * at @os_area_offset is reserved for the os image.
+ */
+
+struct os_area_header {
+ s8 magic_num[16];
+ u32 hdr_version;
+ u32 os_area_offset;
+ u32 ldr_area_offset;
+ u32 _reserved_1;
+ u32 ldr_format;
+ u32 ldr_size;
+ u32 _reserved_2[6];
+} __attribute__ ((packed));
+
+enum {
+ PARAM_BOOT_FLAG_GAME_OS = 0,
+ PARAM_BOOT_FLAG_OTHER_OS = 1,
+};
+
+enum {
+ PARAM_AV_MULTI_OUT_NTSC = 0,
+ PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+ PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+ PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum {
+ PARAM_CTRL_BUTTON_O_IS_YES = 0,
+ PARAM_CTRL_BUTTON_X_IS_YES = 1,
+};
+
+/**
+ * struct os_area_params - os area params segment.
+ * @boot_flag: User preference of operating system, PARAM_BOOT_FLAG flag.
+ * @num_params: Number of params in this (params) segment.
+ * @rtc_diff: Difference in seconds between 1970 and the ps3 rtc value.
+ * @av_multi_out: User preference of AV output, PARAM_AV_MULTI_OUT flag.
+ * @ctrl_button: User preference of controller button config, PARAM_CTRL_BUTTON
+ * flag.
+ * @static_ip_addr: User preference of static IP address.
+ * @network_mask: User preference of static network mask.
+ * @default_gateway: User preference of static default gateway.
+ * @dns_primary: User preference of static primary dns server.
+ * @dns_secondary: User preference of static secondary dns server.
+ *
+ * User preference of zero for static_ip_addr means use dhcp.
+ */
+
+struct os_area_params {
+ u32 boot_flag;
+ u32 _reserved_1[3];
+ u32 num_params;
+ u32 _reserved_2[3];
+ /* param 0 */
+ s64 rtc_diff;
+ u8 av_multi_out;
+ u8 ctrl_button;
+ u8 _reserved_3[6];
+ /* param 1 */
+ u8 static_ip_addr[4];
+ u8 network_mask[4];
+ u8 default_gateway[4];
+ u8 _reserved_4[4];
+ /* param 2 */
+ u8 dns_primary[4];
+ u8 dns_secondary[4];
+ u8 _reserved_5[8];
+} __attribute__ ((packed));
+
+/**
+ * struct saved_params - Static working copies of data from the 'Other OS' area.
+ *
+ * For the convenience of the guest, the HV makes a copy of the 'Other OS' area
+ * in flash to a high address in the boot memory region and then puts that RAM
+ * address and the byte count into the repository for retrieval by the guest.
+ * We copy the data we want into a static variable and allow the memory setup
+ * by the HV to be claimed by the lmb manager.
+ */
+
+struct saved_params {
+ /* param 0 */
+ s64 rtc_diff;
+ unsigned int av_multi_out;
+ unsigned int ctrl_button;
+ /* param 1 */
+ u8 static_ip_addr[4];
+ u8 network_mask[4];
+ u8 default_gateway[4];
+ /* param 2 */
+ u8 dns_primary[4];
+ u8 dns_secondary[4];
+};
+
+static struct saved_params saved_params;
+
+#define dump_header(_a) _dump_header(_a, __func__, __LINE__)
+static void _dump_header(const struct os_area_header __iomem *h, const char* func,
+ int line)
+{
+ pr_debug("%s:%d: h.magic_num: '%s'\n", func, line,
+ h->magic_num);
+ pr_debug("%s:%d: h.hdr_version: %u\n", func, line,
+ h->hdr_version);
+ pr_debug("%s:%d: h.os_area_offset: %u\n", func, line,
+ h->os_area_offset);
+ pr_debug("%s:%d: h.ldr_area_offset: %u\n", func, line,
+ h->ldr_area_offset);
+ pr_debug("%s:%d: h.ldr_format: %u\n", func, line,
+ h->ldr_format);
+ pr_debug("%s:%d: h.ldr_size: %xh\n", func, line,
+ h->ldr_size);
+}
+
+#define dump_params(_a) _dump_params(_a, __func__, __LINE__)
+static void _dump_params(const struct os_area_params __iomem *p, const char* func,
+ int line)
+{
+ pr_debug("%s:%d: p.boot_flag: %u\n", func, line, p->boot_flag);
+ pr_debug("%s:%d: p.num_params: %u\n", func, line, p->num_params);
+ pr_debug("%s:%d: p.rtc_diff %ld\n", func, line, p->rtc_diff);
+ pr_debug("%s:%d: p.av_multi_out %u\n", func, line, p->av_multi_out);
+ pr_debug("%s:%d: p.ctrl_button: %u\n", func, line, p->ctrl_button);
+ pr_debug("%s:%d: p.static_ip_addr: %u.%u.%u.%u\n", func, line,
+ p->static_ip_addr[0], p->static_ip_addr[1],
+ p->static_ip_addr[2], p->static_ip_addr[3]);
+ pr_debug("%s:%d: p.network_mask: %u.%u.%u.%u\n", func, line,
+ p->network_mask[0], p->network_mask[1],
+ p->network_mask[2], p->network_mask[3]);
+ pr_debug("%s:%d: p.default_gateway: %u.%u.%u.%u\n", func, line,
+ p->default_gateway[0], p->default_gateway[1],
+ p->default_gateway[2], p->default_gateway[3]);
+ pr_debug("%s:%d: p.dns_primary: %u.%u.%u.%u\n", func, line,
+ p->dns_primary[0], p->dns_primary[1],
+ p->dns_primary[2], p->dns_primary[3]);
+ pr_debug("%s:%d: p.dns_secondary: %u.%u.%u.%u\n", func, line,
+ p->dns_secondary[0], p->dns_secondary[1],
+ p->dns_secondary[2], p->dns_secondary[3]);
+}
+
+static int __init verify_header(const struct os_area_header *header)
+{
+ if (memcmp(header->magic_num, "cell_ext_os_area", 16)) {
+ pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
+ return -1;
+ }
+
+ if (header->hdr_version < 1) {
+ pr_debug("%s:%d hdr_version failed\n", __func__, __LINE__);
+ return -1;
+ }
+
+ if (header->os_area_offset > header->ldr_area_offset) {
+ pr_debug("%s:%d offsets failed\n", __func__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+int __init ps3_os_area_init(void)
+{
+ int result;
+ u64 lpar_addr;
+ unsigned int size;
+ struct os_area_header *header;
+ struct os_area_params *params;
+
+ result = ps3_repository_read_boot_dat_info(&lpar_addr, &size);
+
+ if (result) {
+ pr_debug("%s:%d ps3_repository_read_boot_dat_info failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
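+ /* the header occupies the first segment; the params segment follows it */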
+ header = (struct os_area_header *)__va(lpar_addr);
+ params = (struct os_area_params *)__va(lpar_addr + OS_AREA_SEGMENT_SIZE);
+
+ result = verify_header(header);
+
+ if (result) {
+ pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+ dump_header(header);
+ return -EIO;
+ }
+
+ dump_header(header);
+ dump_params(params);
+
+ saved_params.rtc_diff = params->rtc_diff;
+ saved_params.av_multi_out = params->av_multi_out;
+ saved_params.ctrl_button = params->ctrl_button;
+ memcpy(saved_params.static_ip_addr, params->static_ip_addr, 4);
+ memcpy(saved_params.network_mask, params->network_mask, 4);
+ memcpy(saved_params.default_gateway, params->default_gateway, 4);
+ memcpy(saved_params.dns_primary, params->dns_primary, 4);
+ memcpy(saved_params.dns_secondary, params->dns_secondary, 4);
+
+ return result;
+}
+
+/**
+ * ps3_os_area_rtc_diff - Returns the ps3 rtc diff value.
+ *
+ * The ps3 rtc maintains a value that approximates seconds since
+ * 2000-01-01 00:00:00 UTC. Returns the exact number of seconds from 1970 to
+ * 2000 when saved_params.rtc_diff has not been properly set up.
+ */
+
+u64 ps3_os_area_rtc_diff(void)
+{
+ return saved_params.rtc_diff ? saved_params.rtc_diff : 946684800UL;
+}
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
new file mode 100644
index 00000000000..23b111bea9d
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -0,0 +1,68 @@
+/*
+ * PS3 platform declarations.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#if !defined(_PS3_PLATFORM_H)
+#define _PS3_PLATFORM_H
+
+#include <linux/rtc.h>
+
+/* htab */
+
+void __init ps3_hpte_init(unsigned long htab_size);
+void __init ps3_map_htab(void);
+
+/* mm */
+
+void __init ps3_mm_init(void);
+void __init ps3_mm_vas_create(unsigned long* htab_size);
+void ps3_mm_vas_destroy(void);
+void ps3_mm_shutdown(void);
+
+/* irq */
+
+void ps3_init_IRQ(void);
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+
+/* smp */
+
+void smp_init_ps3(void);
+void ps3_smp_cleanup_cpu(int cpu);
+
+/* time */
+
+void __init ps3_calibrate_decr(void);
+unsigned long __init ps3_get_boot_time(void);
+void ps3_get_rtc_time(struct rtc_time *time);
+int ps3_set_rtc_time(struct rtc_time *time);
+
+/* os area */
+
+int __init ps3_os_area_init(void);
+u64 ps3_os_area_rtc_diff(void);
+
+/* spu */
+
+#if defined(CONFIG_SPU_BASE)
+void ps3_spu_set_platform (void);
+#else
+static inline void ps3_spu_set_platform (void) {}
+#endif
+
+#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
new file mode 100644
index 00000000000..273a0d621bd
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -0,0 +1,840 @@
+/*
+ * PS3 repository routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+enum ps3_vendor_id {
+ PS3_VENDOR_ID_NONE = 0,
+ PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
+};
+
+enum ps3_lpar_id {
+ PS3_LPAR_ID_CURRENT = 0,
+ PS3_LPAR_ID_PME = 1,
+};
+
+#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
+static void _dump_field(const char *hdr, u64 n, const char* func, int line)
+{
+#if defined(DEBUG)
+ char s[16];
+ const char *const in = (const char *)&n;
+ unsigned int i;
+
+ for (i = 0; i < 8; i++)
+ s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
+ s[i] = 0;
+
+ pr_debug("%s:%d: %s%016lx : %s\n", func, line, hdr, n, s);
+#endif
+}
+
+#define dump_node_name(_a, _b, _c, _d, _e) \
+ _dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
+ u64 n4, const char* func, int line)
+{
+ pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ _dump_field("n1: ", n1, func, line);
+ _dump_field("n2: ", n2, func, line);
+ _dump_field("n3: ", n3, func, line);
+ _dump_field("n4: ", n4, func, line);
+}
+
+#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
+ _dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+ u64 v1, u64 v2, const char* func, int line)
+{
+ pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+ _dump_field("n1: ", n1, func, line);
+ _dump_field("n2: ", n2, func, line);
+ _dump_field("n3: ", n3, func, line);
+ _dump_field("n4: ", n4, func, line);
+ pr_debug("%s:%d: v1: %016lx\n", func, line, v1);
+ pr_debug("%s:%d: v2: %016lx\n", func, line, v2);
+}
+
+/**
+ * make_first_field - Make the first field of a repository node name.
+ * @text: Text portion of the field.
+ * @index: Numeric index portion of the field. Use zero for 'don't care'.
+ *
+ * This routine sets the vendor id to zero (non-vendor specific).
+ * Returns field value.
+ */
+
+static u64 make_first_field(const char *text, u64 index)
+{
+ u64 n;
+
+ strncpy((char *)&n, text, 8);
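+ /* the shift below keeps only the first four characters of text (big endian) */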
+ return PS3_VENDOR_ID_NONE + (n >> 32) + index;
+}
+
+/**
+ * make_field - Make subsequent fields of a repository node name.
+ * @text: Text portion of the field. Use "" for 'don't care'.
+ * @index: Numeric index portion of the field. Use zero for 'don't care'.
+ *
+ * Returns field value.
+ */
+
+static u64 make_field(const char *text, u64 index)
+{
+ u64 n;
+
+ strncpy((char *)&n, text, 8);
+ return n + index;
+}
+
+/**
+ * read_node - Read a repository node from raw fields.
+ * @n1: First field of node name.
+ * @n2: Second field of node name. Use zero for 'don't care'.
+ * @n3: Third field of node name. Use zero for 'don't care'.
+ * @n4: Fourth field of node name. Use zero for 'don't care'.
+ * @v1: First repository value (high word).
+ * @v2: Second repository value (low word). Optional parameter, use zero
+ * for 'don't care'.
+ */
+
+static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+ u64 *_v1, u64 *_v2)
+{
+ int result;
+ u64 v1;
+ u64 v2;
+
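+ /* PS3_LPAR_ID_CURRENT is a placeholder; resolve it to the real lpar id */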
+ if (lpar_id == PS3_LPAR_ID_CURRENT) {
+ u64 id;
+ lv1_get_logical_partition_id(&id);
+ lpar_id = id;
+ }
+
+ result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
+ &v2);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ dump_node_name(lpar_id, n1, n2, n3, n4);
+ return result;
+ }
+
+ dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
+
+ if (_v1)
+ *_v1 = v1;
+ if (_v2)
+ *_v2 = v2;
+
+ if (v1 && !_v1)
+ pr_debug("%s:%d: warning: discarding non-zero v1: %016lx\n",
+ __func__, __LINE__, v1);
+ if (v2 && !_v2)
+ pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
+ __func__, __LINE__, v2);
+
+ return result;
+}
+
+int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+ u64 *value)
+{
+ return read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field(bus_str, 0),
+ 0, 0,
+ value, 0);
+}
+
+int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id)
+{
+ int result;
+ u64 v1;
+ u64 v2; /* unused */
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("id", 0),
+ 0, 0,
+ &v1, &v2);
+ *bus_id = v1;
+ return result;
+}
+
+int ps3_repository_read_bus_type(unsigned int bus_index,
+ enum ps3_bus_type *bus_type)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("type", 0),
+ 0, 0,
+ &v1, 0);
+ *bus_type = v1;
+ return result;
+}
+
+int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+ unsigned int *num_dev)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("num_dev", 0),
+ 0, 0,
+ &v1, 0);
+ *num_dev = v1;
+ return result;
+}
+
+int ps3_repository_read_dev_str(unsigned int bus_index,
+ unsigned int dev_index, const char *dev_str, u64 *value)
+{
+ return read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field(dev_str, 0),
+ 0,
+ value, 0);
+}
+
+int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+ unsigned int *dev_id)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field("id", 0),
+ 0,
+ &v1, 0);
+ *dev_id = v1;
+ return result;
+}
+
+int ps3_repository_read_dev_type(unsigned int bus_index,
+ unsigned int dev_index, enum ps3_dev_type *dev_type)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field("type", 0),
+ 0,
+ &v1, 0);
+ *dev_type = v1;
+ return result;
+}
+
+int ps3_repository_read_dev_intr(unsigned int bus_index,
+ unsigned int dev_index, unsigned int intr_index,
+ unsigned int *intr_type, unsigned int* interrupt_id)
+{
+ int result;
+ u64 v1;
+ u64 v2;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field("intr", intr_index),
+ 0,
+ &v1, &v2);
+ *intr_type = v1;
+ *interrupt_id = v2;
+ return result;
+}
+
+int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+ unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field("reg", reg_index),
+ make_field("type", 0),
+ &v1, 0);
+ *reg_type = v1;
+ return result;
+}
+
+int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
+ unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
+{
+ return read_node(PS3_LPAR_ID_PME,
+ make_first_field("bus", bus_index),
+ make_field("dev", dev_index),
+ make_field("reg", reg_index),
+ make_field("data", 0),
+ bus_addr, len);
+}
+
+int ps3_repository_read_dev_reg(unsigned int bus_index,
+ unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type,
+ u64 *bus_addr, u64 *len)
+{
+ int result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
+ reg_index, reg_type);
+ return result ? result
+ : ps3_repository_read_dev_reg_addr(bus_index, dev_index,
+ reg_index, bus_addr, len);
+}
+
+#if defined(DEBUG)
+int ps3_repository_dump_resource_info(unsigned int bus_index,
+ unsigned int dev_index)
+{
+ int result = 0;
+ unsigned int res_index;
+
+ pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+ bus_index, dev_index);
+
+ for (res_index = 0; res_index < 10; res_index++) {
+ enum ps3_interrupt_type intr_type;
+ unsigned int interrupt_id;
+
+ result = ps3_repository_read_dev_intr(bus_index, dev_index,
+ res_index, &intr_type, &interrupt_id);
+
+ if (result) {
+ if (result != LV1_NO_ENTRY)
+ pr_debug("%s:%d ps3_repository_read_dev_intr"
+ " (%u:%u) failed\n", __func__, __LINE__,
+ bus_index, dev_index);
+ break;
+ }
+
+ pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+ __func__, __LINE__, bus_index, dev_index, intr_type,
+ interrupt_id);
+ }
+
+ for (res_index = 0; res_index < 10; res_index++) {
+ enum ps3_region_type reg_type;
+ u64 bus_addr;
+ u64 len;
+
+ result = ps3_repository_read_dev_reg(bus_index, dev_index,
+ res_index, &reg_type, &bus_addr, &len);
+
+ if (result) {
+ if (result != LV1_NO_ENTRY)
+ pr_debug("%s:%d ps3_repository_read_dev_reg"
+ " (%u:%u) failed\n", __func__, __LINE__,
+ bus_index, dev_index);
+ break;
+ }
+
+ pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+ __func__, __LINE__, bus_index, dev_index, reg_type,
+ bus_addr, len);
+ }
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+static int dump_device_info(unsigned int bus_index, unsigned int num_dev)
+{
+ int result = 0;
+ unsigned int dev_index;
+
+ pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, bus_index);
+
+ for (dev_index = 0; dev_index < num_dev; dev_index++) {
+ enum ps3_dev_type dev_type;
+ unsigned int dev_id;
+
+ result = ps3_repository_read_dev_type(bus_index, dev_index,
+ &dev_type);
+
+ if (result) {
+ pr_debug("%s:%d ps3_repository_read_dev_type"
+ " (%u:%u) failed\n", __func__, __LINE__,
+ bus_index, dev_index);
+ break;
+ }
+
+ result = ps3_repository_read_dev_id(bus_index, dev_index,
+ &dev_id);
+
+ if (result) {
+ pr_debug("%s:%d ps3_repository_read_dev_id"
+ " (%u:%u) failed\n", __func__, __LINE__,
+ bus_index, dev_index);
+ continue;
+ }
+
+ pr_debug("%s:%d (%u:%u): dev_type %u, dev_id %u\n", __func__,
+ __LINE__, bus_index, dev_index, dev_type, dev_id);
+
+ ps3_repository_dump_resource_info(bus_index, dev_index);
+ }
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+
+int ps3_repository_dump_bus_info(void)
+{
+ int result = 0;
+ unsigned int bus_index;
+
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+ for (bus_index = 0; bus_index < 10; bus_index++) {
+ enum ps3_bus_type bus_type;
+ unsigned int bus_id;
+ unsigned int num_dev;
+
+ result = ps3_repository_read_bus_type(bus_index, &bus_type);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_type(%u) failed\n",
+ __func__, __LINE__, bus_index);
+ break;
+ }
+
+ result = ps3_repository_read_bus_id(bus_index, &bus_id);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_id(%u) failed\n",
+ __func__, __LINE__, bus_index);
+ continue;
+ }
+
+ if (bus_index != bus_id)
+ pr_debug("%s:%d bus_index != bus_id\n",
+ __func__, __LINE__);
+
+ result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+ __func__, __LINE__, bus_index);
+ continue;
+ }
+
+ pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
+ __func__, __LINE__, bus_index, bus_type, bus_id,
+ num_dev);
+
+ dump_device_info(bus_index, num_dev);
+ }
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return result;
+}
+#endif /* defined(DEBUG) */
+
+static int find_device(unsigned int bus_index, unsigned int num_dev,
+ unsigned int start_dev_index, enum ps3_dev_type dev_type,
+ struct ps3_repository_device *dev)
+{
+ int result = 0;
+ unsigned int dev_index;
+
+ pr_debug("%s:%d: find dev_type %u\n", __func__, __LINE__, dev_type);
+
+ dev->dev_index = UINT_MAX;
+
+ for (dev_index = start_dev_index; dev_index < num_dev; dev_index++) {
+ enum ps3_dev_type x;
+
+ result = ps3_repository_read_dev_type(bus_index, dev_index,
+ &x);
+
+ if (result) {
+ pr_debug("%s:%d read_dev_type failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ if (x == dev_type)
+ break;
+ }
+
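+ /* BUG if the scan reached the end without finding a matching device. */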
+ BUG_ON(dev_index == num_dev);
+
+ pr_debug("%s:%d: found dev_type %u at dev_index %u\n",
+ __func__, __LINE__, dev_type, dev_index);
+
+ result = ps3_repository_read_dev_id(bus_index, dev_index,
+ &dev->did.dev_id);
+
+ if (result) {
+ pr_debug("%s:%d read_dev_id failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ dev->dev_index = dev_index;
+
+ pr_debug("%s:%d found: dev_id %u\n", __func__, __LINE__,
+ dev->did.dev_id);
+
+ return result;
+}
+
+int ps3_repository_find_device (enum ps3_bus_type bus_type,
+ enum ps3_dev_type dev_type,
+ const struct ps3_repository_device *start_dev,
+ struct ps3_repository_device *dev)
+{
+ int result = 0;
+ unsigned int bus_index;
+ unsigned int num_dev;
+
+ pr_debug("%s:%d: find bus_type %u, dev_type %u\n", __func__, __LINE__,
+ bus_type, dev_type);
+
+ dev->bus_index = UINT_MAX;
+
+ for (bus_index = start_dev ? start_dev->bus_index : 0; bus_index < 10;
+ bus_index++) {
+ enum ps3_bus_type x;
+
+ result = ps3_repository_read_bus_type(bus_index, &x);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_type failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+ if (x == bus_type)
+ break;
+ }
+
+ BUG_ON(bus_index == 10);
+
+ pr_debug("%s:%d: found bus_type %u at bus_index %u\n",
+ __func__, __LINE__, bus_type, bus_index);
+
+ result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_num_dev failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ result = find_device(bus_index, num_dev, start_dev
+ ? start_dev->dev_index + 1 : 0, dev_type, dev);
+
+ if (result) {
+ pr_debug("%s:%d get_did failed\n", __func__, __LINE__);
+ return result;
+ }
+
+ result = ps3_repository_read_bus_id(bus_index, &dev->did.bus_id);
+
+ if (result) {
+ pr_debug("%s:%d read_bus_id failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ dev->bus_index = bus_index;
+
+ pr_debug("%s:%d found: bus_id %u, dev_id %u\n",
+ __func__, __LINE__, dev->did.bus_id, dev->did.dev_id);
+
+ return result;
+}
+
+int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+ enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
+{
+ int result = 0;
+ unsigned int res_index;
+
+ pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
+
+ *interrupt_id = UINT_MAX;
+
+ for (res_index = 0; res_index < 10; res_index++) {
+ enum ps3_interrupt_type t;
+ unsigned int id;
+
+ result = ps3_repository_read_dev_intr(dev->bus_index,
+ dev->dev_index, res_index, &t, &id);
+
+ if (result) {
+ pr_debug("%s:%d read_dev_intr failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ if (t == intr_type) {
+ *interrupt_id = id;
+ break;
+ }
+ }
+
+ BUG_ON(res_index == 10);
+
+ pr_debug("%s:%d: found intr_type %u at res_index %u\n",
+ __func__, __LINE__, intr_type, res_index);
+
+ return result;
+}
+
+int ps3_repository_find_region(const struct ps3_repository_device *dev,
+ enum ps3_region_type reg_type, u64 *bus_addr, u64 *len)
+{
+ int result = 0;
+ unsigned int res_index;
+
+ pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
+
+ *bus_addr = *len = 0;
+
+ for (res_index = 0; res_index < 10; res_index++) {
+ enum ps3_region_type t;
+ u64 a;
+ u64 l;
+
+ result = ps3_repository_read_dev_reg(dev->bus_index,
+ dev->dev_index, res_index, &t, &a, &l);
+
+ if (result) {
+ pr_debug("%s:%d read_dev_reg failed\n",
+ __func__, __LINE__);
+ return result;
+ }
+
+ if (t == reg_type) {
+ *bus_addr = a;
+ *len = l;
+ break;
+ }
+ }
+
+ BUG_ON(res_index == 10);
+
+ pr_debug("%s:%d: found reg_type %u at res_index %u\n",
+ __func__, __LINE__, reg_type, res_index);
+
+ return result;
+}
+
+int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
+{
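+ /* Note: the raw ppe_id is used directly as the third node key. */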
+ return read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("pu", 0),
+ ppe_id,
+ make_field("rm_size", 0),
+ rm_size, 0);
+}
+
+int ps3_repository_read_region_total(u64 *region_total)
+{
+ return read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("rgntotal", 0),
+ 0, 0,
+ region_total, 0);
+}
+
+/**
+ * ps3_repository_read_mm_info - Read mm info for single pu system.
+ * @rm_base: Real mode memory base address.
+ * @rm_size: Real mode memory size.
+ * @region_total: Maximum memory region size.
+ */
+
+int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
+{
+ int result;
+ u64 ppe_id;
+
+ lv1_get_logical_ppe_id(&ppe_id);
+ *rm_base = 0;
+ result = ps3_repository_read_rm_size(ppe_id, rm_size);
+ return result ? result
+ : ps3_repository_read_region_total(region_total);
+}
+
+/**
+ * ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
+ * @num_spu_reserved: Number of physical spus reserved.
+ */
+
+int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("spun", 0),
+ 0, 0,
+ &v1, 0);
+ *num_spu_reserved = v1;
+ return result;
+}
+
+/**
+ * ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
+ * @num_resource_id: Number of spu resource ids.
+ */
+
+int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("spursvn", 0),
+ 0, 0,
+ &v1, 0);
+ *num_resource_id = v1;
+ return result;
+}
+
+/**
+ * ps3_repository_read_spu_resource_id - spu resource reservation id value.
+ * @res_index: Resource reservation index.
+ * @resource_type: Resource reservation type.
+ * @resource_id: Resource reservation id.
+ */
+
+int ps3_repository_read_spu_resource_id(unsigned int res_index,
+ enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
+{
+ int result;
+ u64 v1;
+ u64 v2;
+
+ result = read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("spursv", 0),
+ res_index,
+ 0,
+ &v1, &v2);
+ *resource_type = v1;
+ *resource_id = v2;
+ return result;
+}
+
+int ps3_repository_read_boot_dat_address(u64 *address)
+{
+ return read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("boot_dat", 0),
+ make_field("address", 0),
+ 0,
+ address, 0);
+}
+
+int ps3_repository_read_boot_dat_size(unsigned int *size)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("bi", 0),
+ make_field("boot_dat", 0),
+ make_field("size", 0),
+ 0,
+ &v1, 0);
+ *size = v1;
+ return result;
+}
+
+/**
+ * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
+ * @lpar_addr: lpar address of cell_ext_os_area
+ * @size: size of cell_ext_os_area
+ */
+
+int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
+{
+ int result;
+
+ *size = 0;
+ result = ps3_repository_read_boot_dat_address(lpar_addr);
+ return result ? result
+ : ps3_repository_read_boot_dat_size(size);
+}
+
+int ps3_repository_read_num_be(unsigned int *num_be)
+{
+ int result;
+ u64 v1;
+
+ result = read_node(PS3_LPAR_ID_PME,
+ make_first_field("ben", 0),
+ 0,
+ 0,
+ 0,
+ &v1, 0);
+ *num_be = v1;
+ return result;
+}
+
+int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
+{
+ return read_node(PS3_LPAR_ID_PME,
+ make_first_field("be", be_index),
+ 0,
+ 0,
+ 0,
+ node_id, 0);
+}
+
+int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+{
+ return read_node(PS3_LPAR_ID_PME,
+ make_first_field("be", 0),
+ node_id,
+ make_field("clock", 0),
+ 0,
+ tb_freq, 0);
+}
+
+int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+{
+ int result;
+ u64 node_id;
+
+ *tb_freq = 0;
+ result = ps3_repository_read_be_node_id(be_index, &node_id);
+ return result ? result
+ : ps3_repository_read_tb_freq(node_id, tb_freq);
+}
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
new file mode 100644
index 00000000000..d8b5cadbe80
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -0,0 +1,173 @@
+/*
+ * PS3 platform setup routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/kexec.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/time.h>
+#include <asm/iommu.h>
+#include <asm/udbg.h>
+#include <asm/prom.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+static void ps3_show_cpuinfo(struct seq_file *m)
+{
+ seq_printf(m, "machine\t\t: %s\n", ppc_md.name);
+}
+
+static void ps3_power_save(void)
+{
+ /*
+ * lv1_pause() puts the PPE thread into inactive state until an
+ * irq is pending on an unmasked plug. MSR[EE] has no effect.
+ * flags: 0 = wake on DEC interrupt, 1 = ignore DEC interrupt.
+ */
+
+ lv1_pause(0);
+}
+
+static void ps3_panic(char *str)
+{
+ DBG("%s:%d %s\n", __func__, __LINE__, str);
+
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ printk("\n");
+ printk(" System does not reboot automatically.\n");
+ printk(" Please press POWER button.\n");
+ printk("\n");
+
+ for (;;) ;
+}
+
+static void __init ps3_setup_arch(void)
+{
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ ps3_spu_set_platform();
+ ps3_map_htab();
+
+#ifdef CONFIG_SMP
+ smp_init_ps3();
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ ppc_md.power_save = ps3_power_save;
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void __init ps3_progress(char *s, unsigned short hex)
+{
+ printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static int __init ps3_probe(void)
+{
+ unsigned long htab_size;
+ unsigned long dt_root;
+
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ dt_root = of_get_flat_dt_root();
+ if (!of_flat_dt_is_compatible(dt_root, "PS3"))
+ return 0;
+
+ powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
+
+ ps3_os_area_init();
+ ps3_mm_init();
+ ps3_mm_vas_create(&htab_size);
+ ps3_hpte_init(htab_size);
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+ return 1;
+}
+
+#if defined(CONFIG_KEXEC)
+static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ if (secondary) {
+ int cpu;
+ for_each_online_cpu(cpu)
+ if (cpu)
+ ps3_smp_cleanup_cpu(cpu);
+ } else
+ ps3_smp_cleanup_cpu(0);
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void ps3_machine_kexec(struct kimage *image)
+{
+ unsigned long ppe_id;
+
+ DBG(" -> %s:%d\n", __func__, __LINE__);
+
+ lv1_get_logical_ppe_id(&ppe_id);
+ lv1_configure_irq_state_bitmap(ppe_id, 0, 0);
+ ps3_mm_shutdown();
+ ps3_mm_vas_destroy();
+
+ default_machine_kexec(image);
+
+ DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+#endif
+
+define_machine(ps3) {
+ .name = "PS3",
+ .probe = ps3_probe,
+ .setup_arch = ps3_setup_arch,
+ .show_cpuinfo = ps3_show_cpuinfo,
+ .init_IRQ = ps3_init_IRQ,
+ .panic = ps3_panic,
+ .get_boot_time = ps3_get_boot_time,
+ .set_rtc_time = ps3_set_rtc_time,
+ .get_rtc_time = ps3_get_rtc_time,
+ .calibrate_decr = ps3_calibrate_decr,
+ .progress = ps3_progress,
+#if defined(CONFIG_KEXEC)
+ .kexec_cpu_down = ps3_kexec_cpu_down,
+ .machine_kexec = ps3_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
+#endif
+};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
new file mode 100644
index 00000000000..11d2080607e
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -0,0 +1,158 @@
+/*
+ * PS3 SMP routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+static irqreturn_t ipi_function_handler(int irq, void *msg)
+{
+ smp_message_recv((int)(long)msg);
+ return IRQ_HANDLED;
+}
+
+/**
+ * virqs - a per cpu array of virqs for ipi use
+ */
+
+#define MSG_COUNT 4
+static DEFINE_PER_CPU(unsigned int, virqs[MSG_COUNT]);
+
+static const char *names[MSG_COUNT] = {
+ "ipi call",
+ "ipi reschedule",
+ "ipi migrate",
+ "ipi debug brk"
+};
+
+static void do_message_pass(int target, int msg)
+{
+ int result;
+ unsigned int virq;
+
+ if (msg >= MSG_COUNT) {
+ DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg);
+ return;
+ }
+
+ virq = per_cpu(virqs, target)[msg];
+ result = ps3_send_event_locally(virq);
+
+ if (result)
+ DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
+ " (%d)\n", __func__, __LINE__, target, msg, result);
+}
+
+static void ps3_smp_message_pass(int target, int msg)
+{
+ int cpu;
+
+ if (target < NR_CPUS)
+ do_message_pass(target, msg);
+ else if (target == MSG_ALL_BUT_SELF) {
+ for_each_online_cpu(cpu)
+ if (cpu != smp_processor_id())
+ do_message_pass(cpu, msg);
+ } else {
+ for_each_online_cpu(cpu)
+ do_message_pass(cpu, msg);
+ }
+}
+
+static int ps3_smp_probe(void)
+{
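+ /* The PS3 has a single Cell PPE with two hardware threads, so report two cpus. */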
+ return 2;
+}
+
+static void __init ps3_smp_setup_cpu(int cpu)
+{
+ int result;
+ unsigned int *virqs = per_cpu(virqs, cpu);
+ int i;
+
+ DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+
+ /*
+ * Check assumptions on virqs[] indexing. If this
+ * check fails, then a different mapping of PPC_MSG_
+ * to index needs to be set up.
+ */
+
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
+ BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
+ BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+
+ for (i = 0; i < MSG_COUNT; i++) {
+ result = ps3_alloc_event_irq(&virqs[i]);
+
+ if (result)
+ continue;
+
+ DBG("%s:%d: (%d, %d) => virq %u\n",
+ __func__, __LINE__, cpu, i, virqs[i]);
+
+ request_irq(virqs[i], ipi_function_handler, IRQF_DISABLED,
+ names[i], (void*)(long)i);
+ }
+
+ ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+
+ DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+void ps3_smp_cleanup_cpu(int cpu)
+{
+ unsigned int *virqs = per_cpu(virqs, cpu);
+ int i;
+
+ DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+ for (i = 0; i < MSG_COUNT; i++) {
+ ps3_free_event_irq(virqs[i]);
+ free_irq(virqs[i], (void*)(long)i);
+ virqs[i] = NO_IRQ;
+ }
+ DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+static struct smp_ops_t ps3_smp_ops = {
+ .probe = ps3_smp_probe,
+ .message_pass = ps3_smp_message_pass,
+ .kick_cpu = smp_generic_kick_cpu,
+ .setup_cpu = ps3_smp_setup_cpu,
+};
+
+void smp_init_ps3(void)
+{
+ DBG(" -> %s\n", __func__);
+ smp_ops = &ps3_smp_ops;
+ DBG(" <- %s\n", __func__);
+}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
new file mode 100644
index 00000000000..644532c3b7c
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -0,0 +1,613 @@
+/*
+ * PS3 Platform spu routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mmzone.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+/* spu_management_ops */
+
+/**
+ * enum spe_type - Type of spe to create.
+ * @spe_type_logical: Standard logical spe.
+ *
+ * For use with lv1_construct_logical_spe(). The current HV does not support
+ * any types other than those listed.
+ */
+
+enum spe_type {
+ SPE_TYPE_LOGICAL = 0,
+};
+
+/**
+ * struct spe_shadow - logical spe shadow register area.
+ *
+ * Read-only shadow of spe registers.
+ */
+
+struct spe_shadow {
+ u8 padding_0000[0x0140];
+ u64 int_status_class0_RW; /* 0x0140 */
+ u64 int_status_class1_RW; /* 0x0148 */
+ u64 int_status_class2_RW; /* 0x0150 */
+ u8 padding_0158[0x0610-0x0158];
+ u64 mfc_dsisr_RW; /* 0x0610 */
+ u8 padding_0618[0x0620-0x0618];
+ u64 mfc_dar_RW; /* 0x0620 */
+ u8 padding_0628[0x0800-0x0628];
+ u64 mfc_dsipr_R; /* 0x0800 */
+ u8 padding_0808[0x0810-0x0808];
+ u64 mfc_lscrr_R; /* 0x0810 */
+ u8 padding_0818[0x0c00-0x0818];
+ u64 mfc_cer_R; /* 0x0c00 */
+ u8 padding_0c08[0x0f00-0x0c08];
+ u64 spe_execution_status; /* 0x0f00 */
+ u8 padding_0f08[0x1000-0x0f08];
+} __attribute__ ((packed));
+
+
+/**
+ * enum spe_ex_state - Logical spe execution state.
+ * @spe_ex_state_unexecutable: Uninitialized.
+ * @spe_ex_state_executable: Enabled, not ready.
+ * @spe_ex_state_executed: Ready for use.
+ *
+ * The execution state (status) of the logical spe as reported in
+ * struct spe_shadow:spe_execution_status.
+ */
+
+enum spe_ex_state {
+ SPE_EX_STATE_UNEXECUTABLE = 0,
+ SPE_EX_STATE_EXECUTABLE = 2,
+ SPE_EX_STATE_EXECUTED = 3,
+};
+
+/**
+ * struct priv1_cache - Cached values of priv1 registers.
+ * @masks[]: Array of cached spe interrupt masks, indexed by class.
+ * @sr1: Cached mfc_sr1 register.
+ * @tclass_id: Cached mfc_tclass_id register.
+ */
+
+struct priv1_cache {
+ u64 masks[3];
+ u64 sr1;
+ u64 tclass_id;
+};
+
+/**
+ * struct spu_pdata - Platform state variables.
+ * @spe_id: HV spe id returned by lv1_construct_logical_spe().
+ * @resource_id: HV spe resource id returned by
+ * ps3_repository_read_spu_resource_id().
+ * @priv2_addr: lpar address of spe priv2 area returned by
+ * lv1_construct_logical_spe().
+ * @shadow_addr: lpar address of spe register shadow area returned by
+ * lv1_construct_logical_spe().
+ * @shadow: Virtual (ioremap) address of spe register shadow area.
+ * @cache: Cached values of priv1 registers.
+ */
+
+struct spu_pdata {
+ u64 spe_id;
+ u64 resource_id;
+ u64 priv2_addr;
+ u64 shadow_addr;
+ struct spe_shadow __iomem *shadow;
+ struct priv1_cache cache;
+};
+
+static struct spu_pdata *spu_pdata(struct spu *spu)
+{
+ return spu->pdata;
+}
+
+#define dump_areas(_a, _b, _c, _d, _e) \
+ _dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_areas(unsigned int spe_id, unsigned long priv2,
+ unsigned long problem, unsigned long ls, unsigned long shadow,
+ const char* func, int line)
+{
+ pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id);
+ pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2);
+ pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
+ pr_debug("%s:%d: ls: %lxh\n", func, line, ls);
+ pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
+}
+
+static unsigned long get_vas_id(void)
+{
+ unsigned long id;
+
+ lv1_get_logical_ppe_id(&id);
+ lv1_get_virtual_address_space_id_of_ppe(id, &id);
+
+ return id;
+}
+
+static int __init construct_spu(struct spu *spu)
+{
+ int result;
+ unsigned long unused;
+
+ result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
+ PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
+ &spu_pdata(spu)->priv2_addr, &spu->problem_phys,
+ &spu->local_store_phys, &unused,
+ &spu_pdata(spu)->shadow_addr,
+ &spu_pdata(spu)->spe_id);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return result;
+ }
+
+ return result;
+}
+
+static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
+{
+ int result;
+ unsigned long start_pfn;
+ unsigned long nr_pages;
+ struct pglist_data *pgdata;
+ struct zone *zone;
+
+ BUG_ON(!mem_init_done);
+
+ start_pfn = start_addr >> PAGE_SHIFT;
+ nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ pgdata = NODE_DATA(0);
+ zone = pgdata->node_zones;
+
+ result = __add_pages(zone, start_pfn, nr_pages);
+
+ if (result)
+ pr_debug("%s:%d: __add_pages failed: (%d)\n",
+ __func__, __LINE__, result);
+
+ return result;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+ iounmap(spu->priv2);
+ iounmap(spu->problem);
+ iounmap((__force u8 __iomem *)spu->local_store);
+ iounmap(spu_pdata(spu)->shadow);
+}
+
+static int __init setup_areas(struct spu *spu)
+{
+ struct table {char* name; unsigned long addr; unsigned long size;};
+ int result;
+
+ /* setup pages */
+
+ result = add_spu_pages(spu->local_store_phys, LS_SIZE);
+ if (result)
+ goto fail_add;
+
+ result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
+ if (result)
+ goto fail_add;
+
+ /* ioremap */
+
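+ /* Map the read-only shadow register area uncached and guarded. */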
+ spu_pdata(spu)->shadow = __ioremap(
+ spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
+ PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ if (!spu_pdata(spu)->shadow) {
+ pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
+ goto fail_ioremap;
+ }
+
+ spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
+ if (!spu->local_store) {
+ pr_debug("%s:%d: ioremap local_store failed\n",
+ __func__, __LINE__);
+ goto fail_ioremap;
+ }
+
+ spu->problem = ioremap(spu->problem_phys,
+ sizeof(struct spu_problem));
+ if (!spu->problem) {
+ pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
+ goto fail_ioremap;
+ }
+
+ spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
+ sizeof(struct spu_priv2));
+ if (!spu->priv2) {
+ pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
+ goto fail_ioremap;
+ }
+
+ dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
+ spu->problem_phys, spu->local_store_phys,
+ spu_pdata(spu)->shadow_addr);
+ dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
+ (unsigned long)spu->problem, (unsigned long)spu->local_store,
+ (unsigned long)spu_pdata(spu)->shadow);
+
+ return 0;
+
+fail_ioremap:
+ spu_unmap(spu);
+fail_add:
+ return result;
+}
+
+static int __init setup_interrupts(struct spu *spu)
+{
+ int result;
+
+ result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 0,
+ &spu->irqs[0]);
+
+ if (result)
+ goto fail_alloc_0;
+
+ result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 1,
+ &spu->irqs[1]);
+
+ if (result)
+ goto fail_alloc_1;
+
+ result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 2,
+ &spu->irqs[2]);
+
+ if (result)
+ goto fail_alloc_2;
+
+ return result;
+
+fail_alloc_2:
+ ps3_free_spe_irq(spu->irqs[1]);
+fail_alloc_1:
+ ps3_free_spe_irq(spu->irqs[0]);
+fail_alloc_0:
+ spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+ return result;
+}
+
+static int __init enable_spu(struct spu *spu)
+{
+ int result;
+
+ result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
+ spu_pdata(spu)->resource_id);
+
+ if (result) {
+ pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ goto fail_enable;
+ }
+
+ result = setup_areas(spu);
+
+ if (result)
+ goto fail_areas;
+
+ result = setup_interrupts(spu);
+
+ if (result)
+ goto fail_interrupts;
+
+ return 0;
+
+fail_interrupts:
+ spu_unmap(spu);
+fail_areas:
+ lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+fail_enable:
+ return result;
+}
+
+static int ps3_destroy_spu(struct spu *spu)
+{
+ int result;
+
+ pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+ result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+ BUG_ON(result);
+
+ ps3_free_spe_irq(spu->irqs[2]);
+ ps3_free_spe_irq(spu->irqs[1]);
+ ps3_free_spe_irq(spu->irqs[0]);
+
+ spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+
+ spu_unmap(spu);
+
+ result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
+ BUG_ON(result);
+
+ kfree(spu->pdata);
+ spu->pdata = NULL;
+
+ return 0;
+}
+
+static int __init ps3_create_spu(struct spu *spu, void *data)
+{
+ int result;
+
+ pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+ spu->pdata = kzalloc(sizeof(struct spu_pdata),
+ GFP_KERNEL);
+
+ if (!spu->pdata) {
+ result = -ENOMEM;
+ goto fail_malloc;
+ }
+
+ spu_pdata(spu)->resource_id = (unsigned long)data;
+
+ /* Init cached reg values to HV defaults. */
+
+ spu_pdata(spu)->cache.sr1 = 0x33;
+
+ result = construct_spu(spu);
+
+ if (result)
+ goto fail_construct;
+
+ /* For now, just go ahead and enable it. */
+
+ result = enable_spu(spu);
+
+ if (result)
+ goto fail_enable;
+
+ /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
+
+ /* need something better here!!! */
+ while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
+ != SPE_EX_STATE_EXECUTED)
+ (void)0;
+
+ return result;
+
+fail_enable:
+fail_construct:
+ ps3_destroy_spu(spu);
+fail_malloc:
+ return result;
+}
+
+static int __init ps3_enumerate_spus(int (*fn)(void *data))
+{
+ int result;
+ unsigned int num_resource_id;
+ unsigned int i;
+
+ result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
+
+ pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
+ num_resource_id);
+
+ /*
+ * For now, just create logical spus equal to the number
+ * of physical spus reserved for the partition.
+ */
+
+ for (i = 0; i < num_resource_id; i++) {
+ enum ps3_spu_resource_type resource_type;
+ unsigned int resource_id;
+
+ result = ps3_repository_read_spu_resource_id(i,
+ &resource_type, &resource_id);
+
+ if (result)
+ break;
+
+ if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
+ result = fn((void*)(unsigned long)resource_id);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (result)
+ printk(KERN_WARNING "%s:%d: Error initializing spus\n",
+ __func__, __LINE__);
+
+ return result;
+}
+
+const struct spu_management_ops spu_management_ps3_ops = {
+ .enumerate_spus = ps3_enumerate_spus,
+ .create_spu = ps3_create_spu,
+ .destroy_spu = ps3_destroy_spu,
+};
+
+/* spu_priv1_ops */
+
+static void int_mask_and(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ /* are these serialized by caller??? */
+ old_mask = spu_int_mask_get(spu, class);
+ spu_int_mask_set(spu, class, old_mask & mask);
+}
+
+static void int_mask_or(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ old_mask = spu_int_mask_get(spu, class);
+ spu_int_mask_set(spu, class, old_mask | mask);
+}
+
+static void int_mask_set(struct spu *spu, int class, u64 mask)
+{
+ spu_pdata(spu)->cache.masks[class] = mask;
+ lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
+ spu_pdata(spu)->cache.masks[class]);
+}
+
+static u64 int_mask_get(struct spu *spu, int class)
+{
+ return spu_pdata(spu)->cache.masks[class];
+}
+
+static void int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+ /* Note that MFC_DSISR will be cleared when class1[MF] is set. */
+
+ lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
+ stat, 0);
+}
+
+static u64 int_stat_get(struct spu *spu, int class)
+{
+ u64 stat;
+
+ lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
+ return stat;
+}
+
+static void cpu_affinity_set(struct spu *spu, int cpu)
+{
+ /* No support. */
+}
+
+static u64 mfc_dar_get(struct spu *spu)
+{
+ return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
+}
+
+static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+ /* Nothing to do, cleared in int_stat_clear(). */
+}
+
+static u64 mfc_dsisr_get(struct spu *spu)
+{
+ return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
+}
+
+static void mfc_sdr_setup(struct spu *spu)
+{
+ /* Nothing to do. */
+}
+
+static void mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+ /* Check bits allowed by HV. */
+
+ static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
+ | MFC_STATE1_PROBLEM_STATE_MASK);
+
+ BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
+
+ spu_pdata(spu)->cache.sr1 = sr1;
+ lv1_set_spe_privilege_state_area_1_register(
+ spu_pdata(spu)->spe_id,
+ offsetof(struct spu_priv1, mfc_sr1_RW),
+ spu_pdata(spu)->cache.sr1);
+}
+
+static u64 mfc_sr1_get(struct spu *spu)
+{
+ return spu_pdata(spu)->cache.sr1;
+}
+
+static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+ spu_pdata(spu)->cache.tclass_id = tclass_id;
+ lv1_set_spe_privilege_state_area_1_register(
+ spu_pdata(spu)->spe_id,
+ offsetof(struct spu_priv1, mfc_tclass_id_RW),
+ spu_pdata(spu)->cache.tclass_id);
+}
+
+static u64 mfc_tclass_id_get(struct spu *spu)
+{
+ return spu_pdata(spu)->cache.tclass_id;
+}
+
+static void tlb_invalidate(struct spu *spu)
+{
+ /* Nothing to do. */
+}
+
+static void resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+ /* No support. */
+}
+
+static u64 resource_allocation_groupID_get(struct spu *spu)
+{
+ return 0; /* No support. */
+}
+
+static void resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+ /* No support. */
+}
+
+static u64 resource_allocation_enable_get(struct spu *spu)
+{
+ return 0; /* No support. */
+}
+
+const struct spu_priv1_ops spu_priv1_ps3_ops = {
+ .int_mask_and = int_mask_and,
+ .int_mask_or = int_mask_or,
+ .int_mask_set = int_mask_set,
+ .int_mask_get = int_mask_get,
+ .int_stat_clear = int_stat_clear,
+ .int_stat_get = int_stat_get,
+ .cpu_affinity_set = cpu_affinity_set,
+ .mfc_dar_get = mfc_dar_get,
+ .mfc_dsisr_set = mfc_dsisr_set,
+ .mfc_dsisr_get = mfc_dsisr_get,
+ .mfc_sdr_setup = mfc_sdr_setup,
+ .mfc_sr1_set = mfc_sr1_set,
+ .mfc_sr1_get = mfc_sr1_get,
+ .mfc_tclass_id_set = mfc_tclass_id_set,
+ .mfc_tclass_id_get = mfc_tclass_id_get,
+ .tlb_invalidate = tlb_invalidate,
+ .resource_allocation_groupID_set = resource_allocation_groupID_set,
+ .resource_allocation_groupID_get = resource_allocation_groupID_get,
+ .resource_allocation_enable_set = resource_allocation_enable_set,
+ .resource_allocation_enable_get = resource_allocation_enable_get,
+};
+
+void ps3_spu_set_platform(void)
+{
+ spu_priv1_ops = &spu_priv1_ps3_ops;
+ spu_management_ops = &spu_management_ps3_ops;
+}
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
new file mode 100644
index 00000000000..1bae8b19b36
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -0,0 +1,104 @@
+/*
+ * PS3 time and rtc routines.
+ *
+ * Copyright (C) 2006 Sony Computer Entertainment Inc.
+ * Copyright 2006 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/rtc.h>
+#include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__)
+static void _dump_tm(const struct rtc_time *tm, const char* func, int line)
+{
+ pr_debug("%s:%d tm_sec %d\n", func, line, tm->tm_sec);
+ pr_debug("%s:%d tm_min %d\n", func, line, tm->tm_min);
+ pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour);
+ pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday);
+ pr_debug("%s:%d tm_mon %d\n", func, line, tm->tm_mon);
+ pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year);
+ pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday);
+}
+
+#define dump_time(_a) _dump_time(_a, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_time(int time, const char* func,
+ int line)
+{
+ struct rtc_time tm;
+
+ to_tm(time, &tm);
+
+ pr_debug("%s:%d time %d\n", func, line, time);
+ _dump_tm(&tm, func, line);
+}
+
+/**
+ * rtc_shift - Difference in seconds between 1970 and the ps3 rtc value.
+ */
+
+static s64 rtc_shift;
+
+void __init ps3_calibrate_decr(void)
+{
+ int result;
+ u64 tmp;
+
+ result = ps3_repository_read_be_tb_freq(0, &tmp);
+ BUG_ON(result);
+
+ ppc_tb_freq = tmp;
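+ /* The processor clock is taken to be 40x the timebase frequency. */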
+ ppc_proc_freq = ppc_tb_freq * 40;
+
+ rtc_shift = ps3_os_area_rtc_diff();
+}
+
+static u64 read_rtc(void)
+{
+ int result;
+ u64 rtc_val;
+ u64 tb_val;
+
+ result = lv1_get_rtc(&rtc_val, &tb_val);
+ BUG_ON(result);
+
+ return rtc_val;
+}
+
+int ps3_set_rtc_time(struct rtc_time *tm)
+{
+ u64 now = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
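+ /* Only the software offset is updated; the lv1 rtc itself is left untouched. */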
+ rtc_shift = now - read_rtc();
+ return 0;
+}
+
+void ps3_get_rtc_time(struct rtc_time *tm)
+{
+ to_tm(read_rtc() + rtc_shift, tm);
+ tm->tm_year -= 1900;
+ tm->tm_mon -= 1;
+}
+
+unsigned long __init ps3_get_boot_time(void)
+{
+ return read_rtc() + rtc_shift;
+}
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 13707745131..49037edf7d3 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
* eeh_thread_launcher
* @dummy - unused
*/
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
{
if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 556c279a789..3c95392f4f4 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -309,7 +309,7 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}
-static void iommu_bus_setup_pSeries(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
struct device_node *dn;
struct iommu_table *tbl;
@@ -318,10 +318,9 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
struct pci_dn *pci;
int children;
- DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
-
dn = pci_bus_to_OF_node(bus);
- pci = PCI_DN(dn);
+
+ DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
if (bus->self) {
/* This is not a root bus, any setup will be done for the
@@ -329,6 +328,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
*/
return;
}
+ pci = PCI_DN(dn);
/* Check if the ISA bus on the system is under
* this PHB.
@@ -390,17 +390,17 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
}
-static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
const void *dma_window = NULL;
- DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
-
dn = pci_bus_to_OF_node(bus);
+ DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name);
+
/* Find nearest ibm,dma-window, walking up the device tree */
for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
dma_window = get_property(pdn, "ibm,dma-window", NULL);
@@ -409,11 +409,15 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
}
if (dma_window == NULL) {
- DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name);
+ DBG(" no ibm,dma-window property !\n");
return;
}
ppci = PCI_DN(pdn);
+
+ DBG(" parent is %s, iommu_table: 0x%p\n",
+ pdn->full_name, ppci->iommu_table);
+
if (!ppci->iommu_table) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
@@ -427,6 +431,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
+ DBG(" created table: %p\n", ppci->iommu_table);
}
if (pdn != dn)
@@ -434,27 +439,27 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
}
-static void iommu_dev_setup_pSeries(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
- struct device_node *dn, *mydn;
+ struct device_node *dn;
struct iommu_table *tbl;
- DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
+ DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
- mydn = dn = pci_device_to_OF_node(dev);
+ dn = dev->dev.archdata.of_node;
/* If we're the direct child of a root bus, then we need to allocate
* an iommu table ourselves. The bus setup code should have setup
* the window sizes already.
*/
if (!dev->bus->self) {
+ struct pci_controller *phb = PCI_DN(dn)->phb;
+
DBG(" --> first child, no bridge. Allocating iommu table.\n");
tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
- PCI_DN(dn)->phb->node);
- iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
- PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
- PCI_DN(dn)->phb->node);
-
+ phb->node);
+ iommu_table_setparms(phb, dn, tbl);
+ dev->dev.archdata.dma_data = iommu_init_table(tbl, phb->node);
return;
}
@@ -465,11 +470,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
dn = dn->parent;
- if (dn && PCI_DN(dn)) {
- PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
- } else {
- DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
- }
+ if (dn && PCI_DN(dn))
+ dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+ else
+ printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
+ pci_name(dev));
}
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
@@ -495,13 +500,15 @@ static struct notifier_block iommu_reconfig_nb = {
.notifier_call = iommu_reconfig_notifier,
};
-static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
const void *dma_window = NULL;
struct pci_dn *pci;
+ DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+
/* dev setup for LPAR is a little tricky, since the device tree might
 * contain the dma-window properties per-device and not necessarily
* for the bus. So we need to search upwards in the tree until we
@@ -509,9 +516,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
* already allocated.
*/
dn = pci_device_to_OF_node(dev);
-
- DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
- dev, pci_name(dev), dn->full_name);
+ DBG(" node is %s\n", dn->full_name);
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) {
@@ -520,16 +525,17 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
break;
}
+ DBG(" parent is %s\n", pdn->full_name);
+
/* Check for parent == NULL so we don't try to setup the empty EADS
* slots on POWER4 machines.
*/
if (dma_window == NULL || pdn->parent == NULL) {
- DBG("No dma window for device, linking to parent\n");
- PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
+ DBG(" no dma window for device, linking to parent\n");
+ dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
return;
- } else {
- DBG("Found DMA window, allocating table\n");
}
+ DBG(" found DMA window, table: %p\n", pci->iommu_table);
pci = PCI_DN(pdn);
if (!pci->iommu_table) {
@@ -542,24 +548,20 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+ DBG(" created table: %p\n", pci->iommu_table);
}
- if (pdn != dn)
- PCI_DN(dn)->iommu_table = pci->iommu_table;
+ dev->dev.archdata.dma_data = pci->iommu_table;
}
-static void iommu_bus_setup_null(struct pci_bus *b) { }
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
/* Direct I/O, IOMMU off */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
- pci_direct_iommu_init();
-
+ ppc_md.pci_dma_dev_setup = NULL;
+ ppc_md.pci_dma_bus_setup = NULL;
+ pci_dma_ops = &dma_direct_ops;
return;
}
@@ -572,19 +574,19 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.tce_get = tce_get_pSeriesLP;
- ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
- ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
+ ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.tce_get = tce_get_pseries;
- ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
- ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
+ ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
}
pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
- pci_iommu_init();
+ pci_dma_ops = &dma_iommu_ops;
}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1820a0b0a8c..721436db3ef 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -282,7 +282,7 @@ void vpa_init(int cpu)
}
}
-long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize)
@@ -506,7 +506,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
* Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
* lock.
*/
-void pSeries_lpar_flush_hash_range(unsigned long number, int local)
+static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
int i;
unsigned long flags = 0;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 410a6bcc4ca..715db5c8990 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,6 @@
#include <asm/prom.h>
#include <asm/ppc-pci.h>
-static int __devinitdata s7a_workaround = -1;
-
#if 0
void pcibios_name_device(struct pci_dev *dev)
{
@@ -57,39 +55,6 @@ void pcibios_name_device(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
#endif
-static void __devinit check_s7a(void)
-{
- struct device_node *root;
- const char *model;
-
- s7a_workaround = 0;
- root = of_find_node_by_path("/");
- if (root) {
- model = get_property(root, "model", NULL);
- if (model && !strcmp(model, "IBM,7013-S7A"))
- s7a_workaround = 1;
- of_node_put(root);
- }
-}
-
-void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- if (s7a_workaround < 0)
- check_s7a();
- list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_read_irq_line(dev);
- if (s7a_workaround) {
- if (dev->irq > 16) {
- dev->irq -= 3;
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
- dev->irq);
- }
- }
- }
-}
-
static void __init pSeries_request_regions(void)
{
if (!isa_io_base)
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 6bfacc21708..ac56b868913 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -93,8 +93,8 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
if (list_empty(&dev->global_list)) {
int i;
- /* Need to setup IOMMU tables */
- ppc_md.iommu_dev_setup(dev);
+ /* Fill device archdata and setup iommu table */
+ pcibios_setup_new_device(dev);
if(fix_bus)
pcibios_fixup_device_resources(dev, bus);
@@ -195,7 +195,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
phb = pcibios_alloc_controller(dn);
if (!phb)
return NULL;
- setup_phb(dn, phb);
+ rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, dn, 0);
pci_setup_phb_io_dynamic(phb, primary);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 1773103354b..4ad33e41b00 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -268,11 +268,10 @@ static char * parse_next_property(char *buf, char *end, char **name, int *length
static struct property *new_property(const char *name, const int length,
const unsigned char *value, struct property *last)
{
- struct property *new = kmalloc(sizeof(*new), GFP_KERNEL);
+ struct property *new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
- memset(new, 0, sizeof(*new));
if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL)))
goto cleanup;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 89a8119f988..0dc2548ca9b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -347,6 +347,7 @@ static int __init pSeries_init_panel(void)
}
arch_initcall(pSeries_init_panel);
+#ifdef CONFIG_HOTPLUG_CPU
static void pSeries_mach_cpu_die(void)
{
local_irq_disable();
@@ -357,6 +358,9 @@ static void pSeries_mach_cpu_die(void)
BUG();
for(;;);
}
+#else
+#define pSeries_mach_cpu_die NULL
+#endif
static int pseries_set_dabr(unsigned long dabr)
{
@@ -553,7 +557,6 @@ define_machine(pseries) {
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
.pci_probe_mode = pSeries_pci_probe_mode,
- .irq_bus_setup = pSeries_irq_bus_setup,
.restart = rtas_restart,
.power_off = rtas_power_off,
.halt = rtas_halt,
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index d071abe78ab..b5b2b1103de 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -656,13 +656,38 @@ static void __init xics_setup_8259_cascade(void)
set_irq_chained_handler(cascade, pseries_8259_cascade);
}
+static struct device_node *cpuid_to_of_node(int cpu)
+{
+ struct device_node *np;
+ u32 hcpuid = get_hard_smp_processor_id(cpu);
+
+ for_each_node_by_type(np, "cpu") {
+ int i, len;
+ const u32 *intserv;
+
+ intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+
+ if (!intserv)
+ intserv = get_property(np, "reg", &len);
+
+ i = len / sizeof(u32);
+
+ while (i--)
+ if (intserv[i] == hcpuid)
+ return np;
+ }
+
+ return NULL;
+}
+
void __init xics_init_IRQ(void)
{
- int i;
+ int i, j;
struct device_node *np;
u32 ilen, indx = 0;
- const u32 *ireg;
+ const u32 *ireg, *isize;
int found = 0;
+ u32 hcpuid;
ppc64_boot_msg(0x20, "XICS Init");
@@ -683,26 +708,31 @@ void __init xics_init_IRQ(void)
xics_init_host();
/* Find the server numbers for the boot cpu. */
- for (np = of_find_node_by_type(NULL, "cpu");
- np;
- np = of_find_node_by_type(np, "cpu")) {
- ireg = get_property(np, "reg", &ilen);
- if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
- ireg = get_property(np,
- "ibm,ppc-interrupt-gserver#s", &ilen);
- i = ilen / sizeof(int);
- if (ireg && i > 0) {
- default_server = ireg[0];
- /* take last element */
- default_distrib_server = ireg[i-1];
- }
- ireg = get_property(np,
+ np = cpuid_to_of_node(boot_cpuid);
+ BUG_ON(!np);
+ ireg = get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+ if (!ireg)
+ goto skip_gserver_check;
+ i = ilen / sizeof(int);
+ hcpuid = get_hard_smp_processor_id(boot_cpuid);
+
+ /* Global interrupt distribution server is specified in the last
+ * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
+ * entry from this property for the current boot cpu id and use it
+ * as the default distribution server
+ */
+ for (j = 0; j < i; j += 2) {
+ if (ireg[j] == hcpuid) {
+ default_server = hcpuid;
+ default_distrib_server = ireg[j+1];
+
+ isize = get_property(np,
"ibm,interrupt-server#-size", NULL);
- if (ireg)
- interrupt_server_size = *ireg;
- break;
+ if (isize)
+ interrupt_server_size = *isize;
}
}
+skip_gserver_check:
of_node_put(np);
if (firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 91f052d8cce..6cc34597a62 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,14 +5,13 @@ endif
obj-$(CONFIG_MPIC) += mpic.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
-obj-$(CONFIG_BOOKE) += dcr.o
-obj-$(CONFIG_40x) += dcr.o
+obj-$(CONFIG_PPC_DCR) += dcr.o dcr-low.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
-obj-$(CONFIG_PPC_TODC) += todc.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
+obj-$(CONFIG_MTD) += rom.o
ifeq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_I8259) += i8259.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 572b7846cc7..1488535b0e1 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -48,9 +48,6 @@
#include "dart.h"
-extern int iommu_is_off;
-extern int iommu_force_on;
-
/* Physical base address and size of the DART table */
unsigned long dart_tablebase; /* exported to htab_initialize */
static unsigned long dart_tablesize;
@@ -289,24 +286,15 @@ static void iommu_table_dart_setup(void)
set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
}
-static void iommu_dev_setup_dart(struct pci_dev *dev)
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
{
- struct device_node *dn;
-
/* We only have one iommu table on the mac for now, which makes
* things simple. Setup all PCI devices to point to this table
- *
- * We must use pci_device_to_OF_node() to make sure that
- * we get the real "final" pointer to the device in the
- * pci_dev sysdata and not the temporary PHB one
*/
- dn = pci_device_to_OF_node(dev);
-
- if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_dart;
+ dev->dev.archdata.dma_data = &iommu_table_dart;
}
-static void iommu_bus_setup_dart(struct pci_bus *bus)
+static void pci_dma_bus_setup_dart(struct pci_bus *bus)
{
struct device_node *dn;
@@ -321,9 +309,6 @@ static void iommu_bus_setup_dart(struct pci_bus *bus)
PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
void iommu_init_early_dart(void)
{
struct device_node *dn;
@@ -344,22 +329,21 @@ void iommu_init_early_dart(void)
/* Initialize the DART HW */
if (dart_init(dn) == 0) {
- ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
- ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
+ ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
/* Setup pci_dma ops */
- pci_iommu_init();
-
+ pci_dma_ops = &dma_iommu_ops;
return;
}
bail:
/* If init failed, use direct iommu and null setup functions */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+ ppc_md.pci_dma_dev_setup = NULL;
+ ppc_md.pci_dma_bus_setup = NULL;
/* Setup pci_dma ops */
- pci_direct_iommu_init();
+ pci_dma_ops = &dma_direct_ops;
}
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
new file mode 100644
index 00000000000..2078f39e2f1
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -0,0 +1,39 @@
+/*
+ * "Indirect" DCR access
+ *
+ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
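+/*
+ * Each DCR number has a 16-byte entry in the tables below
+ * (mfdcr; blr; mtdcr; blr).  The prologue scales the 10-bit DCR
+ * number by 16 and branches indirectly into the selected table.
+ */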
+#define DCR_ACCESS_PROLOG(table) \
+ rlwinm r3,r3,4,18,27; \
+ lis r5,table@h; \
+ ori r5,r5,table@l; \
+ add r3,r3,r5; \
+ mtctr r3; \
+ bctr
+
+_GLOBAL(__mfdcr)
+ DCR_ACCESS_PROLOG(__mfdcr_table)
+
+_GLOBAL(__mtdcr)
+ DCR_ACCESS_PROLOG(__mtdcr_table)
+
+__mfdcr_table:
+ mfdcr r3,0; blr
+__mtdcr_table:
+ mtdcr 0,r4; blr
+
+dcr = 1
+ .rept 1023
+ mfdcr r3,dcr; blr
+ mtdcr dcr,r4; blr
+ dcr = dcr + 1
+ .endr
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
new file mode 100644
index 00000000000..dffeeaeca1d
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr.c
@@ -0,0 +1,137 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <asm/prom.h>
+#include <asm/dcr.h>
+
+unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+{
+ unsigned int ds;
+ const u32 *dr = get_property(np, "dcr-reg", &ds);
+
+ if (dr == NULL || ds & 1 || index >= (ds / 8))
+ return 0;
+
+ return dr[index * 2];
+}
+
+unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+{
+ unsigned int ds;
+ const u32 *dr = get_property(np, "dcr-reg", &ds);
+
+ if (dr == NULL || ds & 1 || index >= (ds / 8))
+ return 0;
+
+ return dr[index * 2 + 1];
+}
+
+#ifndef CONFIG_PPC_DCR_NATIVE
+
+static struct device_node * find_dcr_parent(struct device_node * node)
+{
+ struct device_node *par, *tmp;
+ const u32 *p;
+
+ for (par = of_node_get(node); par;) {
+ if (get_property(par, "dcr-controller", NULL))
+ break;
+ p = get_property(par, "dcr-parent", NULL);
+ tmp = par;
+ if (p == NULL)
+ par = of_get_parent(par);
+ else
+ par = of_find_node_by_phandle(*p);
+ of_node_put(tmp);
+ }
+ return par;
+}
+
+u64 of_translate_dcr_address(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int *out_stride)
+{
+ struct device_node *dp;
+ const u32 *p;
+ unsigned int stride;
+ u64 ret;
+
+ dp = find_dcr_parent(dev);
+ if (dp == NULL)
+ return OF_BAD_ADDR;
+
+ /* Stride is not properly defined yet, default to 0x10 for Axon */
+ p = get_property(dp, "dcr-mmio-stride", NULL);
+ stride = (p == NULL) ? 0x10 : *p;
+
+ /* XXX FIXME: Which of the two following property names should be used? */
+ p = get_property(dp, "dcr-mmio-range", NULL);
+ if (p == NULL)
+ p = get_property(dp, "dcr-mmio-space", NULL);
+ if (p == NULL)
+ return OF_BAD_ADDR;
+
+ /* Maybe could do some better range checking here */
+ ret = of_translate_address(dp, p);
+ if (ret != OF_BAD_ADDR)
+ ret += (u64)(stride) * (u64)dcr_n;
+ if (out_stride)
+ *out_stride = stride;
+ return ret;
+}
+
+dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
+ unsigned int dcr_c)
+{
+ dcr_host_t ret = { .token = NULL, .stride = 0 };
+ u64 addr;
+
+ pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
+ dev->full_name, dcr_n, dcr_c);
+
+ addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
+ pr_debug("translates to addr: 0x%lx, stride: 0x%x\n",
+ addr, ret.stride);
+ if (addr == OF_BAD_ADDR)
+ return ret;
+ pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
+ ret.token = ioremap(addr, dcr_c * ret.stride);
+ if (ret.token == NULL)
+ return ret;
+ pr_debug("mapped at 0x%p -> base is 0x%p\n",
+ ret.token, ret.token - dcr_n * ret.stride);
+ ret.token -= dcr_n * ret.stride;
+ return ret;
+}
+
+void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c)
+{
+ dcr_host_t h = host;
+
+ if (h.token == NULL)
+ return;
+ h.token -= dcr_n * h.stride;
+ iounmap(h.token);
+ h.token = NULL;
+}
+
+#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
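
In the non-native (MMIO-mapped) case the routines added above pair with the dcr_read()/dcr_write() accessors used elsewhere in this series. A minimal usage sketch follows (hypothetical driver code; names and the reduced error handling are illustrative):

#include <asm/dcr.h>
#include <asm/prom.h>

/* Hypothetical probe fragment: look up the "dcr-reg" window, map it,
 * and touch the first register of the range.
 */
static int example_dcr_probe(struct device_node *np)
{
	unsigned int base = dcr_resource_start(np, 0);
	unsigned int len = dcr_resource_len(np, 0);
	dcr_host_t host;
	u32 val;

	if (len == 0)
		return -ENODEV;

	host = dcr_map(np, base, len);
	if (!DCR_MAP_OK(host))
		return -ENOMEM;

	val = dcr_read(host, base);	/* first DCR of the window */
	dcr_write(host, base, val);

	dcr_unmap(host, base, len);
	return 0;
}
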
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index dbe92ae2033..ad31e56e892 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <linux/fsl_devices.h>
#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
@@ -146,7 +147,7 @@ static int __init gfar_mdio_of_init(void)
}
for (k = 0; k < 32; k++)
- mdio_data.irq[k] = -1;
+ mdio_data.irq[k] = PHY_POLL;
while ((child = of_get_next_child(np, child)) != NULL) {
int irq = irq_of_parse_and_map(child, 0);
@@ -177,6 +178,7 @@ static const char *gfar_tx_intr = "tx";
static const char *gfar_rx_intr = "rx";
static const char *gfar_err_intr = "error";
+
static int __init gfar_of_init(void)
{
struct device_node *np;
@@ -204,8 +206,7 @@ static int __init gfar_of_init(void)
if (ret)
goto err;
- r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
- r[1].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[1]);
model = get_property(np, "model", NULL);
@@ -214,12 +215,10 @@ static int __init gfar_of_init(void)
r[1].name = gfar_tx_intr;
r[2].name = gfar_rx_intr;
- r[2].start = r[2].end = irq_of_parse_and_map(np, 1);
- r[2].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 1, &r[2]);
r[3].name = gfar_err_intr;
- r[3].start = r[3].end = irq_of_parse_and_map(np, 2);
- r[3].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 2, &r[3]);
n_res += 2;
}
@@ -323,8 +322,7 @@ static int __init fsl_i2c_of_init(void)
if (ret)
goto err;
- r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
- r[1].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[1]);
i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
if (IS_ERR(i2c_dev)) {
@@ -459,8 +457,7 @@ static int __init fsl_usb_of_init(void)
if (ret)
goto err;
- r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
- r[1].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[1]);
usb_dev_mph =
platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -507,8 +504,7 @@ static int __init fsl_usb_of_init(void)
if (ret)
goto unreg_mph;
- r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
- r[1].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[1]);
usb_dev_dr =
platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -591,8 +587,7 @@ static int __init fs_enet_of_init(void)
r[2].name = fcc_regs_c;
fs_enet_data.fcc_regs_c = r[2].start;
- r[3].start = r[3].end = irq_of_parse_and_map(np, 0);
- r[3].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[3]);
fs_enet_dev =
platform_device_register_simple("fsl-cpm-fcc", i, &r[0], 4);
@@ -754,8 +749,7 @@ static int __init cpm_uart_of_init(void)
goto err;
r[1].name = scc_pram;
- r[2].start = r[2].end = irq_of_parse_and_map(np, 0);
- r[2].flags = IORESOURCE_IRQ;
+ of_irq_to_resource(np, 0, &r[2]);
cpm_uart_dev =
platform_device_register_simple("fsl-cpm-scc:uart", i, &r[0], 3);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ba4833f57d4..411480d5c62 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -147,33 +147,51 @@ static u32 mpic_infos[][MPIC_IDX_END] = {
*/
-static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
- unsigned int reg)
+static inline u32 _mpic_read(enum mpic_reg_type type,
+ struct mpic_reg_bank *rb,
+ unsigned int reg)
{
- if (be)
- return in_be32(base + (reg >> 2));
- else
- return in_le32(base + (reg >> 2));
+ switch(type) {
+#ifdef CONFIG_PPC_DCR
+ case mpic_access_dcr:
+ return dcr_read(rb->dhost,
+ rb->dbase + reg + rb->doff);
+#endif
+ case mpic_access_mmio_be:
+ return in_be32(rb->base + (reg >> 2));
+ case mpic_access_mmio_le:
+ default:
+ return in_le32(rb->base + (reg >> 2));
+ }
}
-static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
- unsigned int reg, u32 value)
+static inline void _mpic_write(enum mpic_reg_type type,
+ struct mpic_reg_bank *rb,
+ unsigned int reg, u32 value)
{
- if (be)
- out_be32(base + (reg >> 2), value);
- else
- out_le32(base + (reg >> 2), value);
+ switch(type) {
+#ifdef CONFIG_PPC_DCR
+ case mpic_access_dcr:
+ return dcr_write(rb->dhost,
+ rb->dbase + reg + rb->doff, value);
+#endif
+ case mpic_access_mmio_be:
+ return out_be32(rb->base + (reg >> 2), value);
+ case mpic_access_mmio_le:
+ default:
+ return out_le32(rb->base + (reg >> 2), value);
+ }
}
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
- unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
+ enum mpic_reg_type type = mpic->reg_type;
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
- if (mpic->flags & MPIC_BROKEN_IPI)
- be = !be;
- return _mpic_read(be, mpic->gregs, offset);
+ if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
+ type = mpic_access_mmio_be;
+ return _mpic_read(type, &mpic->gregs, offset);
}
static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
@@ -181,7 +199,7 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
- _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
+ _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
}
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
@@ -190,8 +208,7 @@ static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
if (mpic->flags & MPIC_PRIMARY)
cpu = hard_smp_processor_id();
- return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
- mpic->cpuregs[cpu], reg);
+ return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
}
static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
@@ -201,7 +218,7 @@ static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 valu
if (mpic->flags & MPIC_PRIMARY)
cpu = hard_smp_processor_id();
- _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
+ _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
}
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
@@ -209,7 +226,7 @@ static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigne
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
- return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+ return _mpic_read(mpic->reg_type, &mpic->isus[isu],
reg + (idx * MPIC_INFO(IRQ_STRIDE)));
}
@@ -219,12 +236,12 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
- _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+ _mpic_write(mpic->reg_type, &mpic->isus[isu],
reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
}
-#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
-#define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
+#define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r))
+#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
@@ -238,6 +255,38 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
*/
+static void _mpic_map_mmio(struct mpic *mpic, unsigned long phys_addr,
+ struct mpic_reg_bank *rb, unsigned int offset,
+ unsigned int size)
+{
+ rb->base = ioremap(phys_addr + offset, size);
+ BUG_ON(rb->base == NULL);
+}
+
+#ifdef CONFIG_PPC_DCR
+static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+ unsigned int offset, unsigned int size)
+{
+ rb->dbase = mpic->dcr_base;
+ rb->doff = offset;
+ rb->dhost = dcr_map(mpic->of_node, rb->dbase + rb->doff, size);
+ BUG_ON(!DCR_MAP_OK(rb->dhost));
+}
+
+static inline void mpic_map(struct mpic *mpic, unsigned long phys_addr,
+ struct mpic_reg_bank *rb, unsigned int offset,
+ unsigned int size)
+{
+ if (mpic->flags & MPIC_USES_DCR)
+ _mpic_map_dcr(mpic, rb, offset, size);
+ else
+ _mpic_map_mmio(mpic, phys_addr, rb, offset, size);
+}
+#else /* CONFIG_PPC_DCR */
+#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+#endif /* !CONFIG_PPC_DCR */
+
+
/* Check if we have one of those nice broken MPICs with a flipped endian on
* reads from IPI registers
@@ -845,7 +894,7 @@ static struct irq_host_ops mpic_host_ops = {
*/
struct mpic * __init mpic_alloc(struct device_node *node,
- unsigned long phys_addr,
+ phys_addr_t phys_addr,
unsigned int flags,
unsigned int isu_size,
unsigned int irq_count,
@@ -855,6 +904,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
u32 reg;
const char *vers;
int i;
+ u64 paddr = phys_addr;
mpic = alloc_bootmem(sizeof(struct mpic));
if (mpic == NULL)
@@ -883,6 +933,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
if (flags & MPIC_PRIMARY)
mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_BROKEN_U3 */
+
#ifdef CONFIG_SMP
mpic->hc_ipi = mpic_ipi_chip;
mpic->hc_ipi.typename = name;
@@ -893,15 +944,52 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->irq_count = irq_count;
mpic->num_sources = 0; /* so far */
+ /* Check for "big-endian" in device-tree */
+ if (node && get_property(node, "big-endian", NULL) != NULL)
+ mpic->flags |= MPIC_BIG_ENDIAN;
+
+
#ifdef CONFIG_MPIC_WEIRD
mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
#endif
+ /* default register type */
+ mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
+ mpic_access_mmio_be : mpic_access_mmio_le;
+
+ /* If no physical address is passed in, a device-node is mandatory */
+ BUG_ON(paddr == 0 && node == NULL);
+
+ /* If no physical address passed in, check if it's dcr based */
+ if (paddr == 0 && get_property(node, "dcr-reg", NULL) != NULL)
+ mpic->flags |= MPIC_USES_DCR;
+
+#ifdef CONFIG_PPC_DCR
+ if (mpic->flags & MPIC_USES_DCR) {
+ const u32 *dbasep;
+ dbasep = get_property(node, "dcr-reg", NULL);
+ BUG_ON(dbasep == NULL);
+ mpic->dcr_base = *dbasep;
+ mpic->reg_type = mpic_access_dcr;
+ }
+#else
+ BUG_ON (mpic->flags & MPIC_USES_DCR);
+#endif /* CONFIG_PPC_DCR */
+
+ /* If the MPIC is not DCR based, and no physical address was passed
+ * in, try to obtain one
+ */
+ if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
+ const u32 *reg;
+ reg = get_property(node, "reg", NULL);
+ BUG_ON(reg == NULL);
+ paddr = of_translate_address(node, reg);
+ BUG_ON(paddr == OF_BAD_ADDR);
+ }
+
/* Map the global registers */
- mpic->gregs = ioremap(phys_addr + MPIC_INFO(GREG_BASE), 0x1000);
- mpic->tmregs = mpic->gregs +
- ((MPIC_INFO(TIMER_BASE) - MPIC_INFO(GREG_BASE)) >> 2);
- BUG_ON(mpic->gregs == NULL);
+ mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+ mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
@@ -926,17 +1014,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
- mpic->cpuregs[i] = ioremap(phys_addr + MPIC_INFO(CPU_BASE) +
- i * MPIC_INFO(CPU_STRIDE), 0x1000);
- BUG_ON(mpic->cpuregs[i] == NULL);
+ mpic_map(mpic, paddr, &mpic->cpuregs[i],
+ MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
+ 0x1000);
}
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
- mpic->isus[0] = ioremap(phys_addr + MPIC_INFO(IRQ_BASE),
- MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
- BUG_ON(mpic->isus[0] == NULL);
+ mpic_map(mpic, paddr, &mpic->isus[0],
+ MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
@@ -956,10 +1043,11 @@ struct mpic * __init mpic_alloc(struct device_node *node,
vers = "<unknown>";
break;
}
- printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
- name, vers, phys_addr, mpic->num_cpus);
- printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
- mpic->isu_shift, mpic->isu_mask);
+ printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
+ " max %d CPUs\n",
+ name, vers, (unsigned long long)paddr, mpic->num_cpus);
+ printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
+ mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
mpic->next = mpics;
mpics = mpic;
@@ -973,14 +1061,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
}
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
- unsigned long phys_addr)
+ phys_addr_t paddr)
{
unsigned int isu_first = isu_num * mpic->isu_size;
BUG_ON(isu_num >= MPIC_MAX_ISU);
- mpic->isus[isu_num] = ioremap(phys_addr,
- MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+ mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+ MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
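
With mpic_alloc() now able to derive its register base from the device tree, a DCR-mapped platform no longer needs to pass a physical address at all. A hypothetical platform-setup fragment (node type, flags and the " MPIC " name are illustrative, not taken from the patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mpic.h>
#include <asm/prom.h>

static void __init example_init_IRQ(void)
{
	struct device_node *np = of_find_node_by_type(NULL, "open-pic");
	struct mpic *mpic;

	if (np == NULL)
		return;

	/* paddr == 0: mpic_alloc() probes "dcr-reg" (or "reg") itself */
	mpic = mpic_alloc(np, 0, MPIC_PRIMARY | MPIC_WANTS_RESET, 0, 0, " MPIC ");
	BUG_ON(mpic == NULL);

	mpic_init(mpic);
	of_node_put(np);
}
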
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index e4223226a7a..e3d71e083f3 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -174,8 +174,7 @@ void qe_setbrg(u32 brg, u32 rate)
u32 divisor, tempval;
int div16 = 0;
- bp = &qe_immr->brg.brgc1;
- bp += brg;
+ bp = &qe_immr->brg.brgc[brg];
divisor = (get_brg_clk() / rate);
if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 75fa3104a43..e657559bea9 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -216,14 +216,12 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
return -EINVAL;
}
- uccf = (struct ucc_fast_private *)
- kmalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+ uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
if (!uccf) {
uccf_err
("ucc_fast_init: No memory for UCC slow data structure!");
return -ENOMEM;
}
- memset(uccf, 0, sizeof(struct ucc_fast_private));
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index a49da6b73ec..47b56203f47 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -168,14 +168,12 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
return -EINVAL;
}
- uccs = (struct ucc_slow_private *)
- kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+ uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
if (!uccs) {
uccs_err
("ucc_slow_init: No memory for UCC slow data structure!");
return -ENOMEM;
}
- memset(uccs, 0, sizeof(struct ucc_slow_private));
/* Fill slow UCC structure */
uccs->us_info = us_info;
diff --git a/arch/powerpc/sysdev/rom.c b/arch/powerpc/sysdev/rom.c
new file mode 100644
index 00000000000..bf5b3f10e6c
--- /dev/null
+++ b/arch/powerpc/sysdev/rom.c
@@ -0,0 +1,31 @@
+/*
+ * ROM device registration
+ *
+ * (C) 2006 MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/kernel.h>
+#include <asm/of_device.h>
+
+static int __init powerpc_flash_init(void)
+{
+ struct device_node *node = NULL;
+
+ /*
+ * Register all the devices whose type is "rom"
+ */
+ while ((node = of_find_node_by_type(node, "rom")) != NULL) {
+ if (node->name == NULL) {
+ printk(KERN_WARNING "powerpc_flash_init: found 'rom' "
+ "device, but with no name, skipping...\n");
+ continue;
+ }
+ of_platform_device_create(node, node->name, NULL);
+ }
+ return 0;
+}
+
+arch_initcall(powerpc_flash_init);
diff --git a/arch/powerpc/sysdev/todc.c b/arch/powerpc/sysdev/todc.c
deleted file mode 100644
index 0a65980efb5..00000000000
--- a/arch/powerpc/sysdev/todc.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Time of Day Clock support for the M48T35, M48T37, M48T59, and MC146818
- * Real Time Clocks/Timekeepers.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001-2004 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/bcd.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/time.h>
-#include <asm/todc.h>
-
-/*
- * Depending on the hardware on your board and your board design, the
- * RTC/NVRAM may be accessed either directly (like normal memory) or via
- * address/data registers. If your board uses the direct method, set
- * 'nvram_data' to the base address of your nvram and leave 'nvram_as0' and
- * 'nvram_as1' NULL. If your board uses address/data regs to access nvram,
- * set 'nvram_as0' to the address of the lower byte, set 'nvram_as1' to the
- * address of the upper byte (leave NULL if using mc146818), and set
- * 'nvram_data' to the address of the 8-bit data register.
- *
- * Note: Even though the documentation for the various RTC chips say that it
- * take up to a second before it starts updating once the 'R' bit is
- * cleared, they always seem to update even though we bang on it many
- * times a second. This is true, except for the Dallas Semi 1746/1747
- * (possibly others). Those chips seem to have a real problem whenever
- * we set the 'R' bit before reading them, they basically stop counting.
- * --MAG
- */
-
-/*
- * 'todc_info' should be initialized in your *_setup.c file to
- * point to a fully initialized 'todc_info_t' structure.
- * This structure holds all the register offsets for your particular
- * TODC/RTC chip.
- * TODC_ALLOC()/TODC_INIT() will allocate and initialize this table for you.
- */
-
-#ifdef RTC_FREQ_SELECT
-#undef RTC_FREQ_SELECT
-#define RTC_FREQ_SELECT control_b /* Register A */
-#endif
-
-#ifdef RTC_CONTROL
-#undef RTC_CONTROL
-#define RTC_CONTROL control_a /* Register B */
-#endif
-
-#ifdef RTC_INTR_FLAGS
-#undef RTC_INTR_FLAGS
-#define RTC_INTR_FLAGS watchdog /* Register C */
-#endif
-
-#ifdef RTC_VALID
-#undef RTC_VALID
-#define RTC_VALID interrupts /* Register D */
-#endif
-
-/* Access routines when RTC accessed directly (like normal memory) */
-u_char
-todc_direct_read_val(int addr)
-{
- return readb((void __iomem *)(todc_info->nvram_data + addr));
-}
-
-void
-todc_direct_write_val(int addr, unsigned char val)
-{
- writeb(val, (void __iomem *)(todc_info->nvram_data + addr));
- return;
-}
-
-/* Access routines for accessing m48txx type chips via addr/data regs */
-u_char
-todc_m48txx_read_val(int addr)
-{
- outb(addr, todc_info->nvram_as0);
- outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
- return inb(todc_info->nvram_data);
-}
-
-void
-todc_m48txx_write_val(int addr, unsigned char val)
-{
- outb(addr, todc_info->nvram_as0);
- outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
- outb(val, todc_info->nvram_data);
- return;
-}
-
-/* Access routines for accessing mc146818 type chips via addr/data regs */
-u_char
-todc_mc146818_read_val(int addr)
-{
- outb_p(addr, todc_info->nvram_as0);
- return inb_p(todc_info->nvram_data);
-}
-
-void
-todc_mc146818_write_val(int addr, unsigned char val)
-{
- outb_p(addr, todc_info->nvram_as0);
- outb_p(val, todc_info->nvram_data);
-}
-
-
-/*
- * Routines to make RTC chips with NVRAM buried behind an addr/data pair
- * have the NVRAM and clock regs appear at the same level.
- * The NVRAM will appear to start at addr 0 and the clock regs will appear
- * to start immediately after the NVRAM (actually, start at offset
- * todc_info->nvram_size).
- */
-static inline u_char
-todc_read_val(int addr)
-{
- u_char val;
-
- if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
- if (addr < todc_info->nvram_size) { /* NVRAM */
- ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
- val = ppc_md.rtc_read_val(todc_info->nvram_data_reg);
- } else { /* Clock Reg */
- addr -= todc_info->nvram_size;
- val = ppc_md.rtc_read_val(addr);
- }
- } else
- val = ppc_md.rtc_read_val(addr);
-
- return val;
-}
-
-static inline void
-todc_write_val(int addr, u_char val)
-{
- if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
- if (addr < todc_info->nvram_size) { /* NVRAM */
- ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
- ppc_md.rtc_write_val(todc_info->nvram_data_reg, val);
- } else { /* Clock Reg */
- addr -= todc_info->nvram_size;
- ppc_md.rtc_write_val(addr, val);
- }
- } else
- ppc_md.rtc_write_val(addr, val);
-}
-
-/*
- * TODC routines
- *
- * There is some ugly stuff in that there are assumptions for the mc146818.
- *
- * Assumptions:
- * - todc_info->control_a has the offset as mc146818 Register B reg
- * - todc_info->control_b has the offset as mc146818 Register A reg
- * - m48txx control reg's write enable or 'W' bit is same as
- * mc146818 Register B 'SET' bit (i.e., 0x80)
- *
- * These assumptions were made to make the code simpler.
- */
-long __init
-todc_time_init(void)
-{
- u_char cntl_b;
-
- if (!ppc_md.rtc_read_val)
- ppc_md.rtc_read_val = ppc_md.nvram_read_val;
- if (!ppc_md.rtc_write_val)
- ppc_md.rtc_write_val = ppc_md.nvram_write_val;
-
- cntl_b = todc_read_val(todc_info->control_b);
-
- if (todc_info->rtc_type == TODC_TYPE_MC146818) {
- if ((cntl_b & 0x70) != 0x20) {
- printk(KERN_INFO "TODC real-time-clock was stopped."
- " Now starting...");
- cntl_b &= ~0x70;
- cntl_b |= 0x20;
- }
-
- todc_write_val(todc_info->control_b, cntl_b);
- } else if (todc_info->rtc_type == TODC_TYPE_DS17285) {
- u_char mode;
-
- mode = todc_read_val(TODC_TYPE_DS17285_CNTL_A);
- /* Make sure countdown clear is not set */
- mode &= ~0x40;
- /* Enable oscillator, extended register set */
- mode |= 0x30;
- todc_write_val(TODC_TYPE_DS17285_CNTL_A, mode);
-
- } else if (todc_info->rtc_type == TODC_TYPE_DS1501) {
- u_char month;
-
- todc_info->enable_read = TODC_DS1501_CNTL_B_TE;
- todc_info->enable_write = TODC_DS1501_CNTL_B_TE;
-
- month = todc_read_val(todc_info->month);
-
- if ((month & 0x80) == 0x80) {
- printk(KERN_INFO "TODC %s %s\n",
- "real-time-clock was stopped.",
- "Now starting...");
- month &= ~0x80;
- todc_write_val(todc_info->month, month);
- }
-
- cntl_b &= ~TODC_DS1501_CNTL_B_TE;
- todc_write_val(todc_info->control_b, cntl_b);
- } else { /* must be a m48txx type */
- u_char cntl_a;
-
- todc_info->enable_read = TODC_MK48TXX_CNTL_A_R;
- todc_info->enable_write = TODC_MK48TXX_CNTL_A_W;
-
- cntl_a = todc_read_val(todc_info->control_a);
-
- /* Check & clear STOP bit in control B register */
- if (cntl_b & TODC_MK48TXX_DAY_CB) {
- printk(KERN_INFO "TODC %s %s\n",
- "real-time-clock was stopped.",
- "Now starting...");
-
- cntl_a |= todc_info->enable_write;
- cntl_b &= ~TODC_MK48TXX_DAY_CB;/* Start Oscil */
-
- todc_write_val(todc_info->control_a, cntl_a);
- todc_write_val(todc_info->control_b, cntl_b);
- }
-
- /* Make sure READ & WRITE bits are cleared. */
- cntl_a &= ~(todc_info->enable_write | todc_info->enable_read);
- todc_write_val(todc_info->control_a, cntl_a);
- }
-
- return 0;
-}
-
-/*
- * There is some ugly stuff in that there are assumptions that for a mc146818,
- * the todc_info->control_a has the offset of the mc146818 Register B reg and
- * that the register'ss 'SET' bit is the same as the m48txx's write enable
- * bit in the control register of the m48txx (i.e., 0x80).
- *
- * It was done to make the code look simpler.
- */
-void
-todc_get_rtc_time(struct rtc_time *tm)
-{
- uint year = 0, mon = 0, mday = 0, hour = 0, min = 0, sec = 0;
- uint limit, i;
- u_char save_control, uip = 0;
- extern void GregorianDay(struct rtc_time *);
-
- spin_lock(&rtc_lock);
- save_control = todc_read_val(todc_info->control_a);
-
- if (todc_info->rtc_type != TODC_TYPE_MC146818) {
- limit = 1;
-
- switch (todc_info->rtc_type) {
- case TODC_TYPE_DS1553:
- case TODC_TYPE_DS1557:
- case TODC_TYPE_DS1743:
- case TODC_TYPE_DS1746: /* XXXX BAD HACK -> FIX */
- case TODC_TYPE_DS1747:
- case TODC_TYPE_DS17285:
- break;
- default:
- todc_write_val(todc_info->control_a,
- (save_control | todc_info->enable_read));
- }
- } else
- limit = 100000000;
-
- for (i=0; i<limit; i++) {
- if (todc_info->rtc_type == TODC_TYPE_MC146818)
- uip = todc_read_val(todc_info->RTC_FREQ_SELECT);
-
- sec = todc_read_val(todc_info->seconds) & 0x7f;
- min = todc_read_val(todc_info->minutes) & 0x7f;
- hour = todc_read_val(todc_info->hours) & 0x3f;
- mday = todc_read_val(todc_info->day_of_month) & 0x3f;
- mon = todc_read_val(todc_info->month) & 0x1f;
- year = todc_read_val(todc_info->year) & 0xff;
-
- if (todc_info->rtc_type == TODC_TYPE_MC146818) {
- uip |= todc_read_val(todc_info->RTC_FREQ_SELECT);
- if ((uip & RTC_UIP) == 0)
- break;
- }
- }
-
- if (todc_info->rtc_type != TODC_TYPE_MC146818) {
- switch (todc_info->rtc_type) {
- case TODC_TYPE_DS1553:
- case TODC_TYPE_DS1557:
- case TODC_TYPE_DS1743:
- case TODC_TYPE_DS1746: /* XXXX BAD HACK -> FIX */
- case TODC_TYPE_DS1747:
- case TODC_TYPE_DS17285:
- break;
- default:
- save_control &= ~(todc_info->enable_read);
- todc_write_val(todc_info->control_a, save_control);
- }
- }
- spin_unlock(&rtc_lock);
-
- if ((todc_info->rtc_type != TODC_TYPE_MC146818)
- || ((save_control & RTC_DM_BINARY) == 0)
- || RTC_ALWAYS_BCD) {
- BCD_TO_BIN(sec);
- BCD_TO_BIN(min);
- BCD_TO_BIN(hour);
- BCD_TO_BIN(mday);
- BCD_TO_BIN(mon);
- BCD_TO_BIN(year);
- }
-
- if ((year + 1900) < 1970) {
- year += 100;
- }
-
- tm->tm_sec = sec;
- tm->tm_min = min;
- tm->tm_hour = hour;
- tm->tm_mday = mday;
- tm->tm_mon = mon;
- tm->tm_year = year;
-
- GregorianDay(tm);
-}
-
-int
-todc_set_rtc_time(struct rtc_time *tm)
-{
- u_char save_control, save_freq_select = 0;
-
- spin_lock(&rtc_lock);
- save_control = todc_read_val(todc_info->control_a);
-
- /* Assuming MK48T59_RTC_CA_WRITE & RTC_SET are equal */
- todc_write_val(todc_info->control_a,
- (save_control | todc_info->enable_write));
- save_control &= ~(todc_info->enable_write); /* in case it was set */
-
- if (todc_info->rtc_type == TODC_TYPE_MC146818) {
- save_freq_select = todc_read_val(todc_info->RTC_FREQ_SELECT);
- todc_write_val(todc_info->RTC_FREQ_SELECT,
- save_freq_select | RTC_DIV_RESET2);
- }
-
- if ((todc_info->rtc_type != TODC_TYPE_MC146818)
- || ((save_control & RTC_DM_BINARY) == 0)
- || RTC_ALWAYS_BCD) {
- BIN_TO_BCD(tm->tm_sec);
- BIN_TO_BCD(tm->tm_min);
- BIN_TO_BCD(tm->tm_hour);
- BIN_TO_BCD(tm->tm_mon);
- BIN_TO_BCD(tm->tm_mday);
- BIN_TO_BCD(tm->tm_year);
- }
-
- todc_write_val(todc_info->seconds, tm->tm_sec);
- todc_write_val(todc_info->minutes, tm->tm_min);
- todc_write_val(todc_info->hours, tm->tm_hour);
- todc_write_val(todc_info->month, tm->tm_mon);
- todc_write_val(todc_info->day_of_month, tm->tm_mday);
- todc_write_val(todc_info->year, tm->tm_year);
-
- todc_write_val(todc_info->control_a, save_control);
-
- if (todc_info->rtc_type == TODC_TYPE_MC146818)
- todc_write_val(todc_info->RTC_FREQ_SELECT, save_freq_select);
-
- spin_unlock(&rtc_lock);
- return 0;
-}
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 322f86e93de..ae249c6bbbc 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -3,6 +3,8 @@
*
* 2004-2005 (c) Tundra Semiconductor Corp.
* Author: Alex Bounine (alexandreb@tundra.com)
+ * Author: Roy Zang (tie-fei.zang@freescale.com)
+ * Add pci interrupt router host
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -48,6 +50,8 @@
u32 tsi108_pci_cfg_base;
u32 tsi108_csr_vir_base;
+static struct device_node *pci_irq_node;
+static struct irq_host *pci_irq_host;
extern u32 get_vir_csrbase(void);
extern u32 tsi108_read_reg(u32 reg_offset);
@@ -378,6 +382,38 @@ static struct irq_chip tsi108_pci_irq = {
.unmask = tsi108_pci_irq_enable,
};
+static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+ *out_hwirq = intspec[0];
+ *out_flags = IRQ_TYPE_LEVEL_HIGH;
+ return 0;
+}
+
+static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{ unsigned int irq;
+ DBG("%s(%d, 0x%lx)\n", __FUNCTION__, virq, hw);
+ if ((virq >= 1) && (virq <= 4)){
+ irq = virq + IRQ_PCI_INTAD_BASE - 1;
+ get_irq_desc(irq)->status |= IRQ_LEVEL;
+ set_irq_chip(irq, &tsi108_pci_irq);
+ }
+ return 0;
+}
+
+static int pci_irq_host_match(struct irq_host *h, struct device_node *node)
+{
+ return pci_irq_node == node;
+}
+
+static struct irq_host_ops pci_irq_host_ops = {
+ .match = pci_irq_host_match,
+ .map = pci_irq_host_map,
+ .xlate = pci_irq_host_xlate,
+};
+
/*
* Exported functions
*/
@@ -391,15 +427,15 @@ static struct irq_chip tsi108_pci_irq = {
* to the MPIC.
*/
-void __init tsi108_pci_int_init(void)
+void __init tsi108_pci_int_init(struct device_node *node)
{
- u_int i;
-
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- for (i = 0; i < NUM_PCI_IRQS; i++) {
- irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq;
- irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL;
+ pci_irq_node = of_node_get(node);
+ pci_irq_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &pci_irq_host_ops, 0);
+ if (pci_irq_host == NULL) {
+ printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
+ return;
}
init_pci_source();
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 109d874ecfb..51d97588e76 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -3,5 +3,10 @@
ifdef CONFIG_PPC64
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o start.o \
- nonstdio.o
+
+obj-y += xmon.o setjmp.o start.o nonstdio.o
+
+ifdef CONFIG_XMON_DISASSEMBLY
+obj-y += ppc-dis.o ppc-opc.o
+obj-$(CONFIG_SPU_BASE) += spu-dis.o spu-opc.o
+endif
diff --git a/arch/powerpc/xmon/dis-asm.h b/arch/powerpc/xmon/dis-asm.h
new file mode 100644
index 00000000000..be3533b93f3
--- /dev/null
+++ b/arch/powerpc/xmon/dis-asm.h
@@ -0,0 +1,31 @@
+#ifndef _POWERPC_XMON_DIS_ASM_H
+#define _POWERPC_XMON_DIS_ASM_H
+/*
+ * Copyright (C) 2006 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern void print_address (unsigned long memaddr);
+
+#ifdef CONFIG_XMON_DISASSEMBLY
+extern int print_insn_powerpc(unsigned long insn, unsigned long memaddr);
+extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
+#else
+static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
+{
+ printf("%.8x", insn);
+ return 0;
+}
+
+static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
+{
+ printf("%.8x", insn);
+ return 0;
+}
+#endif
+
+#endif /* _POWERPC_XMON_DIS_ASM_H */
diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index ac0a9d2427e..89098f320ad 100644
--- a/arch/powerpc/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
@@ -1,5 +1,6 @@
/* ppc-dis.c -- Disassemble PowerPC instructions
- Copyright 1994 Free Software Foundation, Inc.
+ Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
@@ -16,27 +17,36 @@ the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+#include <asm/cputable.h>
#include "nonstdio.h"
#include "ansidecl.h"
#include "ppc.h"
-
-extern void print_address (unsigned long memaddr);
+#include "dis-asm.h"
/* Print a PowerPC or POWER instruction. */
int
-print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
+print_insn_powerpc (unsigned long insn, unsigned long memaddr)
{
const struct powerpc_opcode *opcode;
const struct powerpc_opcode *opcode_end;
unsigned long op;
+ int dialect;
- if (dialect == 0)
- dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
+ dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
| PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
+ if (cpu_has_feature(CPU_FTRS_POWER5))
+ dialect |= PPC_OPCODE_POWER5;
+
+ if (cpu_has_feature(CPU_FTRS_CELL))
+ dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC;
+
+ if (cpu_has_feature(CPU_FTRS_POWER6))
+ dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC;
+
/* Get the major opcode of the instruction. */
op = PPC_OP (insn);
@@ -121,7 +131,8 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
}
/* Print the operand as directed by the flags. */
- if ((operand->flags & PPC_OPERAND_GPR) != 0)
+ if ((operand->flags & PPC_OPERAND_GPR) != 0
+ || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0))
printf("r%ld", value);
else if ((operand->flags & PPC_OPERAND_FPR) != 0)
printf("f%ld", value);
@@ -137,7 +148,7 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
else
{
if (operand->bits == 3)
- printf("cr%d", value);
+ printf("cr%ld", value);
else
{
static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index 5ee8fc32f82..5d841f4b353 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -1,6 +1,6 @@
/* ppc-opc.c -- PowerPC opcode list
- Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003
- Free Software Foundation, Inc.
+ Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+ 2005 Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
This file is part of GDB, GAS, and the GNU binutils.
@@ -17,8 +17,8 @@
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the Free
- Software Foundation, 59 Temple Place - Suite 330, Boston, MA
- 02111-1307, USA. */
+ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
#include <linux/stddef.h>
#include "nonstdio.h"
@@ -86,6 +86,8 @@ static unsigned long insert_sh6 (unsigned long, long, int, const char **);
static long extract_sh6 (unsigned long, int, int *);
static unsigned long insert_spr (unsigned long, long, int, const char **);
static long extract_spr (unsigned long, int, int *);
+static unsigned long insert_sprg (unsigned long, long, int, const char **);
+static long extract_sprg (unsigned long, int, int *);
static unsigned long insert_tbr (unsigned long, long, int, const char **);
static long extract_tbr (unsigned long, int, int *);
static unsigned long insert_ev2 (unsigned long, long, int, const char **);
@@ -196,8 +198,11 @@ const struct powerpc_operand powerpc_operands[] =
#define BOE BO + 1
{ 5, 21, insert_boe, extract_boe, 0 },
+#define BH BOE + 1
+ { 2, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
/* The BT field in an X or XL form instruction. */
-#define BT BOE + 1
+#define BT BH + 1
{ 5, 21, NULL, NULL, PPC_OPERAND_CR },
/* The condition register number portion of the BI field in a B form
@@ -301,10 +306,14 @@ const struct powerpc_operand powerpc_operands[] =
#define L FXM4 + 1
{ 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
- /* The LEV field in a POWER SC form instruction. */
-#define LEV L + 1
+ /* The LEV field in a POWER SVC form instruction. */
+#define SVC_LEV L + 1
{ 7, 5, NULL, NULL, 0 },
+ /* The LEV field in an SC form instruction. */
+#define LEV SVC_LEV + 1
+ { 7, 5, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
#define LI LEV + 1
@@ -346,7 +355,7 @@ const struct powerpc_operand powerpc_operands[] =
/* The MO field in an mbar instruction. */
#define MO MB6 + 1
- { 5, 21, NULL, NULL, 0 },
+ { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
/* The NB field in an X form instruction. The value 32 is stored as
0. */
@@ -364,30 +373,38 @@ const struct powerpc_operand powerpc_operands[] =
#define RA_MASK (0x1f << 16)
{ 5, 16, NULL, NULL, PPC_OPERAND_GPR },
+ /* As above, but 0 in the RA field means zero, not r0. */
+#define RA0 RA + 1
+ { 5, 16, NULL, NULL, PPC_OPERAND_GPR_0 },
+
/* The RA field in the DQ form lq instruction, which has special
value restrictions. */
-#define RAQ RA + 1
- { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR },
+#define RAQ RA0 + 1
+ { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in a D or X form instruction which is an updating
load, which means that the RA field may not be zero and may not
equal the RT field. */
#define RAL RAQ + 1
- { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in an lmw instruction, which has special value
restrictions. */
#define RAM RAL + 1
- { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 },
/* The RA field in a D or X form instruction which is an updating
store or an updating floating point load, which means that the RA
field may not be zero. */
#define RAS RAM + 1
- { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
+ { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 },
+
+ /* The RA field of the tlbwe instruction, which is optional. */
+#define RAOPT RAS + 1
+ { 5, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
/* The RB field in an X, XO, M, or MDS form instruction. */
-#define RB RAS + 1
+#define RB RAOPT + 1
#define RB_MASK (0x1f << 11)
{ 5, 11, NULL, NULL, PPC_OPERAND_GPR },
@@ -408,15 +425,20 @@ const struct powerpc_operand powerpc_operands[] =
/* The RS field of the DS form stq instruction, which has special
value restrictions. */
#define RSQ RS + 1
- { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR },
+ { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR_0 },
/* The RT field of the DQ form lq instruction, which has special
value restrictions. */
#define RTQ RSQ + 1
- { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR },
+ { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR_0 },
+
+ /* The RS field of the tlbwe instruction, which is optional. */
+#define RSO RTQ + 1
+#define RTO RSO
+ { 5, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
/* The SH field in an X or M form instruction. */
-#define SH RTQ + 1
+#define SH RSO + 1
#define SH_MASK (0x1f << 11)
{ 5, 11, NULL, NULL, 0 },
@@ -425,8 +447,12 @@ const struct powerpc_operand powerpc_operands[] =
#define SH6_MASK ((0x1f << 11) | (1 << 1))
{ 6, 1, insert_sh6, extract_sh6, 0 },
+ /* The SH field of the tlbwe instruction, which is optional. */
+#define SHO SH6 + 1
+ { 5, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
/* The SI field in a D form instruction. */
-#define SI SH6 + 1
+#define SI SHO + 1
{ 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
/* The SI field in a D form instruction when we accept a wide range
@@ -448,8 +474,7 @@ const struct powerpc_operand powerpc_operands[] =
/* The SPRG register number in an XFX form m[ft]sprg instruction. */
#define SPRG SPRBAT + 1
-#define SPRG_MASK (0x3 << 16)
- { 2, 16, NULL, NULL, 0 },
+ { 5, 16, insert_sprg, extract_sprg, 0 },
/* The SR field in an X form instruction. */
#define SR SPRG + 1
@@ -536,10 +561,45 @@ const struct powerpc_operand powerpc_operands[] =
#define WS_MASK (0x7 << 11)
{ 3, 11, NULL, NULL, 0 },
- /* The L field in an mtmsrd instruction */
+ /* The L field in an mtmsrd or A form instruction. */
#define MTMSRD_L WS + 1
+#define A_L MTMSRD_L
{ 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL },
+ /* The DCM field in a Z form instruction. */
+#define DCM MTMSRD_L + 1
+ { 6, 16, NULL, NULL, 0 },
+
+ /* Likewise, the DGM field in a Z form instruction. */
+#define DGM DCM + 1
+ { 6, 16, NULL, NULL, 0 },
+
+#define TE DGM + 1
+ { 5, 11, NULL, NULL, 0 },
+
+#define RMC TE + 1
+ { 2, 21, NULL, NULL, 0 },
+
+#define R RMC + 1
+ { 1, 15, NULL, NULL, 0 },
+
+#define SP R + 1
+ { 2, 11, NULL, NULL, 0 },
+
+#define S SP + 1
+ { 1, 11, NULL, NULL, 0 },
+
+ /* SH field starting at bit position 16. */
+#define SH16 S + 1
+ { 6, 10, NULL, NULL, 0 },
+
+ /* The L field in an X form with the RT field fixed instruction. */
+#define XRT_L SH16 + 1
+ { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
+ /* The EH field in larx instruction. */
+#define EH XRT_L + 1
+ { 1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL },
};
/* The functions used to insert and extract complicated operands. */
@@ -550,7 +610,6 @@ const struct powerpc_operand powerpc_operands[] =
and the extraction function just checks that the fields are the
same. */
-/*ARGSUSED*/
static unsigned long
insert_bat (unsigned long insn,
long value ATTRIBUTE_UNUSED,
@@ -576,7 +635,6 @@ extract_bat (unsigned long insn,
and the extraction function just checks that the fields are the
same. */
-/*ARGSUSED*/
static unsigned long
insert_bba (unsigned long insn,
long value ATTRIBUTE_UNUSED,
@@ -599,7 +657,6 @@ extract_bba (unsigned long insn,
/* The BD field in a B form instruction. The lower two bits are
forced to zero. */
-/*ARGSUSED*/
static unsigned long
insert_bd (unsigned long insn,
long value,
@@ -609,7 +666,6 @@ insert_bd (unsigned long insn,
return insn | (value & 0xfffc);
}
-/*ARGSUSED*/
static long
extract_bd (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -631,7 +687,6 @@ extract_bd (unsigned long insn,
in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000
for branch on CTR. We only handle the taken/not-taken hint here. */
-/*ARGSUSED*/
static unsigned long
insert_bdm (unsigned long insn,
long value,
@@ -677,7 +732,6 @@ extract_bdm (unsigned long insn,
This is like BDM, above, except that the branch is expected to be
taken. */
-/*ARGSUSED*/
static unsigned long
insert_bdp (unsigned long insn,
long value,
@@ -831,7 +885,6 @@ extract_boe (unsigned long insn,
/* The DQ field in a DQ form instruction. This is like D, but the
lower four bits are forced to zero. */
-/*ARGSUSED*/
static unsigned long
insert_dq (unsigned long insn,
long value,
@@ -843,7 +896,6 @@ insert_dq (unsigned long insn,
return insn | (value & 0xfff0);
}
-/*ARGSUSED*/
static long
extract_dq (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -918,7 +970,6 @@ extract_ev8 (unsigned long insn,
/* The DS field in a DS form instruction. This is like D, but the
lower two bits are forced to zero. */
-/*ARGSUSED*/
static unsigned long
insert_ds (unsigned long insn,
long value,
@@ -930,7 +981,6 @@ insert_ds (unsigned long insn,
return insn | (value & 0xfffc);
}
-/*ARGSUSED*/
static long
extract_ds (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -941,7 +991,6 @@ extract_ds (unsigned long insn,
/* The DE field in a DE form instruction. */
-/*ARGSUSED*/
static unsigned long
insert_de (unsigned long insn,
long value,
@@ -953,7 +1002,6 @@ insert_de (unsigned long insn,
return insn | ((value << 4) & 0xfff0);
}
-/*ARGSUSED*/
static long
extract_de (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -964,7 +1012,6 @@ extract_de (unsigned long insn,
/* The DES field in a DES form instruction. */
-/*ARGSUSED*/
static unsigned long
insert_des (unsigned long insn,
long value,
@@ -978,7 +1025,6 @@ insert_des (unsigned long insn,
return insn | ((value << 2) & 0xfff0);
}
-/*ARGSUSED*/
static long
extract_des (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -995,17 +1041,33 @@ insert_fxm (unsigned long insn,
int dialect,
const char **errmsg)
{
+ /* If we're handling the mfocrf and mtocrf insns ensure that exactly
+ one bit of the mask field is set. */
+ if ((insn & (1 << 20)) != 0)
+ {
+ if (value == 0 || (value & -value) != value)
+ {
+ *errmsg = _("invalid mask field");
+ value = 0;
+ }
+ }
+
/* If the optional field on mfcr is missing that means we want to use
the old form of the instruction that moves the whole cr. In that
case we'll have VALUE zero. There doesn't seem to be a way to
distinguish this from the case where someone writes mfcr %r3,0. */
- if (value == 0)
+ else if (value == 0)
;
/* If only one bit of the FXM field is set, we can use the new form
of the instruction, which is faster. Unlike the Power4 branch hint
- encoding, this is not backward compatible. */
- else if ((dialect & PPC_OPCODE_POWER4) != 0 && (value & -value) == value)
+ encoding, this is not backward compatible. Do not generate the
+ new form unless -mpower4 has been given, or -many and the two
+ operand form of mfcr was used. */
+ else if ((value & -value) == value
+ && ((dialect & PPC_OPCODE_POWER4) != 0
+ || ((dialect & PPC_OPCODE_ANY) != 0
+ && (insn & (0x3ff << 1)) == 19 << 1)))
insn |= 1 << 20;
/* Any other value on mfcr is an error. */
@@ -1020,7 +1082,7 @@ insert_fxm (unsigned long insn,
static long
extract_fxm (unsigned long insn,
- int dialect,
+ int dialect ATTRIBUTE_UNUSED,
int *invalid)
{
long mask = (insn >> 12) & 0xff;
@@ -1028,14 +1090,9 @@ extract_fxm (unsigned long insn,
/* Is this a Power4 insn? */
if ((insn & (1 << 20)) != 0)
{
- if ((dialect & PPC_OPCODE_POWER4) == 0)
+ /* Exactly one bit of MASK should be set. */
+ if (mask == 0 || (mask & -mask) != mask)
*invalid = 1;
- else
- {
- /* Exactly one bit of MASK should be set. */
- if (mask == 0 || (mask & -mask) != mask)
- *invalid = 1;
- }
}
/* Check that non-power4 form of mfcr has a zero MASK. */
@@ -1051,7 +1108,6 @@ extract_fxm (unsigned long insn,
/* The LI field in an I form instruction. The lower two bits are
forced to zero. */
-/*ARGSUSED*/
static unsigned long
insert_li (unsigned long insn,
long value,
@@ -1063,7 +1119,6 @@ insert_li (unsigned long insn,
return insn | (value & 0x3fffffc);
}
-/*ARGSUSED*/
static long
extract_li (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -1163,7 +1218,6 @@ extract_mbe (unsigned long insn,
/* The MB or ME field in an MD or MDS form instruction. The high bit
is wrapped to the low end. */
-/*ARGSUSED*/
static unsigned long
insert_mb6 (unsigned long insn,
long value,
@@ -1173,7 +1227,6 @@ insert_mb6 (unsigned long insn,
return insn | ((value & 0x1f) << 6) | (value & 0x20);
}
-/*ARGSUSED*/
static long
extract_mb6 (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -1198,7 +1251,6 @@ insert_nb (unsigned long insn,
return insn | ((value & 0x1f) << 11);
}
-/*ARGSUSED*/
static long
extract_nb (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -1217,7 +1269,6 @@ extract_nb (unsigned long insn,
invalid, since we never want to recognize an instruction which uses
a field of this type. */
-/*ARGSUSED*/
static unsigned long
insert_nsi (unsigned long insn,
long value,
@@ -1269,7 +1320,6 @@ insert_ram (unsigned long insn,
/* The RA field in the DQ form lq instruction, which has special
value restrictions. */
-/*ARGSUSED*/
static unsigned long
insert_raq (unsigned long insn,
long value,
@@ -1304,7 +1354,6 @@ insert_ras (unsigned long insn,
function just copies the BT field into the BA field, and the
extraction function just checks that the fields are the same. */
-/*ARGSUSED*/
static unsigned long
insert_rbs (unsigned long insn,
long value ATTRIBUTE_UNUSED,
@@ -1327,7 +1376,6 @@ extract_rbs (unsigned long insn,
/* The RT field of the DQ form lq instruction, which has special
value restrictions. */
-/*ARGSUSED*/
static unsigned long
insert_rtq (unsigned long insn,
long value,
@@ -1342,7 +1390,6 @@ insert_rtq (unsigned long insn,
/* The RS field of the DS form stq instruction, which has special
value restrictions. */
-/*ARGSUSED*/
static unsigned long
insert_rsq (unsigned long insn,
long value ATTRIBUTE_UNUSED,
@@ -1356,7 +1403,6 @@ insert_rsq (unsigned long insn,
/* The SH field in an MD form instruction. This is split. */
-/*ARGSUSED*/
static unsigned long
insert_sh6 (unsigned long insn,
long value,
@@ -1366,7 +1412,6 @@ insert_sh6 (unsigned long insn,
return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
}
-/*ARGSUSED*/
static long
extract_sh6 (unsigned long insn,
int dialect ATTRIBUTE_UNUSED,
@@ -1395,6 +1440,47 @@ extract_spr (unsigned long insn,
return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
}
+/* Some dialects have 8 SPRG registers instead of the standard 4. */
+
+static unsigned long
+insert_sprg (unsigned long insn,
+ long value,
+ int dialect,
+ const char **errmsg)
+{
+ /* This check uses PPC_OPCODE_403 because PPC405 is later defined
+ as a synonym. If ever a 405 specific dialect is added this
+ check should use that instead. */
+ if (value > 7
+ || (value > 3
+ && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+ *errmsg = _("invalid sprg number");
+
+ /* If this is mfsprg4..7 then use spr 260..263 which can be read in
+ user mode. Anything else must use spr 272..279. */
+ if (value <= 3 || (insn & 0x100) != 0)
+ value |= 0x10;
+
+ return insn | ((value & 0x17) << 16);
+}
+
+static long
+extract_sprg (unsigned long insn,
+ int dialect,
+ int *invalid)
+{
+ unsigned long val = (insn >> 16) & 0x1f;
+
+ /* mfsprg can use 260..263 and 272..279. mtsprg only uses spr 272..279.
+ If not BOOKE or 405, then both use only 272..275. */
+ if (val <= 3
+ || (val < 0x10 && (insn & 0x100) != 0)
+ || (val - 0x10 > 3
+ && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+ *invalid = 1;
+ return val & 7;
+}
+
/* The TBR field in an XFX instruction. This is just like SPR, but it
is optional. When TBR is omitted, it must be inserted as 268 (the
magic number of the TB register). These functions treat 0
@@ -1460,6 +1546,9 @@ extract_tbr (unsigned long insn,
/* An A_MASK with the FRA and FRC fields fixed. */
#define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
+/* An AFRAFRC_MASK, but with L bit clear. */
+#define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16))
+
/* A B form instruction. */
#define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1))
#define B_MASK B (0x3f, 1, 1)
@@ -1494,11 +1583,11 @@ extract_tbr (unsigned long insn,
/* An Context form instruction. */
#define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7))
-#define CTX_MASK CTX(0x3f, 0x7)
+#define CTX_MASK CTX(0x3f, 0x7)
/* An User Context form instruction. */
#define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
-#define UCTX_MASK UCTX(0x3f, 0x1f)
+#define UCTX_MASK UCTX(0x3f, 0x1f)
/* The main opcode mask with the RA field clear. */
#define DRA_MASK (OP_MASK | RA_MASK)
@@ -1570,12 +1659,21 @@ extract_tbr (unsigned long insn,
/* An X form instruction. */
#define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
+/* A Z form instruction. */
+#define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1))
+
/* An X form instruction with the RC bit specified. */
#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
+/* A Z form instruction with the RC bit specified. */
+#define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1))
+
/* The mask for an X form instruction. */
#define X_MASK XRC (0x3f, 0x3ff, 1)
+/* The mask for a Z form instruction. */
+#define Z_MASK ZRC (0x3f, 0x1ff, 1)
+
/* An X_MASK with the RA field fixed. */
#define XRA_MASK (X_MASK | RA_MASK)
@@ -1585,6 +1683,9 @@ extract_tbr (unsigned long insn,
/* An X_MASK with the RT field fixed. */
#define XRT_MASK (X_MASK | RT_MASK)
+/* An XRT_MASK mask with the L bits clear. */
+#define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21))
+
/* An X_MASK with the RA and RB fields fixed. */
#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
@@ -1597,8 +1698,8 @@ extract_tbr (unsigned long insn,
/* An XRTRA_MASK, but with L bit clear. */
#define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21))
-/* An X form comparison instruction. */
-#define XCMPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
+/* An X form instruction with the L bit specified. */
+#define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
/* The mask for an X form comparison instruction. */
#define XCMP_MASK (X_MASK | (((unsigned long)1) << 22))
@@ -1621,6 +1722,9 @@ extract_tbr (unsigned long insn,
/* An X form sync instruction with everything filled in except the LS field. */
#define XSYNC_MASK (0xff9fffff)
+/* An X_MASK, but with the EH bit clear. */
+#define XEH_MASK (X_MASK & ~((unsigned long )1))
+
/* An X form AltiVec dss instruction. */
#define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25))
#define XDSS_MASK XDSS(0x3f, 0x3ff, 1)
@@ -1663,6 +1767,9 @@ extract_tbr (unsigned long insn,
#define XLYBB_MASK (XLYLK_MASK | BB_MASK)
#define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
+/* A mask for branch instructions using the BH field. */
+#define XLBH_MASK (XL_MASK | (0x1c << 11))
+
/* An XL_MASK with the BO and BB fields fixed. */
#define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
@@ -1682,11 +1789,12 @@ extract_tbr (unsigned long insn,
#define XS_MASK XS (0x3f, 0x1ff, 1)
/* A mask for the FXM version of an XFX form instruction. */
-#define XFXFXM_MASK (X_MASK | (1 << 11))
+#define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20))
/* An XFX form instruction with the FXM field filled in. */
-#define XFXM(op, xop, fxm) \
- (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12))
+#define XFXM(op, xop, fxm, p4) \
+ (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \
+ | ((unsigned long)(p4) << 20))
/* An XFX form instruction with the SPR field filled in. */
#define XSPR(op, xop, spr) \
@@ -1699,7 +1807,7 @@ extract_tbr (unsigned long insn,
/* An XFX form instruction with the SPR field filled in except for the
SPRG field. */
-#define XSPRG_MASK (XSPR_MASK &~ SPRG_MASK)
+#define XSPRG_MASK (XSPR_MASK & ~(0x17 << 16))
/* An X form instruction with everything filled in except the E field. */
#define XE_MASK (0xffff7fff)
@@ -1769,6 +1877,9 @@ extract_tbr (unsigned long insn,
#define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON
#define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM
#define POWER4 PPC_OPCODE_POWER4
+#define POWER5 PPC_OPCODE_POWER5
+#define POWER6 PPC_OPCODE_POWER6
+#define CELL PPC_OPCODE_CELL
#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC
#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC
#define PPC403 PPC_OPCODE_403
@@ -1776,7 +1887,7 @@ extract_tbr (unsigned long insn,
#define PPC440 PPC_OPCODE_440
#define PPC750 PPC
#define PPC860 PPC
-#define PPCVEC PPC_OPCODE_ALTIVEC | PPC_OPCODE_PPC
+#define PPCVEC PPC_OPCODE_ALTIVEC
#define POWER PPC_OPCODE_POWER
#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2
#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2
@@ -1790,6 +1901,7 @@ extract_tbr (unsigned long insn,
#define BOOKE PPC_OPCODE_BOOKE
#define BOOKE64 PPC_OPCODE_BOOKE64
#define CLASSIC PPC_OPCODE_CLASSIC
+#define PPCE300 PPC_OPCODE_E300
#define PPCSPE PPC_OPCODE_SPE
#define PPCISEL PPC_OPCODE_ISEL
#define PPCEFS PPC_OPCODE_EFS
@@ -1952,6 +2064,41 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "nmaclhwso.", XO(4,494,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
{ "mfvscr", VX(4, 1540), VX_MASK, PPCVEC, { VD } },
{ "mtvscr", VX(4, 1604), VX_MASK, PPCVEC, { VB } },
+
+ /* Double-precision opcodes. */
+ /* Some of these conflict with AltiVec, so move them before, since
+ PPCVEC includes the PPC_OPCODE_PPC set. */
+{ "efscfd", VX(4, 719), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdabs", VX(4, 740), VX_MASK, PPCEFS, { RS, RA } },
+{ "efdnabs", VX(4, 741), VX_MASK, PPCEFS, { RS, RA } },
+{ "efdneg", VX(4, 742), VX_MASK, PPCEFS, { RS, RA } },
+{ "efdadd", VX(4, 736), VX_MASK, PPCEFS, { RS, RA, RB } },
+{ "efdsub", VX(4, 737), VX_MASK, PPCEFS, { RS, RA, RB } },
+{ "efdmul", VX(4, 744), VX_MASK, PPCEFS, { RS, RA, RB } },
+{ "efddiv", VX(4, 745), VX_MASK, PPCEFS, { RS, RA, RB } },
+{ "efdcmpgt", VX(4, 748), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdcmplt", VX(4, 749), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdcmpeq", VX(4, 750), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdtstgt", VX(4, 764), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdtstlt", VX(4, 765), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdtsteq", VX(4, 766), VX_MASK, PPCEFS, { CRFD, RA, RB } },
+{ "efdcfsi", VX(4, 753), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfsid", VX(4, 739), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfui", VX(4, 752), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfuid", VX(4, 738), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfsf", VX(4, 755), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfuf", VX(4, 754), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctsi", VX(4, 757), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctsidz",VX(4, 747), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctsiz", VX(4, 762), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctui", VX(4, 756), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctuidz",VX(4, 746), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctuiz", VX(4, 760), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctsf", VX(4, 759), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdctuf", VX(4, 758), VX_MASK, PPCEFS, { RS, RB } },
+{ "efdcfs", VX(4, 751), VX_MASK, PPCEFS, { RS, RB } },
+ /* End of double-precision opcodes. */
+
{ "vaddcuw", VX(4, 384), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddfp", VX(4, 10), VX_MASK, PPCVEC, { VD, VA, VB } },
{ "vaddsbs", VX(4, 768), VX_MASK, PPCVEC, { VD, VA, VB } },
@@ -2389,16 +2536,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "li", OP(14), DRA_MASK, PPCCOM, { RT, SI } },
{ "lil", OP(14), DRA_MASK, PWRCOM, { RT, SI } },
-{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA, SI } },
-{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA } },
-{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA, NSI } },
-{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA } },
+{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA0, SI } },
+{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA0 } },
+{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA0, NSI } },
+{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA0 } },
{ "lis", OP(15), DRA_MASK, PPCCOM, { RT, SISIGNOPT } },
{ "liu", OP(15), DRA_MASK, PWRCOM, { RT, SISIGNOPT } },
-{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA,SISIGNOPT } },
-{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA,SISIGNOPT } },
-{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA, NSI } },
+{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA0,SISIGNOPT } },
+{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA0,SISIGNOPT } },
+{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA0, NSI } },
{ "bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } },
{ "bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } },
@@ -2665,9 +2812,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "bcla+", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDPA } },
{ "bcla", B(16,1,1), B_MASK, COM, { BO, BI, BDA } },
-{ "sc", SC(17,1,0), 0xffffffff, PPC, { 0 } },
-{ "svc", SC(17,0,0), SC_MASK, POWER, { LEV, FL1, FL2 } },
-{ "svcl", SC(17,0,1), SC_MASK, POWER, { LEV, FL1, FL2 } },
+{ "sc", SC(17,1,0), SC_MASK, PPC, { LEV } },
+{ "svc", SC(17,0,0), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } },
+{ "svcl", SC(17,0,1), SC_MASK, POWER, { SVC_LEV, FL1, FL2 } },
{ "svca", SC(17,1,0), SC_MASK, PWRCOM, { SV } },
{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } },
@@ -2890,12 +3037,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
-{ "bclr", XLLK(19,16,0), XLYBB_MASK, PPCCOM, { BO, BI } },
-{ "bclrl", XLLK(19,16,1), XLYBB_MASK, PPCCOM, { BO, BI } },
{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
+{ "bclr", XLLK(19,16,0), XLBH_MASK, PPCCOM, { BO, BI, BH } },
+{ "bclrl", XLLK(19,16,1), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bclre", XLLK(19,17,0), XLBB_MASK, BOOKE64, { BO, BI } },
@@ -2924,14 +3071,23 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "crand", XL(19,257), XL_MASK, COM, { BT, BA, BB } },
+{ "hrfid", XL(19,274), 0xffffffff, POWER5 | CELL, { 0 } },
+
{ "crset", XL(19,289), XL_MASK, PPCCOM, { BT, BAT, BBA } },
{ "creqv", XL(19,289), XL_MASK, COM, { BT, BA, BB } },
+{ "doze", XL(19,402), 0xffffffff, POWER6, { 0 } },
+
{ "crorc", XL(19,417), XL_MASK, COM, { BT, BA, BB } },
+{ "nap", XL(19,434), 0xffffffff, POWER6, { 0 } },
+
{ "crmove", XL(19,449), XL_MASK, PPCCOM, { BT, BA, BBA } },
{ "cror", XL(19,449), XL_MASK, COM, { BT, BA, BB } },
+{ "sleep", XL(19,466), 0xffffffff, POWER6, { 0 } },
+{ "rvwinkle", XL(19,498), 0xffffffff, POWER6, { 0 } },
+
{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, { 0 } },
{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, { 0 } },
{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
@@ -3074,12 +3230,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } },
{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
{ "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } },
-{ "bcctr", XLLK(19,528,0), XLYBB_MASK, PPCCOM, { BO, BI } },
{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
-{ "bcctrl", XLLK(19,528,1), XLYBB_MASK, PPCCOM, { BO, BI } },
{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
+{ "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, { BO, BI, BH } },
+{ "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, { BO, BI, BH } },
{ "bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, { BO, BI } },
{ "bcctre", XLLK(19,529,0), XLYBB_MASK, BOOKE64, { BO, BI } },
@@ -3158,8 +3314,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "rldcr", MDS(30,9,0), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
-{ "cmpw", XCMPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
-{ "cmpd", XCMPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
+{ "cmpw", XOPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
+{ "cmpd", XOPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } },
{ "cmp", X(31,0), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
@@ -3228,17 +3384,18 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "iseleq", X(31,79), X_MASK, PPCISEL, { RT, RA, RB } },
{ "isel", XISEL(31,15), XISEL_MASK, PPCISEL, { RT, RA, RB, CRB } },
-{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4, { RT } },
+{ "mfocrf", XFXM(31,19,0,1), XFXFXM_MASK, COM, { RT, FXM } },
+{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4 | COM, { RT } },
{ "mfcr", X(31,19), XFXFXM_MASK, POWER4, { RT, FXM4 } },
-{ "lwarx", X(31,20), X_MASK, PPC, { RT, RA, RB } },
+{ "lwarx", X(31,20), XEH_MASK, PPC, { RT, RA0, RB, EH } },
-{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA, RB } },
+{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA0, RB } },
-{ "icbt", X(31,22), X_MASK, BOOKE, { CT, RA, RB } },
+{ "icbt", X(31,22), X_MASK, BOOKE|PPCE300, { CT, RA, RB } },
{ "icbt", X(31,262), XRT_MASK, PPC403, { RA, RB } },
-{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA, RB } },
+{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lx", X(31,23), X_MASK, PWRCOM, { RT, RA, RB } },
{ "slw", XRC(31,24,0), X_MASK, PPCCOM, { RA, RS, RB } },
@@ -3262,10 +3419,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "icbte", X(31,30), X_MASK, BOOKE64, { CT, RA, RB } },
-{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA0, RB } },
-{ "cmplw", XCMPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
-{ "cmpld", XCMPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
+{ "cmplw", XOPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
+{ "cmpld", XOPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } },
{ "cmpl", X(31,32), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
@@ -3324,15 +3481,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfmsr", X(31,83), XRARB_MASK, COM, { RT } },
-{ "ldarx", X(31,84), X_MASK, PPC64, { RT, RA, RB } },
+{ "ldarx", X(31,84), XEH_MASK, PPC64, { RT, RA0, RB, EH } },
-{ "dcbf", X(31,86), XRT_MASK, PPC, { RA, RB } },
+{ "dcbfl", XOPL(31,86,1), XRT_MASK, POWER5, { RA, RB } },
+{ "dcbf", X(31,86), XLRT_MASK, PPC, { RA, RB, XRT_L } },
-{ "lbzx", X(31,87), X_MASK, COM, { RT, RA, RB } },
+{ "lbzx", X(31,87), X_MASK, COM, { RT, RA0, RB } },
{ "dcbfe", X(31,94), XRT_MASK, BOOKE64, { RA, RB } },
-{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "neg", XO(31,104,0,0), XORB_MASK, COM, { RT, RA } },
{ "neg.", XO(31,104,0,1), XORB_MASK, COM, { RT, RA } },
@@ -3350,12 +3508,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } },
+{ "popcntb", X(31,122), XRB_MASK, POWER5, { RA, RS } },
+
{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } },
{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } },
{ "not.", XRC(31,124,1), X_MASK, COM, { RA, RS, RBS } },
{ "nor.", XRC(31,124,1), X_MASK, COM, { RA, RS, RB } },
-{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "lbzuxe", X(31,127), X_MASK, BOOKE64, { RT, RAL, RB } },
@@ -3383,21 +3543,22 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "dcbtstlse",X(31,142),X_MASK, PPCCHLK64, { CT, RA, RB }},
-{ "mtcr", XFXM(31,144,0xff), XRARB_MASK, COM, { RS }},
+{ "mtocrf", XFXM(31,144,0,1), XFXFXM_MASK, COM, { FXM, RS } },
+{ "mtcr", XFXM(31,144,0xff,0), XRARB_MASK, COM, { RS }},
{ "mtcrf", X(31,144), XFXFXM_MASK, COM, { FXM, RS } },
{ "mtmsr", X(31,146), XRARB_MASK, COM, { RS } },
-{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA, RB } },
+{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA0, RB } },
-{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA, RB } },
+{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA0, RB } },
-{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA, RB } },
+{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA0, RB } },
{ "stx", X(31,151), X_MASK, PWRCOM, { RS, RA, RB } },
-{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA0, RB } },
-{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "slq", XRC(31,152,0), X_MASK, M601, { RA, RS, RB } },
{ "slq.", XRC(31,152,1), X_MASK, M601, { RA, RS, RB } },
@@ -3405,6 +3566,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sle", XRC(31,153,0), X_MASK, M601, { RA, RS, RB } },
{ "sle.", XRC(31,153,1), X_MASK, M601, { RA, RS, RB } },
+{ "prtyw", X(31,154), XRB_MASK, POWER6, { RA, RS } },
+
{ "wrteei", X(31,163), XE_MASK, PPC403 | BOOKE, { E } },
{ "dcbtls", X(31,166), X_MASK, PPCCHLK, { CT, RA, RB }},
@@ -3415,11 +3578,13 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "stdux", X(31,181), X_MASK, PPC64, { RS, RAS, RB } },
{ "stwux", X(31,183), X_MASK, PPCCOM, { RS, RAS, RB } },
-{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA, RB } },
+{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA0, RB } },
{ "sliq", XRC(31,184,0), X_MASK, M601, { RA, RS, SH } },
{ "sliq.", XRC(31,184,1), X_MASK, M601, { RA, RS, SH } },
+{ "prtyd", X(31,186), XRB_MASK, POWER6, { RA, RS } },
+
{ "stwuxe", X(31,191), X_MASK, BOOKE64, { RS, RAS, RB } },
{ "subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, { RT, RA } },
@@ -3442,9 +3607,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mtsr", X(31,210), XRB_MASK|(1<<20), COM32, { SR, RS } },
-{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA, RB } },
+{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA0, RB } },
-{ "stbx", X(31,215), X_MASK, COM, { RS, RA, RB } },
+{ "stbx", X(31,215), X_MASK, COM, { RS, RA0, RB } },
{ "sllq", XRC(31,216,0), X_MASK, M601, { RA, RS, RB } },
{ "sllq.", XRC(31,216,1), X_MASK, M601, { RA, RS, RB } },
@@ -3452,7 +3617,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sleq", XRC(31,217,0), X_MASK, M601, { RA, RS, RB } },
{ "sleq.", XRC(31,217,1), X_MASK, M601, { RA, RS, RB } },
-{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "icblc", X(31,230), X_MASK, PPCCHLK, { CT, RA, RB }},
@@ -3492,7 +3657,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mtsrin", X(31,242), XRA_MASK, PPC32, { RS, RB } },
{ "mtsri", X(31,242), XRA_MASK, POWER32, { RS, RB } },
-{ "dcbtst", X(31,246), XRT_MASK, PPC, { CT, RA, RB } },
+{ "dcbtst", X(31,246), X_MASK, PPC, { CT, RA, RB } },
{ "stbux", X(31,247), X_MASK, COM, { RS, RAS, RB } },
@@ -3519,26 +3684,26 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "addo.", XO(31,266,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
{ "caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
-{ "tlbiel", X(31,274), XRTRA_MASK, POWER4, { RB } },
+{ "tlbiel", X(31,274), XRTLRA_MASK, POWER4, { RB, L } },
{ "mfapidi", X(31,275), X_MASK, BOOKE, { RT, RA } },
{ "lscbx", XRC(31,277,0), X_MASK, M601, { RT, RA, RB } },
{ "lscbx.", XRC(31,277,1), X_MASK, M601, { RT, RA, RB } },
-{ "dcbt", X(31,278), XRT_MASK, PPC, { CT, RA, RB } },
+{ "dcbt", X(31,278), X_MASK, PPC, { CT, RA, RB } },
-{ "lhzx", X(31,279), X_MASK, COM, { RT, RA, RB } },
+{ "lhzx", X(31,279), X_MASK, COM, { RT, RA0, RB } },
{ "eqv", XRC(31,284,0), X_MASK, COM, { RA, RS, RB } },
{ "eqv.", XRC(31,284,1), X_MASK, COM, { RA, RS, RB } },
{ "dcbte", X(31,286), X_MASK, BOOKE64, { CT, RA, RB } },
-{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "tlbie", X(31,306), XRTLRA_MASK, PPC, { RB, L } },
-{ "tlbi", X(31,306), XRT_MASK, POWER, { RA, RB } },
+{ "tlbi", X(31,306), XRT_MASK, POWER, { RA0, RB } },
{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } },
@@ -3607,6 +3772,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, COM, { RT } },
{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, COM, { RT } },
{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, COM, { RT } },
+{ "mfcfar", XSPR(31,339,28), XSPR_MASK, POWER6, { RT } },
{ "mfpid", XSPR(31,339,48), XSPR_MASK, BOOKE, { RT } },
{ "mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, { RT } },
{ "mfcsrr0", XSPR(31,339,58), XSPR_MASK, BOOKE, { RT } },
@@ -3634,21 +3800,21 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, { RT } },
{ "mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, { RT } },
{ "mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, { RT } },
-{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405, { RT } },
-{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405, { RT } },
-{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405, { RT } },
-{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405, { RT } },
{ "mftb", X(31,371), X_MASK, CLASSIC, { RT, TBR } },
{ "mftb", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
{ "mftbl", XSPR(31,371,268), XSPR_MASK, CLASSIC, { RT } },
{ "mftbl", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
{ "mftbu", XSPR(31,371,269), XSPR_MASK, CLASSIC, { RT } },
{ "mftbu", XSPR(31,339,269), XSPR_MASK, BOOKE, { RT } },
-{ "mfsprg", XSPR(31,339,272), XSPRG_MASK, PPC, { RT, SPRG } },
+{ "mfsprg", XSPR(31,339,256), XSPRG_MASK, PPC, { RT, SPRG } },
{ "mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, { RT } },
{ "mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, { RT } },
{ "mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, { RT } },
{ "mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, { RT } },
+{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405 | BOOKE, { RT } },
+{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405 | BOOKE, { RT } },
+{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405 | BOOKE, { RT } },
+{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405 | BOOKE, { RT } },
{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, { RT } },
{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } },
{ "mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, { RT } },
@@ -3699,6 +3865,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, { RT } },
{ "mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, { RT } },
{ "mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, { RT } },
+{ "mfivor32", XSPR(31,339,528), XSPR_MASK, PPCSPE, { RT } },
+{ "mfivor33", XSPR(31,339,529), XSPR_MASK, PPCSPE, { RT } },
+{ "mfivor34", XSPR(31,339,530), XSPR_MASK, PPCSPE, { RT } },
+{ "mfivor35", XSPR(31,339,531), XSPR_MASK, PPCPMR, { RT } },
{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
@@ -3708,10 +3878,11 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, { RT } },
{ "mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, { RT } },
{ "mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, { RT } },
-{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } },
{ "mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } },
{ "mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfmcar", XSPR(31,339,573), XSPR_MASK, PPCRFMCI, { RT } },
{ "mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, { RT } },
{ "mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, { RT } },
{ "mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, { RT } },
@@ -3775,14 +3946,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, { RT } },
{ "mfspr", X(31,339), X_MASK, COM, { RT, SPR } },
-{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA, RB } },
+{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA0, RB } },
{ "dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
-{ "lhax", X(31,343), X_MASK, COM, { RT, RA, RB } },
+{ "lhax", X(31,343), X_MASK, COM, { RT, RA0, RB } },
-{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
{ "dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
@@ -3821,14 +3992,20 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "slbmte", X(31,402), XRA_MASK, PPC64, { RS, RB } },
-{ "sthx", X(31,407), X_MASK, COM, { RS, RA, RB } },
+{ "sthx", X(31,407), X_MASK, COM, { RS, RA0, RB } },
+
+{ "cmpb", X(31,508), X_MASK, POWER6, { RA, RS, RB } },
{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } },
+{ "lfdpx", X(31,791), X_MASK, POWER6, { FRT, RA, RB } },
+
{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } },
{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } },
+{ "stfdpx", X(31,919), X_MASK, POWER6, { FRS, RA, RB } },
+
{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } },
{ "orc", XRC(31,412,0), X_MASK, COM, { RA, RS, RB } },
@@ -3837,7 +4014,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sradi", XS(31,413,0), XS_MASK, PPC64, { RA, RS, SH6 } },
{ "sradi.", XS(31,413,1), XS_MASK, PPC64, { RA, RS, SH6 } },
-{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "slbie", X(31,434), XRTRA_MASK, PPC64, { RB } },
@@ -3918,6 +4095,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, COM, { RS } },
{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, COM, { RS } },
{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, COM, { RS } },
+{ "mtcfar", XSPR(31,467,28), XSPR_MASK, POWER6, { RS } },
{ "mtpid", XSPR(31,467,48), XSPR_MASK, BOOKE, { RS } },
{ "mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, { RS } },
{ "mtdecar", XSPR(31,467,54), XSPR_MASK, BOOKE, { RS } },
@@ -3946,7 +4124,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, { RS } },
{ "mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, { RS } },
{ "mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, { RS } },
-{ "mtsprg", XSPR(31,467,272), XSPRG_MASK,PPC, { SPRG, RS } },
+{ "mtsprg", XSPR(31,467,256), XSPRG_MASK,PPC, { SPRG, RS } },
{ "mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, { RS } },
{ "mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, { RS } },
{ "mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, { RS } },
@@ -4005,6 +4183,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, { RS } },
{ "mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, { RS } },
{ "mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, { RS } },
+{ "mtivor32", XSPR(31,467,528), XSPR_MASK, PPCSPE, { RS } },
+{ "mtivor33", XSPR(31,467,529), XSPR_MASK, PPCSPE, { RS } },
+{ "mtivor34", XSPR(31,467,530), XSPR_MASK, PPCSPE, { RS } },
+{ "mtivor35", XSPR(31,467,531), XSPR_MASK, PPCPMR, { RS } },
{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
@@ -4101,13 +4283,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "clcs", X(31,531), XRB_MASK, M601, { RT, RA } },
-{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA, RB } },
+{ "ldbrx", X(31,532), X_MASK, CELL, { RT, RA0, RB } },
+
+{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lsx", X(31,533), X_MASK, PWRCOM, { RT, RA, RB } },
-{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA, RB } },
+{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA0, RB } },
{ "lbrx", X(31,534), X_MASK, PWRCOM, { RT, RA, RB } },
-{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA, RB } },
+{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA0, RB } },
{ "srw", XRC(31,536,0), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sr", XRC(31,536,0), X_MASK, PWRCOM, { RA, RS, RB } },
@@ -4123,11 +4307,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "maskir", XRC(31,541,0), X_MASK, M601, { RA, RS, RB } },
{ "maskir.", XRC(31,541,1), X_MASK, M601, { RA, RS, RB } },
-{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA0, RB } },
-{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA, RB } },
+{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA0, RB } },
{ "bbelr", X(31,550), X_MASK, PPCBRLK, { 0 }},
+
{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } },
{ "lfsux", X(31,567), X_MASK, COM, { FRT, RAS, RB } },
@@ -4136,8 +4321,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfsr", X(31,595), XRB_MASK|(1<<20), COM32, { RT, SR } },
-{ "lswi", X(31,597), X_MASK, PPCCOM, { RT, RA, NB } },
-{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA, NB } },
+{ "lswi", X(31,597), X_MASK, PPCCOM, { RT, RA0, NB } },
+{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA0, NB } },
{ "lwsync", XSYNC(31,598,1), 0xffffffff, PPC, { 0 } },
{ "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, { 0 } },
@@ -4145,9 +4330,11 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sync", X(31,598), XSYNC_MASK, PPCCOM, { LS } },
{ "dcs", X(31,598), 0xffffffff, PWRCOM, { 0 } },
-{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA, RB } },
+{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA0, RB } },
+
+{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA0, RB } },
-{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA, RB } },
+{ "mffgpr", XRC(31,607,0), XRA_MASK, POWER6, { FRT, RB } },
{ "mfsri", X(31,627), X_MASK, PWRCOM, { RT, RA, RB } },
@@ -4159,13 +4346,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mfsrin", X(31,659), XRA_MASK, PPC32, { RT, RB } },
-{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA, RB } },
-{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA, RB } },
+{ "stdbrx", X(31,660), X_MASK, CELL, { RS, RA0, RB } },
+
+{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA0, RB } },
+{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA0, RB } },
-{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA, RB } },
-{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA, RB } },
+{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA0, RB } },
+{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA0, RB } },
-{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA, RB } },
+{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA0, RB } },
{ "srq", XRC(31,664,0), X_MASK, M601, { RA, RS, RB } },
{ "srq.", XRC(31,664,1), X_MASK, M601, { RA, RS, RB } },
@@ -4173,9 +4362,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sre", XRC(31,665,0), X_MASK, M601, { RA, RS, RB } },
{ "sre.", XRC(31,665,1), X_MASK, M601, { RA, RS, RB } },
-{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA0, RB } },
-{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA, RB } },
+{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA0, RB } },
{ "stfsux", X(31,695), X_MASK, COM, { FRS, RAS, RB } },
@@ -4184,10 +4373,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "stfsuxe", X(31,703), X_MASK, BOOKE64, { FRS, RAS, RB } },
-{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA, NB } },
-{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA, NB } },
+{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA0, NB } },
+{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA0, NB } },
-{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA, RB } },
+{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA0, RB } },
{ "srlq", XRC(31,728,0), X_MASK, M601, { RA, RS, RB } },
{ "srlq.", XRC(31,728,1), X_MASK, M601, { RA, RS, RB } },
@@ -4195,7 +4384,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "sreq", XRC(31,729,0), X_MASK, M601, { RA, RS, RB } },
{ "sreq.", XRC(31,729,1), X_MASK, M601, { RA, RS, RB } },
-{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA, RB } },
+{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA0, RB } },
+
+{ "mftgpr", XRC(31,735,0), XRA_MASK, POWER6, { RT, FRB } },
{ "dcba", X(31,758), XRT_MASK, PPC405 | BOOKE, { RA, RB } },
@@ -4211,7 +4402,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "tlbivax", X(31,786), XRT_MASK, BOOKE, { RA, RB } },
{ "tlbivaxe",X(31,787), XRT_MASK, BOOKE64, { RA, RB } },
-{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA, RB } },
+{ "lwzcix", X(31,789), X_MASK, POWER6, { RT, RA0, RB } },
+
+{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA0, RB } },
{ "sraw", XRC(31,792,0), X_MASK, PPCCOM, { RA, RS, RB } },
{ "sra", XRC(31,792,0), X_MASK, PWRCOM, { RA, RS, RB } },
@@ -4221,13 +4414,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "srad", XRC(31,794,0), X_MASK, PPC64, { RA, RS, RB } },
{ "srad.", XRC(31,794,1), X_MASK, PPC64, { RA, RS, RB } },
-{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA0, RB } },
-{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA, RB } },
-{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA, RB } },
+{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA0, RB } },
+{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA0, RB } },
{ "rac", X(31,818), X_MASK, PWRCOM, { RT, RA, RB } },
+{ "lhzcix", X(31,821), X_MASK, POWER6, { RT, RA0, RB } },
+
{ "dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, { STRM } },
{ "dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, { 0 } },
@@ -4238,19 +4433,25 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "slbmfev", X(31,851), XRA_MASK, PPC64, { RT, RB } },
+{ "lbzcix", X(31,853), X_MASK, POWER6, { RT, RA0, RB } },
+
{ "mbar", X(31,854), X_MASK, BOOKE, { MO } },
{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } },
-{ "tlbsx", XRC(31,914,0), X_MASK, BOOKE, { RA, RB } },
-{ "tlbsx", XRC(31,914,0), X_MASK, PPC403, { RT, RA, RB } },
-{ "tlbsx.", XRC(31,914,1), X_MASK, BOOKE, { RA, RB } },
-{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403, { RT, RA, RB } },
+{ "lfiwax", X(31,855), X_MASK, POWER6, { FRT, RA0, RB } },
+
+{ "ldcix", X(31,885), X_MASK, POWER6, { RT, RA0, RB } },
+
+{ "tlbsx", XRC(31,914,0), X_MASK, PPC403|BOOKE, { RTO, RA, RB } },
+{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403|BOOKE, { RTO, RA, RB } },
{ "tlbsxe", XRC(31,915,0), X_MASK, BOOKE64, { RA, RB } },
{ "tlbsxe.", XRC(31,915,1), X_MASK, BOOKE64, { RA, RB } },
{ "slbmfee", X(31,915), XRA_MASK, PPC64, { RT, RB } },
-{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA, RB } },
+{ "stwcix", X(31,917), X_MASK, POWER6, { RS, RA0, RB } },
+
+{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA0, RB } },
{ "sraq", XRC(31,920,0), X_MASK, M601, { RA, RS, RB } },
{ "sraq.", XRC(31,920,1), X_MASK, M601, { RA, RS, RB } },
@@ -4263,14 +4464,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, { RA, RS } },
{ "exts.", XRC(31,922,1), XRB_MASK, PWRCOM, { RA, RS } },
-{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA0, RB } },
-{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA, RB } },
+{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA0, RB } },
{ "tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, { RT, RA } },
-{ "tlbre", X(31,946), X_MASK, BOOKE, { 0 } },
-{ "tlbre", X(31,946), X_MASK, PPC403, { RS, RA, SH } },
+{ "tlbre", X(31,946), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } },
+
+{ "sthcix", X(31,949), X_MASK, POWER6, { RS, RA0, RB } },
{ "sraiq", XRC(31,952,0), X_MASK, M601, { RA, RS, SH } },
{ "sraiq.", XRC(31,952,1), X_MASK, M601, { RA, RS, SH } },
@@ -4284,13 +4486,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, { RT, RA } },
{ "tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, { RT, RA } },
-{ "tlbwe", X(31,978), X_MASK, BOOKE, { 0 } },
-{ "tlbwe", X(31,978), X_MASK, PPC403, { RS, RA, SH } },
+{ "tlbwe", X(31,978), X_MASK, PPC403|BOOKE, { RSO, RAOPT, SHO } },
{ "tlbld", X(31,978), XRTRA_MASK, PPC, { RB } },
+{ "stbcix", X(31,981), X_MASK, POWER6, { RS, RA0, RB } },
+
{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } },
-{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA, RB } },
+{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA0, RB } },
{ "extsw", XRC(31,986,0), XRB_MASK, PPC64 | BOOKE64,{ RA, RS } },
{ "extsw.", XRC(31,986,1), XRB_MASK, PPC64, { RA, RS } },
@@ -4298,10 +4501,13 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "icread", X(31,998), XRT_MASK, PPC403|PPC440, { RA, RB } },
{ "icbie", X(31,990), XRT_MASK, BOOKE64, { RA, RB } },
-{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA, RB } },
+{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA0, RB } },
{ "tlbli", X(31,1010), XRTRA_MASK, PPC, { RB } },
+{ "stdcix", X(31,1013), X_MASK, POWER6, { RS, RA0, RB } },
+
+{ "dcbzl", XOPL(31,1014,1), XRT_MASK,POWER4, { RA, RB } },
{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
@@ -4320,86 +4526,104 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "stvx", X(31, 231), X_MASK, PPCVEC, { VS, RA, RB } },
{ "stvxl", X(31, 487), X_MASK, PPCVEC, { VS, RA, RB } },
-{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA } },
-{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA } },
+/* New load/store left/right index vector instructions that are in the Cell only. */
+{ "lvlx", X(31, 519), X_MASK, CELL, { VD, RA0, RB } },
+{ "lvlxl", X(31, 775), X_MASK, CELL, { VD, RA0, RB } },
+{ "lvrx", X(31, 551), X_MASK, CELL, { VD, RA0, RB } },
+{ "lvrxl", X(31, 807), X_MASK, CELL, { VD, RA0, RB } },
+{ "stvlx", X(31, 647), X_MASK, CELL, { VS, RA0, RB } },
+{ "stvlxl", X(31, 903), X_MASK, CELL, { VS, RA0, RB } },
+{ "stvrx", X(31, 679), X_MASK, CELL, { VS, RA0, RB } },
+{ "stvrxl", X(31, 935), X_MASK, CELL, { VS, RA0, RB } },
+
+{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA0 } },
+{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA0 } },
{ "lwzu", OP(33), OP_MASK, PPCCOM, { RT, D, RAL } },
-{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA } },
+{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA0 } },
-{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA } },
+{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA0 } },
{ "lbzu", OP(35), OP_MASK, COM, { RT, D, RAL } },
-{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA } },
-{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA } },
+{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA0 } },
+{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA0 } },
{ "stwu", OP(37), OP_MASK, PPCCOM, { RS, D, RAS } },
-{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA } },
+{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA0 } },
-{ "stb", OP(38), OP_MASK, COM, { RS, D, RA } },
+{ "stb", OP(38), OP_MASK, COM, { RS, D, RA0 } },
{ "stbu", OP(39), OP_MASK, COM, { RS, D, RAS } },
-{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA } },
+{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA0 } },
{ "lhzu", OP(41), OP_MASK, COM, { RT, D, RAL } },
-{ "lha", OP(42), OP_MASK, COM, { RT, D, RA } },
+{ "lha", OP(42), OP_MASK, COM, { RT, D, RA0 } },
{ "lhau", OP(43), OP_MASK, COM, { RT, D, RAL } },
-{ "sth", OP(44), OP_MASK, COM, { RS, D, RA } },
+{ "sth", OP(44), OP_MASK, COM, { RS, D, RA0 } },
{ "sthu", OP(45), OP_MASK, COM, { RS, D, RAS } },
{ "lmw", OP(46), OP_MASK, PPCCOM, { RT, D, RAM } },
-{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA } },
+{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA0 } },
-{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA } },
-{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA } },
+{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA0 } },
+{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA0 } },
-{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA } },
+{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA0 } },
{ "lfsu", OP(49), OP_MASK, COM, { FRT, D, RAS } },
-{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA } },
+{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA0 } },
{ "lfdu", OP(51), OP_MASK, COM, { FRT, D, RAS } },
-{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA } },
+{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA0 } },
{ "stfsu", OP(53), OP_MASK, COM, { FRS, D, RAS } },
-{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA } },
+{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA0 } },
{ "stfdu", OP(55), OP_MASK, COM, { FRS, D, RAS } },
{ "lq", OP(56), OP_MASK, POWER4, { RTQ, DQ, RAQ } },
-{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA } },
+{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA0 } },
+
+{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA0 } },
-{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA } },
+{ "lfdp", OP(57), OP_MASK, POWER6, { FRT, D, RA0 } },
-{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA } },
+{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lbzue", DEO(58,1), DE_MASK, BOOKE64, { RT, DE, RAL } },
-{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA } },
+{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lhzue", DEO(58,3), DE_MASK, BOOKE64, { RT, DE, RAL } },
-{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA } },
+{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lhaue", DEO(58,5), DE_MASK, BOOKE64, { RT, DE, RAL } },
-{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA } },
+{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA0 } },
{ "lwzue", DEO(58,7), DE_MASK, BOOKE64, { RT, DE, RAL } },
-{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA } },
+{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "stbue", DEO(58,9), DE_MASK, BOOKE64, { RS, DE, RAS } },
-{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA } },
+{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "sthue", DEO(58,11), DE_MASK, BOOKE64, { RS, DE, RAS } },
-{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA } },
+{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA0 } },
{ "stwue", DEO(58,15), DE_MASK, BOOKE64, { RS, DE, RAS } },
-{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA } },
+{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA0 } },
{ "ldu", DSO(58,1), DS_MASK, PPC64, { RT, DS, RAL } },
-{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA } },
+{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA0 } },
+
+{ "dadd", XRC(59,2,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "dadd.", XRC(59,2,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "dqua", ZRC(59,3,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+{ "dqua.", ZRC(59,3,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
@@ -4413,12 +4637,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
-{ "fres", A(59,24,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
-{ "fres.", A(59,24,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
+{ "fres", A(59,24,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
+{ "fres.", A(59,24,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } },
{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } },
+{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } },
+{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5, { FRT, FRB, A_L } },
+
{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
@@ -4431,31 +4658,103 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "dmul", XRC(59,34,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "dmul.", XRC(59,34,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "drrnd", ZRC(59,35,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+{ "drrnd.", ZRC(59,35,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+
+{ "dscli", ZRC(59,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+{ "dscli.", ZRC(59,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+
+{ "dquai", ZRC(59,67,0), Z_MASK, POWER6, { TE, FRT, FRB, RMC } },
+{ "dquai.", ZRC(59,67,1), Z_MASK, POWER6, { TE, FRT, FRB, RMC } },
+
+{ "dscri", ZRC(59,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+{ "dscri.", ZRC(59,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+
+{ "drintx", ZRC(59,99,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+{ "drintx.", ZRC(59,99,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+
+{ "dcmpo", X(59,130), X_MASK, POWER6, { BF, FRA, FRB } },
+
+{ "dtstex", X(59,162), X_MASK, POWER6, { BF, FRA, FRB } },
+{ "dtstdc", Z(59,194), Z_MASK, POWER6, { BF, FRA, DCM } },
+{ "dtstdg", Z(59,226), Z_MASK, POWER6, { BF, FRA, DGM } },
+
+{ "drintn", ZRC(59,227,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+{ "drintn.", ZRC(59,227,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+
+{ "dctdp", XRC(59,258,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dctdp.", XRC(59,258,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "dctfix", XRC(59,290,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dctfix.", XRC(59,290,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "ddedpd", XRC(59,322,0), X_MASK, POWER6, { SP, FRT, FRB } },
+{ "ddedpd.", XRC(59,322,1), X_MASK, POWER6, { SP, FRT, FRB } },
+
+{ "dxex", XRC(59,354,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dxex.", XRC(59,354,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "dsub", XRC(59,514,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "dsub.", XRC(59,514,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "ddiv", XRC(59,546,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "ddiv.", XRC(59,546,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "dcmpu", X(59,642), X_MASK, POWER6, { BF, FRA, FRB } },
+
+{ "dtstsf", X(59,674), X_MASK, POWER6, { BF, FRA, FRB } },
+
+{ "drsp", XRC(59,770,0), X_MASK, POWER6, { FRT, FRB } },
+{ "drsp.", XRC(59,770,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "dcffix", XRC(59,802,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dcffix.", XRC(59,802,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "denbcd", XRC(59,834,0), X_MASK, POWER6, { S, FRT, FRB } },
+{ "denbcd.", XRC(59,834,1), X_MASK, POWER6, { S, FRT, FRB } },
+
+{ "diex", XRC(59,866,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "diex.", XRC(59,866,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } },
{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } },
-{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA } },
-{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA } },
-{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA } },
+{ "stfdp", OP(61), OP_MASK, POWER6, { FRT, D, RA0 } },
+
+{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA0 } },
+{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA0 } },
+{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA0 } },
{ "lfsue", DEO(62,5), DE_MASK, BOOKE64, { FRT, DES, RAS } },
-{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA } },
+{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA0 } },
{ "lfdue", DEO(62,7), DE_MASK, BOOKE64, { FRT, DES, RAS } },
-{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA } },
+{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA0 } },
{ "stdue", DEO(62,9), DE_MASK, BOOKE64, { RS, DES, RAS } },
-{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA } },
+{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA0 } },
{ "stfsue", DEO(62,13), DE_MASK, BOOKE64, { FRS, DES, RAS } },
-{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA } },
+{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA0 } },
{ "stfdue", DEO(62,15), DE_MASK, BOOKE64, { FRS, DES, RAS } },
-{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA } },
+{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA0 } },
{ "stdu", DSO(62,1), DS_MASK, PPC64, { RS, DS, RAS } },
-{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA } },
+{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA0 } },
{ "fcmpu", X(63,0), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
+{ "daddq", XRC(63,2,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "daddq.", XRC(63,2,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "dquaq", ZRC(63,3,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+{ "dquaq.", ZRC(63,3,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+
+{ "fcpsgn", XRC(63,8,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "fcpsgn.", XRC(63,8,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
{ "frsp", XRC(63,12,0), XRA_MASK, COM, { FRT, FRB } },
{ "frsp.", XRC(63,12,1), XRA_MASK, COM, { FRT, FRB } },
@@ -4490,13 +4789,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
+{ "fre", A(63,24,0), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } },
+{ "fre.", A(63,24,1), AFRALFRC_MASK, POWER5, { FRT, FRB, A_L } },
+
{ "fmul", A(63,25,0), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
{ "fm", A(63,25,0), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
{ "fmul.", A(63,25,1), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
{ "fm.", A(63,25,1), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
-{ "frsqrte", A(63,26,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
-{ "frsqrte.",A(63,26,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
+{ "frsqrte", A(63,26,0), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
+{ "frsqrte.",A(63,26,1), AFRALFRC_MASK, PPC, { FRT, FRB, A_L } },
{ "fmsub", A(63,28,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
{ "fms", A(63,28,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
@@ -4520,6 +4822,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "fcmpo", X(63,32), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
+{ "dmulq", XRC(63,34,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "dmulq.", XRC(63,34,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "drrndq", ZRC(63,35,0), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+{ "drrndq.", ZRC(63,35,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+
{ "mtfsb1", XRC(63,38,0), XRARB_MASK, COM, { BT } },
{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, { BT } },
@@ -4528,36 +4836,100 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, { BF, BFA } },
+{ "dscliq", ZRC(63,66,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+{ "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+
+{ "dquaiq", ZRC(63,67,0), Z_MASK, POWER6, { TE, FRT, FRB, RMC } },
+{ "dquaiq.", ZRC(63,67,1), Z_MASK, POWER6, { FRT, FRA, FRB, RMC } },
+
{ "mtfsb0", XRC(63,70,0), XRARB_MASK, COM, { BT } },
{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, { BT } },
{ "fmr", XRC(63,72,0), XRA_MASK, COM, { FRT, FRB } },
{ "fmr.", XRC(63,72,1), XRA_MASK, COM, { FRT, FRB } },
+{ "dscriq", ZRC(63,98,0), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+{ "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, { FRT, FRA, SH16 } },
+
+{ "drintxq", ZRC(63,99,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+{ "drintxq.",ZRC(63,99,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+
+{ "dcmpoq", X(63,130), X_MASK, POWER6, { BF, FRA, FRB } },
+
{ "mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
{ "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
{ "fnabs", XRC(63,136,0), XRA_MASK, COM, { FRT, FRB } },
{ "fnabs.", XRC(63,136,1), XRA_MASK, COM, { FRT, FRB } },
+{ "dtstexq", X(63,162), X_MASK, POWER6, { BF, FRA, FRB } },
+{ "dtstdcq", Z(63,194), Z_MASK, POWER6, { BF, FRA, DCM } },
+{ "dtstdgq", Z(63,226), Z_MASK, POWER6, { BF, FRA, DGM } },
+
+{ "drintnq", ZRC(63,227,0), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+{ "drintnq.",ZRC(63,227,1), Z_MASK, POWER6, { R, FRT, FRB, RMC } },
+
+{ "dctqpq", XRC(63,258,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dctqpq.", XRC(63,258,1), X_MASK, POWER6, { FRT, FRB } },
+
{ "fabs", XRC(63,264,0), XRA_MASK, COM, { FRT, FRB } },
{ "fabs.", XRC(63,264,1), XRA_MASK, COM, { FRT, FRB } },
+{ "dctfixq", XRC(63,290,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dctfixq.",XRC(63,290,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "ddedpdq", XRC(63,322,0), X_MASK, POWER6, { SP, FRT, FRB } },
+{ "ddedpdq.",XRC(63,322,1), X_MASK, POWER6, { SP, FRT, FRB } },
+
+{ "dxexq", XRC(63,354,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dxexq.", XRC(63,354,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "frin", XRC(63,392,0), XRA_MASK, POWER5, { FRT, FRB } },
+{ "frin.", XRC(63,392,1), XRA_MASK, POWER5, { FRT, FRB } },
+{ "friz", XRC(63,424,0), XRA_MASK, POWER5, { FRT, FRB } },
+{ "friz.", XRC(63,424,1), XRA_MASK, POWER5, { FRT, FRB } },
+{ "frip", XRC(63,456,0), XRA_MASK, POWER5, { FRT, FRB } },
+{ "frip.", XRC(63,456,1), XRA_MASK, POWER5, { FRT, FRB } },
+{ "frim", XRC(63,488,0), XRA_MASK, POWER5, { FRT, FRB } },
+{ "frim.", XRC(63,488,1), XRA_MASK, POWER5, { FRT, FRB } },
+
+{ "dsubq", XRC(63,514,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "dsubq.", XRC(63,514,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
+{ "ddivq", XRC(63,546,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "ddivq.", XRC(63,546,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } },
{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } },
+{ "dcmpuq", X(63,642), X_MASK, POWER6, { BF, FRA, FRB } },
+
+{ "dtstsfq", X(63,674), X_MASK, POWER6, { BF, FRA, FRB } },
+
{ "mtfsf", XFL(63,711,0), XFL_MASK, COM, { FLM, FRB } },
{ "mtfsf.", XFL(63,711,1), XFL_MASK, COM, { FLM, FRB } },
+{ "drdpq", XRC(63,770,0), X_MASK, POWER6, { FRT, FRB } },
+{ "drdpq.", XRC(63,770,1), X_MASK, POWER6, { FRT, FRB } },
+
+{ "dcffixq", XRC(63,802,0), X_MASK, POWER6, { FRT, FRB } },
+{ "dcffixq.",XRC(63,802,1), X_MASK, POWER6, { FRT, FRB } },
+
{ "fctid", XRC(63,814,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctid.", XRC(63,814,1), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctidz", XRC(63,815,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC64, { FRT, FRB } },
+{ "denbcdq", XRC(63,834,0), X_MASK, POWER6, { S, FRT, FRB } },
+{ "denbcdq.",XRC(63,834,1), X_MASK, POWER6, { S, FRT, FRB } },
+
{ "fcfid", XRC(63,846,0), XRA_MASK, PPC64, { FRT, FRB } },
{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC64, { FRT, FRB } },
+{ "diexq", XRC(63,866,0), X_MASK, POWER6, { FRT, FRA, FRB } },
+{ "diexq.", XRC(63,866,1), X_MASK, POWER6, { FRT, FRA, FRB } },
+
};
const int powerpc_num_opcodes =
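
For readers following the mfcr/mfocrf rework in the hunks above, here is a minimal sketch, assuming the usual OP(x) definition from earlier in ppc-opc.c (outside this hunk), of how the widened XFXFXM_MASK and the extra p4 argument to XFXM keep the two encodings apart even though both use extended opcode 19:

/* Illustrative only; the macros are copied from the patch, OP() is assumed. */
#include <stdio.h>

#define OP(x)            ((((unsigned long)(x)) & 0x3f) << 26)
#define X(op, xop)       (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
#define X_MASK           XRC (0x3f, 0x3ff, 1)
#define XFXFXM_MASK      (X_MASK | (1 << 11) | (1 << 20))
#define XFXM(op, xop, fxm, p4) \
  (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \
   | ((unsigned long)(p4) << 20))

int main (void)
{
  unsigned long mfcr   = X (31, 19);            /* POWER4 mfcr, bit 20 clear */
  unsigned long mfocrf = XFXM (31, 19, 0, 1);   /* new mfocrf, bit 20 set    */

  /* Bit 20 is now part of XFXFXM_MASK, so matching insn & XFXFXM_MASK
     against each table pattern keeps the two forms distinct.  */
  printf ("mfcr   0x%08lx\n", mfcr);
  printf ("mfocrf 0x%08lx\n", mfocrf);
  printf ("mask   0x%08lx\n", (unsigned long) XFXFXM_MASK);
  return 0;
}
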
diff --git a/arch/powerpc/xmon/ppc.h b/arch/powerpc/xmon/ppc.h
index 342237e8dd6..110df96354b 100644
--- a/arch/powerpc/xmon/ppc.h
+++ b/arch/powerpc/xmon/ppc.h
@@ -1,5 +1,5 @@
/* ppc.h -- Header file for PowerPC opcode table
- Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003
+ Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
Written by Ian Lance Taylor, Cygnus Support
@@ -17,7 +17,7 @@ the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#ifndef PPC_H
#define PPC_H
@@ -134,6 +134,18 @@ extern const int powerpc_num_opcodes;
/* Opcode is supported by machine check APU. */
#define PPC_OPCODE_RFMCI 0x800000
+/* Opcode is only supported by Power5 architecture. */
+#define PPC_OPCODE_POWER5 0x1000000
+
+/* Opcode is supported by PowerPC e300 family. */
+#define PPC_OPCODE_E300 0x2000000
+
+/* Opcode is only supported by Power6 architecture. */
+#define PPC_OPCODE_POWER6 0x4000000
+
+/* Opcode is only supported by PowerPC Cell family. */
+#define PPC_OPCODE_CELL 0x8000000
+
/* A macro to extract the major opcode from an instruction. */
#define PPC_OP(i) (((i) >> 26) & 0x3f)
@@ -233,25 +245,28 @@ extern const struct powerpc_operand powerpc_operands[];
register names with a leading 'r'. */
#define PPC_OPERAND_GPR (040)
+/* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0. */
+#define PPC_OPERAND_GPR_0 (0100)
+
/* This operand names a floating point register. The disassembler
prints these with a leading 'f'. */
-#define PPC_OPERAND_FPR (0100)
+#define PPC_OPERAND_FPR (0200)
/* This operand is a relative branch displacement. The disassembler
prints these symbolically if possible. */
-#define PPC_OPERAND_RELATIVE (0200)
+#define PPC_OPERAND_RELATIVE (0400)
/* This operand is an absolute branch address. The disassembler
prints these symbolically if possible. */
-#define PPC_OPERAND_ABSOLUTE (0400)
+#define PPC_OPERAND_ABSOLUTE (01000)
/* This operand is optional, and is zero if omitted. This is used for
- the optional BF and L fields in the comparison instructions. The
+ example, in the optional BF field in the comparison instructions. The
assembler must count the number of operands remaining on the line,
and the number of operands remaining for the opcode, and decide
whether this operand is present or not. The disassembler should
print this operand out only if it is not zero. */
-#define PPC_OPERAND_OPTIONAL (01000)
+#define PPC_OPERAND_OPTIONAL (02000)
/* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand
is omitted, then for the next operand use this operand value plus
@@ -259,24 +274,24 @@ extern const struct powerpc_operand powerpc_operands[];
hack is needed because the Power rotate instructions can take
either 4 or 5 operands. The disassembler should print this operand
out regardless of the PPC_OPERAND_OPTIONAL field. */
-#define PPC_OPERAND_NEXT (02000)
+#define PPC_OPERAND_NEXT (04000)
/* This operand should be regarded as a negative number for the
purposes of overflow checking (i.e., the normal most negative
number is disallowed and one more than the normal most positive
number is allowed). This flag will only be set for a signed
operand. */
-#define PPC_OPERAND_NEGATIVE (04000)
+#define PPC_OPERAND_NEGATIVE (010000)
/* This operand names a vector unit register. The disassembler
prints these with a leading 'v'. */
-#define PPC_OPERAND_VR (010000)
+#define PPC_OPERAND_VR (020000)
/* This operand is for the DS field in a DS form instruction. */
-#define PPC_OPERAND_DS (020000)
+#define PPC_OPERAND_DS (040000)
/* This operand is for the DQ field in a DQ form instruction. */
-#define PPC_OPERAND_DQ (040000)
+#define PPC_OPERAND_DQ (0100000)
/* The POWER and PowerPC assemblers use a few macros. We keep them
with the operands table for simplicity. The macro table is an
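
The renumbered operand flags above make room for PPC_OPERAND_GPR_0, which the many RA to RA0 conversions in ppc-opc.c depend on. The operand-printing loop itself is outside this patch, so the following is only a sketch under that assumption: a GPR_0 operand whose value is register 0 is printed as a literal 0 rather than r0.

/* Illustrative sketch only, not part of the patch.  Flag values are the
   renumbered ones introduced above.  */
#include <stdio.h>

#define PPC_OPERAND_GPR   (040)
#define PPC_OPERAND_GPR_0 (0100)

static void print_gpr_operand (long value, unsigned long flags)
{
  if ((flags & PPC_OPERAND_GPR_0) != 0 && value == 0)
    printf ("0");               /* (rA|0): register 0 means literal zero   */
  else
    printf ("r%ld", value);     /* ordinary GPR, printed with leading 'r'  */
}

int main (void)
{
  print_gpr_operand (0, PPC_OPERAND_GPR_0);  /* prints "0"  */
  printf ("\n");
  print_gpr_operand (0, PPC_OPERAND_GPR);    /* prints "r0" */
  printf ("\n");
  return 0;
}
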
diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c
new file mode 100644
index 00000000000..ee929c641bf
--- /dev/null
+++ b/arch/powerpc/xmon/spu-dis.c
@@ -0,0 +1,248 @@
+/* Disassemble SPU instructions
+
+ Copyright 2006 Free Software Foundation, Inc.
+
+ This file is part of GDB, GAS, and the GNU binutils.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <linux/string.h>
+#include "nonstdio.h"
+#include "ansidecl.h"
+#include "spu.h"
+#include "dis-asm.h"
+
+/* This file provides a disassembler function which uses
+ the disassembler interface defined in dis-asm.h. */
+
+extern const struct spu_opcode spu_opcodes[];
+extern const int spu_num_opcodes;
+
+#define SPU_DISASM_TBL_SIZE (1 << 11)
+static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE];
+
+static void
+init_spu_disassemble (void)
+{
+ int i;
+
+ /* If two instructions have the same opcode then we prefer the first
+ * one. In most cases it is just an alternate mnemonic. */
+ for (i = 0; i < spu_num_opcodes; i++)
+ {
+ int o = spu_opcodes[i].opcode;
+ if (o >= SPU_DISASM_TBL_SIZE)
+ continue; /* abort (); */
+ if (spu_disassemble_table[o] == 0)
+ spu_disassemble_table[o] = &spu_opcodes[i];
+ }
+}
+
+/* Determine the instruction from the 11 most significant bits. */
+static const struct spu_opcode *
+get_index_for_opcode (unsigned int insn)
+{
+ const struct spu_opcode *index;
+ unsigned int opcode = insn >> (32-11);
+
+ /* Init the table. This assumes that element 0/opcode 0 (currently
+ * NOP) is always used */
+ if (spu_disassemble_table[0] == 0)
+ init_spu_disassemble ();
+
+ if ((index = spu_disassemble_table[opcode & 0x780]) != 0
+ && index->insn_type == RRR)
+ return index;
+
+ if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0
+ && (index->insn_type == RI18 || index->insn_type == LBT))
+ return index;
+
+ if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0
+ && index->insn_type == RI10)
+ return index;
+
+ if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0
+ && (index->insn_type == RI16))
+ return index;
+
+ if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0
+ && (index->insn_type == RI8))
+ return index;
+
+ if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0)
+ return index;
+
+ return 0;
+}
+
+/* Print a Spu instruction. */
+
+int
+print_insn_spu (unsigned long insn, unsigned long memaddr)
+{
+ int value;
+ int hex_value;
+ const struct spu_opcode *index;
+ enum spu_insns tag;
+
+ index = get_index_for_opcode (insn);
+
+ if (index == 0)
+ {
+ printf(".long 0x%x", insn);
+ }
+ else
+ {
+ int i;
+ int paren = 0;
+ tag = (enum spu_insns)(index - spu_opcodes);
+ printf("%s", index->mnemonic);
+ if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED
+ || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ
+ || tag == M_SYNC || tag == M_HBR)
+ {
+ int fb = (insn >> (32-18)) & 0x7f;
+ if (fb & 0x40)
+ printf(tag == M_SYNC ? "c" : "p");
+ if (fb & 0x20)
+ printf("d");
+ if (fb & 0x10)
+ printf("e");
+ }
+ if (index->arg[0] != 0)
+ printf("\t");
+ hex_value = 0;
+ for (i = 1; i <= index->arg[0]; i++)
+ {
+ int arg = index->arg[i];
+ if (arg != A_P && !paren && i > 1)
+ printf(",");
+
+ switch (arg)
+ {
+ case A_T:
+ printf("$%d",
+ DECODE_INSN_RT (insn));
+ break;
+ case A_A:
+ printf("$%d",
+ DECODE_INSN_RA (insn));
+ break;
+ case A_B:
+ printf("$%d",
+ DECODE_INSN_RB (insn));
+ break;
+ case A_C:
+ printf("$%d",
+ DECODE_INSN_RC (insn));
+ break;
+ case A_S:
+ printf("$sp%d",
+ DECODE_INSN_RA (insn));
+ break;
+ case A_H:
+ printf("$ch%d",
+ DECODE_INSN_RA (insn));
+ break;
+ case A_P:
+ paren++;
+ printf("(");
+ break;
+ case A_U7A:
+ printf("%d",
+ 173 - DECODE_INSN_U8 (insn));
+ break;
+ case A_U7B:
+ printf("%d",
+ 155 - DECODE_INSN_U8 (insn));
+ break;
+ case A_S3:
+ case A_S6:
+ case A_S7:
+ case A_S7N:
+ case A_U3:
+ case A_U5:
+ case A_U6:
+ case A_U7:
+ hex_value = DECODE_INSN_I7 (insn);
+ printf("%d", hex_value);
+ break;
+ case A_S11:
+ print_address(memaddr + DECODE_INSN_I9a (insn) * 4);
+ break;
+ case A_S11I:
+ print_address(memaddr + DECODE_INSN_I9b (insn) * 4);
+ break;
+ case A_S10:
+ case A_S10B:
+ hex_value = DECODE_INSN_I10 (insn);
+ printf("%d", hex_value);
+ break;
+ case A_S14:
+ hex_value = DECODE_INSN_I10 (insn) * 16;
+ printf("%d", hex_value);
+ break;
+ case A_S16:
+ hex_value = DECODE_INSN_I16 (insn);
+ printf("%d", hex_value);
+ break;
+ case A_X16:
+ hex_value = DECODE_INSN_U16 (insn);
+ printf("%u", hex_value);
+ break;
+ case A_R18:
+ value = DECODE_INSN_I16 (insn) * 4;
+ if (value == 0)
+ printf("%d", value);
+ else
+ {
+ hex_value = memaddr + value;
+ print_address(hex_value & 0x3ffff);
+ }
+ break;
+ case A_S18:
+ value = DECODE_INSN_U16 (insn) * 4;
+ if (value == 0)
+ printf("%d", value);
+ else
+ print_address(value);
+ break;
+ case A_U18:
+ value = DECODE_INSN_U18 (insn);
+ if (value == 0 || 1)
+ {
+ hex_value = value;
+ printf("%u", value);
+ }
+ else
+ print_address(value);
+ break;
+ case A_U14:
+ hex_value = DECODE_INSN_U14 (insn);
+ printf("%u", hex_value);
+ break;
+ }
+ if (arg != A_P && paren)
+ {
+ printf(")");
+ paren--;
+ }
+ }
+ if (hex_value > 16)
+ printf("\t# %x", hex_value);
+ }
+ return 4;
+}
diff --git a/arch/powerpc/xmon/spu-insns.h b/arch/powerpc/xmon/spu-insns.h
new file mode 100644
index 00000000000..99dc452821a
--- /dev/null
+++ b/arch/powerpc/xmon/spu-insns.h
@@ -0,0 +1,410 @@
+/* SPU ELF support for BFD.
+
+ Copyright 2006 Free Software Foundation, Inc.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* SPU Opcode Table
+
+-=-=-= FORMAT =-=-=-
+
+ +----+-------+-------+-------+-------+ +------------+-------+-------+-------+
+RRR | op | RC | RB | RA | RT | RI7 | op | I7 | RA | RT |
+ +----+-------+-------+-------+-------+ +------------+-------+-------+-------+
+ 0 3 1 1 2 3 0 1 1 2 3
+ 0 7 4 1 0 7 4 1
+
+ +-----------+--------+-------+-------+ +---------+----------+-------+-------+
+RI8 | op | I8 | RA | RT | RI10 | op | I10 | RA | RT |
+ +-----------+--------+-------+-------+ +---------+----------+-------+-------+
+ 0 9 1 2 3 0 7 1 2 3
+ 7 4 1 7 4 1
+
+ +----------+-----------------+-------+ +--------+-------------------+-------+
+RI16 | op | I16 | RT | RI18 | op | I18 | RT |
+ +----------+-----------------+-------+ +--------+-------------------+-------+
+ 0 8 2 3 0 6 2 3
+ 4 1 4 1
+
+ +------------+-------+-------+-------+ +-------+--+-----------------+-------+
+RR | op | RB | RA | RT | LBT | op |RO| I16 | RO |
+ +------------+-------+-------+-------+ +-------+--+-----------------+-------+
+ 0 1 1 2 3 0 6 8 2 3
+ 0 7 4 1 4 1
+
+ +------------+----+--+-------+-------+
+ LBTI | op | // |RO| RA | RO |
+ +------------+----+--+-------+-------+
+ 0 1 1 1 2 3
+ 0 5 7 4 1
+
+-=-=-= OPCODE =-=-=-
+
+The OPCODE field specifies the most significant 11 bits of the instruction.  Some formats do not have
+11 bits of opcode; in that case the bit fields other than op are defined as 0s.  For example, the opcode
+of the fma instruction (RRR format) is defined as 0x700: 0x700 -> 11'b11100000000, i.e. the op field is
+4'b1110 and the remaining 7 bits are defined as 7'b0000000.
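+
+As a rough sketch of how a disassembler uses this field (spu-dis.c in this patch does the
+equivalent), the top 11 bits are extracted and then masked down to the width the format
+actually defines before indexing the opcode table:
+
+    unsigned int opcode = insn >> (32 - 11);   /* top 11 bits of the word */
+    /* RRR forms only define the top 4 of those bits, hence opcode & 0x780 */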
+
+-=-=-= ASM_FORMAT =-=-=-
+
+RRR category RI7 category
+ ASM_RRR mnemonic RC, RA, RB, RT ASM_RI4 mnemonic RT, RA, I4
+ ASM_RI7 mnemonic RT, RA, I7
+
+RI8 category RI10 category
+ ASM_RUI8 mnemonic RT, RA, UI8 ASM_AI10 mnemonic RA, I10
+ ASM_RI10 mnemonic RT, RA, R10
+ ASM_RI10IDX mnemonic RT, I10(RA)
+
+RI16 category RI18 category
+ ASM_I16W mnemonic I16W ASM_RI18 mnemonic RT, I18
+ ASM_RI16 mnemonic RT, I16
+ ASM_RI16W mnemonic RT, I16W
+
+RR category LBT category
+ ASM_MFSPR mnemonic RT, SA ASM_LBT mnemonic brinst, brtarg
+ ASM_MTSPR mnemonic SA, RT
+ ASM_NOOP mnemonic LBTI category
+ ASM_RA mnemonic RA ASM_LBTI mnemonic brinst, RA
+ ASM_RAB mnemonic RA, RB
+ ASM_RDCH mnemonic RT, CA
+ ASM_RR mnemonic RT, RA, RB
+ ASM_RT mnemonic RT
+ ASM_RTA mnemonic RT, RA
+ ASM_WRCH mnemonic CA, RT
+
+Note that RRR instructions have the names for RC and RT reversed from
+what's in the ISA, in order to put RT in the same position it appears
+for other formats.
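+For example, shufb is listed as _A4(A_C,A_A,A_B,A_T) below, so the operand printed first is
+the A_C field, which is the destination of the RRR form (RC<-f(RA,RB,RT)).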
+
+-=-=-= DEPENDENCY =-=-=-
+
+The DEPENDENCY field consists of 5 digits and records which registers are used as sources and which as targets.
+The first (most significant) digit is always 0.  It is followed by the RC, RB, RA and RT digits.
+If a digit is 0, the corresponding register is not used in the instruction.
+If a digit is 1, the corresponding register is used as a source in the instruction.
+If a digit is 2, the corresponding register is used as a target in the instruction.
+If a digit is 3, the corresponding register is used as both source and target in the instruction.
+For example, the fms instruction has 00113 as its DEPENDENCY field: RC is not used, RB and RA are
+used as sources, and RT is the target.
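+
+Note that in this copy of the table the DEPENDENCY column is informational only: the APUOP and
+APUOPFB expansions in spu-opc.c and spu.h drop that argument.  If it were ever decoded, the leading
+zero makes it an octal constant in C, so the digits would come apart in base 8 (a hypothetical
+helper, not something xmon does):
+
+    rt = dep & 7;  ra = (dep >> 3) & 7;  rb = (dep >> 6) & 7;  rc = (dep >> 9) & 7;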
+
+-=-=-= PIPE =-=-=-
+
+This field shows which execution pipe is used for the instruction.
+
+pipe0 execution pipelines:
+ FP6 SP floating pipeline
+ FP7 integer operations executed in SP floating pipeline
+ FPD DP floating pipeline
+ FX2 FXU pipeline
+ FX3 Rotate/Shift pipeline
+ FXB Byte pipeline
+ NOP No pipeline
+
+pipe1 execution pipelines:
+ BR Branch pipeline
+ LNOP No pipeline
+ LS Load/Store pipeline
+ SHUF Shuffle pipeline
+ SPR SPR/CH pipeline
+
+*/
+
+#define _A0() {0}
+#define _A1(a) {1,a}
+#define _A2(a,b) {2,a,b}
+#define _A3(a,b,c) {3,a,b,c}
+#define _A4(a,b,c,d) {4,a,b,c,d}
+
+/* TAG FORMAT OPCODE MNEMONIC ASM_FORMAT DEPENDENCY PIPE COMMENT */
+/* 0[RC][RB][RA][RT] */
+/* 1:src, 2:target */
+
+APUOP(M_BR, RI16, 0x190, "br", _A1(A_R18), 00000, BR) /* BRel IP<-IP+I16 */
+APUOP(M_BRSL, RI16, 0x198, "brsl", _A2(A_T,A_R18), 00002, BR) /* BRelSetLink RT,IP<-IP,IP+I16 */
+APUOP(M_BRA, RI16, 0x180, "bra", _A1(A_S18), 00000, BR) /* BRAbs IP<-I16 */
+APUOP(M_BRASL, RI16, 0x188, "brasl", _A2(A_T,A_S18), 00002, BR) /* BRAbsSetLink RT,IP<-IP,I16 */
+APUOP(M_FSMBI, RI16, 0x194, "fsmbi", _A2(A_T,A_X16), 00002, SHUF) /* FormSelMask%I RT<-fsm(I16) */
+APUOP(M_LQA, RI16, 0x184, "lqa", _A2(A_T,A_S18), 00002, LS) /* LoadQAbs RT<-M[I16] */
+APUOP(M_LQR, RI16, 0x19C, "lqr", _A2(A_T,A_R18), 00002, LS) /* LoadQRel RT<-M[IP+I16] */
+APUOP(M_STOP, RR, 0x000, "stop", _A0(), 00000, BR) /* STOP stop */
+APUOP(M_STOP2, RR, 0x000, "stop", _A1(A_U14), 00000, BR) /* STOP stop */
+APUOP(M_STOPD, RR, 0x140, "stopd", _A3(A_T,A_A,A_B), 00111, BR) /* STOPD stop (with register dependencies) */
+APUOP(M_LNOP, RR, 0x001, "lnop", _A0(), 00000, LNOP) /* LNOP no_operation */
+APUOP(M_SYNC, RR, 0x002, "sync", _A0(), 00000, BR) /* SYNC flush_pipe */
+APUOP(M_DSYNC, RR, 0x003, "dsync", _A0(), 00000, BR) /* DSYNC flush_store_queue */
+APUOP(M_MFSPR, RR, 0x00c, "mfspr", _A2(A_T,A_S), 00002, SPR) /* MFSPR RT<-SA */
+APUOP(M_RDCH, RR, 0x00d, "rdch", _A2(A_T,A_H), 00002, SPR) /* ReaDCHannel RT<-CA:data */
+APUOP(M_RCHCNT, RR, 0x00f, "rchcnt", _A2(A_T,A_H), 00002, SPR) /* ReaDCHanCouNT RT<-CA:count */
+APUOP(M_HBRA, LBT, 0x080, "hbra", _A2(A_S11,A_S18), 00000, LS) /* HBRA BTB[B9]<-M[I16] */
+APUOP(M_HBRR, LBT, 0x090, "hbrr", _A2(A_S11,A_R18), 00000, LS) /* HBRR BTB[B9]<-M[IP+I16] */
+APUOP(M_BRZ, RI16, 0x100, "brz", _A2(A_T,A_R18), 00001, BR) /* BRZ IP<-IP+I16_if(RT) */
+APUOP(M_BRNZ, RI16, 0x108, "brnz", _A2(A_T,A_R18), 00001, BR) /* BRNZ IP<-IP+I16_if(RT) */
+APUOP(M_BRHZ, RI16, 0x110, "brhz", _A2(A_T,A_R18), 00001, BR) /* BRHZ IP<-IP+I16_if(RT) */
+APUOP(M_BRHNZ, RI16, 0x118, "brhnz", _A2(A_T,A_R18), 00001, BR) /* BRHNZ IP<-IP+I16_if(RT) */
+APUOP(M_STQA, RI16, 0x104, "stqa", _A2(A_T,A_S18), 00001, LS) /* SToreQAbs M[I16]<-RT */
+APUOP(M_STQR, RI16, 0x11C, "stqr", _A2(A_T,A_R18), 00001, LS) /* SToreQRel M[IP+I16]<-RT */
+APUOP(M_MTSPR, RR, 0x10c, "mtspr", _A2(A_S,A_T), 00001, SPR) /* MTSPR SA<-RT */
+APUOP(M_WRCH, RR, 0x10d, "wrch", _A2(A_H,A_T), 00001, SPR) /* ChanWRite CA<-RT */
+APUOP(M_LQD, RI10, 0x1a0, "lqd", _A4(A_T,A_S14,A_P,A_A), 00012, LS) /* LoadQDisp RT<-M[Ra+I10] */
+APUOP(M_BI, RR, 0x1a8, "bi", _A1(A_A), 00010, BR) /* BI IP<-RA */
+APUOP(M_BISL, RR, 0x1a9, "bisl", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
+APUOP(M_IRET, RR, 0x1aa, "iret", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
+APUOP(M_IRET2, RR, 0x1aa, "iret", _A0(), 00010, BR) /* IRET IP<-SRR0 */
+APUOP(M_BISLED, RR, 0x1ab, "bisled", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
+APUOP(M_HBR, LBTI, 0x1ac, "hbr", _A2(A_S11I,A_A), 00010, LS) /* HBR BTB[B9]<-M[Ra] */
+APUOP(M_FREST, RR, 0x1b8, "frest", _A2(A_T,A_A), 00012, SHUF) /* FREST RT<-recip(RA) */
+APUOP(M_FRSQEST, RR, 0x1b9, "frsqest", _A2(A_T,A_A), 00012, SHUF) /* FRSQEST RT<-rsqrt(RA) */
+APUOP(M_FSM, RR, 0x1b4, "fsm", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
+APUOP(M_FSMH, RR, 0x1b5, "fsmh", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
+APUOP(M_FSMB, RR, 0x1b6, "fsmb", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
+APUOP(M_GB, RR, 0x1b0, "gb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
+APUOP(M_GBH, RR, 0x1b1, "gbh", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
+APUOP(M_GBB, RR, 0x1b2, "gbb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
+APUOP(M_CBD, RI7, 0x1f4, "cbd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
+APUOP(M_CHD, RI7, 0x1f5, "chd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
+APUOP(M_CWD, RI7, 0x1f6, "cwd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
+APUOP(M_CDD, RI7, 0x1f7, "cdd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
+APUOP(M_ROTQBII, RI7, 0x1f8, "rotqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* ROTQBII RT<-RA<<<I7 */
+APUOP(M_ROTQBYI, RI7, 0x1fc, "rotqbyi", _A3(A_T,A_A,A_S7N), 00012, SHUF) /* ROTQBYI RT<-RA<<<(I7*8) */
+APUOP(M_ROTQMBII, RI7, 0x1f9, "rotqmbii", _A3(A_T,A_A,A_S3), 00012, SHUF) /* ROTQMBII RT<-RA<<I7 */
+APUOP(M_ROTQMBYI, RI7, 0x1fd, "rotqmbyi", _A3(A_T,A_A,A_S6), 00012, SHUF) /* ROTQMBYI RT<-RA<<I7 */
+APUOP(M_SHLQBII, RI7, 0x1fb, "shlqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* SHLQBII RT<-RA<<I7 */
+APUOP(M_SHLQBYI, RI7, 0x1ff, "shlqbyi", _A3(A_T,A_A,A_U5), 00012, SHUF) /* SHLQBYI RT<-RA<<I7 */
+APUOP(M_STQD, RI10, 0x120, "stqd", _A4(A_T,A_S14,A_P,A_A), 00011, LS) /* SToreQDisp M[Ra+I10]<-RT */
+APUOP(M_BIHNZ, RR, 0x12b, "bihnz", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
+APUOP(M_BIHZ, RR, 0x12a, "bihz", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOP(M_BINZ, RR, 0x129, "binz", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
+APUOP(M_BIZ, RR, 0x128, "biz", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+APUOP(M_CBX, RR, 0x1d4, "cbx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
+APUOP(M_CHX, RR, 0x1d5, "chx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
+APUOP(M_CWX, RR, 0x1d6, "cwx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
+APUOP(M_CDX, RR, 0x1d7, "cdx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
+APUOP(M_LQX, RR, 0x1c4, "lqx", _A3(A_T,A_A,A_B), 00112, LS) /* LoadQindeX RT<-M[Ra+Rb] */
+APUOP(M_ROTQBI, RR, 0x1d8, "rotqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBI RT<-RA<<<Rb */
+APUOP(M_ROTQMBI, RR, 0x1d9, "rotqmbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBI RT<-RA<<Rb */
+APUOP(M_SHLQBI, RR, 0x1db, "shlqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBI RT<-RA<<Rb */
+APUOP(M_ROTQBY, RR, 0x1dc, "rotqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBY RT<-RA<<<(Rb*8) */
+APUOP(M_ROTQMBY, RR, 0x1dd, "rotqmby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBY RT<-RA<<Rb */
+APUOP(M_SHLQBY, RR, 0x1df, "shlqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBY RT<-RA<<Rb */
+APUOP(M_ROTQBYBI, RR, 0x1cc, "rotqbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBYBI RT<-RA<<Rb */
+APUOP(M_ROTQMBYBI, RR, 0x1cd, "rotqmbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBYBI RT<-RA<<Rb */
+APUOP(M_SHLQBYBI, RR, 0x1cf, "shlqbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBYBI RT<-RA<<Rb */
+APUOP(M_STQX, RR, 0x144, "stqx", _A3(A_T,A_A,A_B), 00111, LS) /* SToreQindeX M[Ra+Rb]<-RT */
+APUOP(M_SHUFB, RRR, 0x580, "shufb", _A4(A_C,A_A,A_B,A_T), 02111, SHUF) /* SHUFfleBytes RC<-f(RA,RB,RT) */
+APUOP(M_IL, RI16, 0x204, "il", _A2(A_T,A_S16), 00002, FX2) /* ImmLoad RT<-sxt(I16) */
+APUOP(M_ILH, RI16, 0x20c, "ilh", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadH RT<-I16 */
+APUOP(M_ILHU, RI16, 0x208, "ilhu", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadHUpper RT<-I16<<16 */
+APUOP(M_ILA, RI18, 0x210, "ila", _A2(A_T,A_U18), 00002, FX2) /* ImmLoadAddr RT<-zxt(I18) */
+APUOP(M_NOP, RR, 0x201, "nop", _A1(A_T), 00000, NOP) /* XNOP no_operation */
+APUOP(M_NOP2, RR, 0x201, "nop", _A0(), 00000, NOP) /* XNOP no_operation */
+APUOP(M_IOHL, RI16, 0x304, "iohl", _A2(A_T,A_X16), 00003, FX2) /* AddImmeXt RT<-RT+sxt(I16) */
+APUOP(M_ANDBI, RI10, 0x0b0, "andbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* AND%I RT<-RA&I10 */
+APUOP(M_ANDHI, RI10, 0x0a8, "andhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */
+APUOP(M_ANDI, RI10, 0x0a0, "andi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */
+APUOP(M_ORBI, RI10, 0x030, "orbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* OR%I RT<-RA|I10 */
+APUOP(M_ORHI, RI10, 0x028, "orhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */
+APUOP(M_ORI, RI10, 0x020, "ori", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */
+APUOP(M_ORX, RR, 0x1f0, "orx", _A2(A_T,A_A), 00012, BR) /* ORX RT<-RA.w0|RA.w1|RA.w2|RA.w3 */
+APUOP(M_XORBI, RI10, 0x230, "xorbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* XOR%I RT<-RA^I10 */
+APUOP(M_XORHI, RI10, 0x228, "xorhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */
+APUOP(M_XORI, RI10, 0x220, "xori", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */
+APUOP(M_AHI, RI10, 0x0e8, "ahi", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */
+APUOP(M_AI, RI10, 0x0e0, "ai", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */
+APUOP(M_SFHI, RI10, 0x068, "sfhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */
+APUOP(M_SFI, RI10, 0x060, "sfi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */
+APUOP(M_CGTBI, RI10, 0x270, "cgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CGT%I RT<-(RA>I10) */
+APUOP(M_CGTHI, RI10, 0x268, "cgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */
+APUOP(M_CGTI, RI10, 0x260, "cgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */
+APUOP(M_CLGTBI, RI10, 0x2f0, "clgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
+APUOP(M_CLGTHI, RI10, 0x2e8, "clgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
+APUOP(M_CLGTI, RI10, 0x2e0, "clgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
+APUOP(M_CEQBI, RI10, 0x3f0, "ceqbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
+APUOP(M_CEQHI, RI10, 0x3e8, "ceqhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
+APUOP(M_CEQI, RI10, 0x3e0, "ceqi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
+APUOP(M_HGTI, RI10, 0x278, "hgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */
+APUOP(M_HGTI2, RI10, 0x278, "hgti", _A2(A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */
+APUOP(M_HLGTI, RI10, 0x2f8, "hlgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */
+APUOP(M_HLGTI2, RI10, 0x2f8, "hlgti", _A2(A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */
+APUOP(M_HEQI, RI10, 0x3f8, "heqi", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */
+APUOP(M_HEQI2, RI10, 0x3f8, "heqi", _A2(A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */
+APUOP(M_MPYI, RI10, 0x3a0, "mpyi", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYI RT<-RA*I10 */
+APUOP(M_MPYUI, RI10, 0x3a8, "mpyui", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYUI RT<-RA*I10 */
+APUOP(M_CFLTS, RI8, 0x3b0, "cflts", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTS RT<-int(RA,I8) */
+APUOP(M_CFLTU, RI8, 0x3b2, "cfltu", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTU RT<-int(RA,I8) */
+APUOP(M_CSFLT, RI8, 0x3b4, "csflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CSFLT RT<-flt(RA,I8) */
+APUOP(M_CUFLT, RI8, 0x3b6, "cuflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CUFLT RT<-flt(RA,I8) */
+APUOP(M_FESD, RR, 0x3b8, "fesd", _A2(A_T,A_A), 00012, FPD) /* FESD RT<-double(RA) */
+APUOP(M_FRDS, RR, 0x3b9, "frds", _A2(A_T,A_A), 00012, FPD) /* FRDS RT<-single(RA) */
+APUOP(M_FSCRRD, RR, 0x398, "fscrrd", _A1(A_T), 00002, FPD) /* FSCRRD RT<-FP_status */
+APUOP(M_FSCRWR, RR, 0x3ba, "fscrwr", _A2(A_T,A_A), 00010, FP7) /* FSCRWR FP_status<-RA */
+APUOP(M_FSCRWR2, RR, 0x3ba, "fscrwr", _A1(A_A), 00010, FP7) /* FSCRWR FP_status<-RA */
+APUOP(M_CLZ, RR, 0x2a5, "clz", _A2(A_T,A_A), 00012, FX2) /* CLZ RT<-clz(RA) */
+APUOP(M_CNTB, RR, 0x2b4, "cntb", _A2(A_T,A_A), 00012, FXB) /* CNT RT<-pop(RA) */
+APUOP(M_XSBH, RR, 0x2b6, "xsbh", _A2(A_T,A_A), 00012, FX2) /* eXtSignBtoH RT<-sign_ext(RA) */
+APUOP(M_XSHW, RR, 0x2ae, "xshw", _A2(A_T,A_A), 00012, FX2) /* eXtSignHtoW RT<-sign_ext(RA) */
+APUOP(M_XSWD, RR, 0x2a6, "xswd", _A2(A_T,A_A), 00012, FX2) /* eXtSignWtoD RT<-sign_ext(RA) */
+APUOP(M_ROTI, RI7, 0x078, "roti", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */
+APUOP(M_ROTMI, RI7, 0x079, "rotmi", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROT%MI RT<-RA<<I7 */
+APUOP(M_ROTMAI, RI7, 0x07a, "rotmai", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */
+APUOP(M_SHLI, RI7, 0x07b, "shli", _A3(A_T,A_A,A_U6), 00012, FX3) /* SHL%I RT<-RA<<I7 */
+APUOP(M_ROTHI, RI7, 0x07c, "rothi", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */
+APUOP(M_ROTHMI, RI7, 0x07d, "rothmi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROT%MI RT<-RA<<I7 */
+APUOP(M_ROTMAHI, RI7, 0x07e, "rotmahi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */
+APUOP(M_SHLHI, RI7, 0x07f, "shlhi", _A3(A_T,A_A,A_U5), 00012, FX3) /* SHL%I RT<-RA<<I7 */
+APUOP(M_A, RR, 0x0c0, "a", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */
+APUOP(M_AH, RR, 0x0c8, "ah", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */
+APUOP(M_SF, RR, 0x040, "sf", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */
+APUOP(M_SFH, RR, 0x048, "sfh", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */
+APUOP(M_CGT, RR, 0x240, "cgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
+APUOP(M_CGTB, RR, 0x250, "cgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
+APUOP(M_CGTH, RR, 0x248, "cgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
+APUOP(M_CLGT, RR, 0x2c0, "clgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
+APUOP(M_CLGTB, RR, 0x2d0, "clgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
+APUOP(M_CLGTH, RR, 0x2c8, "clgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
+APUOP(M_CEQ, RR, 0x3c0, "ceq", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
+APUOP(M_CEQB, RR, 0x3d0, "ceqb", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
+APUOP(M_CEQH, RR, 0x3c8, "ceqh", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
+APUOP(M_HGT, RR, 0x258, "hgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */
+APUOP(M_HGT2, RR, 0x258, "hgt", _A2(A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */
+APUOP(M_HLGT, RR, 0x2d8, "hlgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */
+APUOP(M_HLGT2, RR, 0x2d8, "hlgt", _A2(A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */
+APUOP(M_HEQ, RR, 0x3d8, "heq", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */
+APUOP(M_HEQ2, RR, 0x3d8, "heq", _A2(A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */
+APUOP(M_FCEQ, RR, 0x3c2, "fceq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCEQ RT<-(RA=RB) */
+APUOP(M_FCMEQ, RR, 0x3ca, "fcmeq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMEQ RT<-(|RA|=|RB|) */
+APUOP(M_FCGT, RR, 0x2c2, "fcgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCGT RT<-(RA>RB) */
+APUOP(M_FCMGT, RR, 0x2ca, "fcmgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMGT RT<-(|RA|>|RB|) */
+APUOP(M_AND, RR, 0x0c1, "and", _A3(A_T,A_A,A_B), 00112, FX2) /* AND RT<-RA&RB */
+APUOP(M_NAND, RR, 0x0c9, "nand", _A3(A_T,A_A,A_B), 00112, FX2) /* NAND RT<-!(RA&RB) */
+APUOP(M_OR, RR, 0x041, "or", _A3(A_T,A_A,A_B), 00112, FX2) /* OR RT<-RA|RB */
+APUOP(M_NOR, RR, 0x049, "nor", _A3(A_T,A_A,A_B), 00112, FX2) /* NOR RT<-!(RA|RB) */
+APUOP(M_XOR, RR, 0x241, "xor", _A3(A_T,A_A,A_B), 00112, FX2) /* XOR RT<-RA^RB */
+APUOP(M_EQV, RR, 0x249, "eqv", _A3(A_T,A_A,A_B), 00112, FX2) /* EQuiValent RT<-!(RA^RB) */
+APUOP(M_ANDC, RR, 0x2c1, "andc", _A3(A_T,A_A,A_B), 00112, FX2) /* ANDComplement RT<-RA&!RB */
+APUOP(M_ORC, RR, 0x2c9, "orc", _A3(A_T,A_A,A_B), 00112, FX2) /* ORComplement RT<-RA|!RB */
+APUOP(M_ABSDB, RR, 0x053, "absdb", _A3(A_T,A_A,A_B), 00112, FXB) /* ABSoluteDiff RT<-|RA-RB| */
+APUOP(M_AVGB, RR, 0x0d3, "avgb", _A3(A_T,A_A,A_B), 00112, FXB) /* AVG% RT<-(RA+RB+1)/2 */
+APUOP(M_SUMB, RR, 0x253, "sumb", _A3(A_T,A_A,A_B), 00112, FXB) /* SUM% RT<-f(RA,RB) */
+APUOP(M_DFA, RR, 0x2cc, "dfa", _A3(A_T,A_A,A_B), 00112, FPD) /* DFAdd RT<-RA+RB */
+APUOP(M_DFM, RR, 0x2ce, "dfm", _A3(A_T,A_A,A_B), 00112, FPD) /* DFMul RT<-RA*RB */
+APUOP(M_DFS, RR, 0x2cd, "dfs", _A3(A_T,A_A,A_B), 00112, FPD) /* DFSub RT<-RA-RB */
+APUOP(M_FA, RR, 0x2c4, "fa", _A3(A_T,A_A,A_B), 00112, FP6) /* FAdd RT<-RA+RB */
+APUOP(M_FM, RR, 0x2c6, "fm", _A3(A_T,A_A,A_B), 00112, FP6) /* FMul RT<-RA*RB */
+APUOP(M_FS, RR, 0x2c5, "fs", _A3(A_T,A_A,A_B), 00112, FP6) /* FSub RT<-RA-RB */
+APUOP(M_MPY, RR, 0x3c4, "mpy", _A3(A_T,A_A,A_B), 00112, FP7) /* MPY RT<-RA*RB */
+APUOP(M_MPYH, RR, 0x3c5, "mpyh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYH RT<-(RAh*RB)<<16 */
+APUOP(M_MPYHH, RR, 0x3c6, "mpyhh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHH RT<-RAh*RBh */
+APUOP(M_MPYHHU, RR, 0x3ce, "mpyhhu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHHU RT<-RAh*RBh */
+APUOP(M_MPYS, RR, 0x3c7, "mpys", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYS RT<-(RA*RB)>>16 */
+APUOP(M_MPYU, RR, 0x3cc, "mpyu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYU RT<-RA*RB */
+APUOP(M_FI, RR, 0x3d4, "fi", _A3(A_T,A_A,A_B), 00112, FP7) /* FInterpolate RT<-f(RA,RB) */
+APUOP(M_ROT, RR, 0x058, "rot", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */
+APUOP(M_ROTM, RR, 0x059, "rotm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */
+APUOP(M_ROTMA, RR, 0x05a, "rotma", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */
+APUOP(M_SHL, RR, 0x05b, "shl", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */
+APUOP(M_ROTH, RR, 0x05c, "roth", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */
+APUOP(M_ROTHM, RR, 0x05d, "rothm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */
+APUOP(M_ROTMAH, RR, 0x05e, "rotmah", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */
+APUOP(M_SHLH, RR, 0x05f, "shlh", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */
+APUOP(M_MPYHHA, RR, 0x346, "mpyhha", _A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHA RT<-RAh*RBh+RT */
+APUOP(M_MPYHHAU, RR, 0x34e, "mpyhhau", _A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHAU RT<-RAh*RBh+RT */
+APUOP(M_DFMA, RR, 0x35c, "dfma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMAdd RT<-RT+RA*RB */
+APUOP(M_DFMS, RR, 0x35d, "dfms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMSub RT<-RA*RB-RT */
+APUOP(M_DFNMS, RR, 0x35e, "dfnms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMSub RT<-RT-RA*RB */
+APUOP(M_DFNMA, RR, 0x35f, "dfnma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMAdd RT<-(-RT)-RA*RB */
+APUOP(M_FMA, RRR, 0x700, "fma", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMAdd RC<-RT+RA*RB */
+APUOP(M_FMS, RRR, 0x780, "fms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMSub RC<-RA*RB-RT */
+APUOP(M_FNMS, RRR, 0x680, "fnms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FNMSub RC<-RT-RA*RB */
+APUOP(M_MPYA, RRR, 0x600, "mpya", _A4(A_C,A_A,A_B,A_T), 02111, FP7) /* MPYA RC<-RA*RB+RT */
+APUOP(M_SELB, RRR, 0x400, "selb", _A4(A_C,A_A,A_B,A_T), 02111, FX2) /* SELectBits RC<-RA&RT|RB&!RT */
+/* for system function call, this uses op-code of mtspr */
+APUOP(M_SYSCALL, RI7, 0x10c, "syscall", _A3(A_T,A_A,A_S7N), 00002, SPR) /* System Call */
+/*
+pseudo instruction:
+system call
+value of I9 operation
+0 halt
+1 rt[0] = open(MEM[ra[0]], ra[1])
+2 rt[0] = close(ra[0])
+3 rt[0] = read(ra[0], MEM[ra[1]], ra[2])
+4 rt[0] = write(ra[0], MEM[ra[1]], ra[2])
+5 printf(MEM[ra[0]], ra[1], ra[2], ra[3])
+42 rt[0] = clock()
+52 rt[0] = lseek(ra0, ra1, ra2)
+
+*/
+
+
+/* new multiprecision add/sub */
+APUOP(M_ADDX, RR, 0x340, "addx", _A3(A_T,A_A,A_B), 00113, FX2) /* Add_eXtended RT<-RA+RB+RT */
+APUOP(M_CG, RR, 0x0c2, "cg", _A3(A_T,A_A,A_B), 00112, FX2) /* CarryGenerate RT<-cout(RA+RB) */
+APUOP(M_CGX, RR, 0x342, "cgx", _A3(A_T,A_A,A_B), 00113, FX2) /* CarryGen_eXtd RT<-cout(RA+RB+RT) */
+APUOP(M_SFX, RR, 0x341, "sfx", _A3(A_T,A_A,A_B), 00113, FX2) /* SubFrom_eXtd RT<-RB-RA+RT-1 */
+APUOP(M_BG, RR, 0x042, "bg", _A3(A_T,A_A,A_B), 00112, FX2) /* BorrowGenerate RT<-bout(RB-RA) */
+APUOP(M_BGX, RR, 0x343, "bgx", _A3(A_T,A_A,A_B), 00113, FX2) /* BorrowGen_eXtd RT<-bout(RB-RA+RT-1) */
+
+/*
+
+The following ops are a subset of above except with feature bits set.
+Feature bits are bits 11-17 of the instruction:
+
+ 11 - C & P feature bit
+ 12 - disable interrupts
+ 13 - enable interrupts
+
+*/
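+/* The FB values below (0x40, 0x20, 0x10) are those same bits, with bit 11 as the most
+   significant bit of the 7-bit feature field.  print_insn_spu() in spu-dis.c recovers
+   them in essentially this way:
+
+       int fb = (insn >> (32 - 18)) & 0x7f;
+       if (fb & 0x40) ... print 'c' or 'p' ...
+       if (fb & 0x20) ... print 'd' ...
+       if (fb & 0x10) ... print 'e' ...
+*/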
+APUOPFB(M_BID, RR, 0x1a8, 0x20, "bid", _A1(A_A), 00010, BR) /* BI IP<-RA */
+APUOPFB(M_BIE, RR, 0x1a8, 0x10, "bie", _A1(A_A), 00010, BR) /* BI IP<-RA */
+APUOPFB(M_BISLD, RR, 0x1a9, 0x20, "bisld", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
+APUOPFB(M_BISLE, RR, 0x1a9, 0x10, "bisle", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
+APUOPFB(M_IRETD, RR, 0x1aa, 0x20, "iretd", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
+APUOPFB(M_IRETD2, RR, 0x1aa, 0x20, "iretd", _A0(), 00010, BR) /* IRET IP<-SRR0 */
+APUOPFB(M_IRETE, RR, 0x1aa, 0x10, "irete", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
+APUOPFB(M_IRETE2, RR, 0x1aa, 0x10, "irete", _A0(), 00010, BR) /* IRET IP<-SRR0 */
+APUOPFB(M_BISLEDD, RR, 0x1ab, 0x20, "bisledd", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BISLEDE, RR, 0x1ab, 0x10, "bislede", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BIHNZD, RR, 0x12b, 0x20, "bihnzd", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
+APUOPFB(M_BIHNZE, RR, 0x12b, 0x10, "bihnze", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
+APUOPFB(M_BIHZD, RR, 0x12a, 0x20, "bihzd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOPFB(M_BIHZE, RR, 0x12a, 0x10, "bihze", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOPFB(M_BINZD, RR, 0x129, 0x20, "binzd", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
+APUOPFB(M_BINZE, RR, 0x129, 0x10, "binze", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
+APUOPFB(M_BIZD, RR, 0x128, 0x20, "bizd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+APUOPFB(M_BIZE, RR, 0x128, 0x10, "bize", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+APUOPFB(M_SYNCC, RR, 0x002, 0x40, "syncc", _A0(), 00000, BR) /* SYNCC flush_pipe */
+APUOPFB(M_HBRP, LBTI, 0x1ac, 0x40, "hbrp", _A0(), 00010, LS) /* HBR BTB[B9]<-M[Ra] */
+
+/* Synonyms required by the AS manual. */
+APUOP(M_LR, RI10, 0x020, "lr", _A2(A_T,A_A), 00012, FX2) /* OR%I RT<-RA|I10 */
+APUOP(M_BIHT, RR, 0x12b, "biht", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
+APUOP(M_BIHF, RR, 0x12a, "bihf", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOP(M_BIT, RR, 0x129, "bit", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
+APUOP(M_BIF, RR, 0x128, "bif", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+APUOPFB(M_BIHTD, RR, 0x12b, 0x20, "bihtd", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */
+APUOPFB(M_BIHTE, RR, 0x12b, 0x10, "bihte", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */
+APUOPFB(M_BIHFD, RR, 0x12a, 0x20, "bihfd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOPFB(M_BIHFE, RR, 0x12a, 0x10, "bihfe", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
+APUOPFB(M_BITD, RR, 0x129, 0x20, "bitd", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */
+APUOPFB(M_BITE, RR, 0x129, 0x10, "bite", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */
+APUOPFB(M_BIFD, RR, 0x128, 0x20, "bifd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+APUOPFB(M_BIFE, RR, 0x128, 0x10, "bife", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
+
+#undef _A0
+#undef _A1
+#undef _A2
+#undef _A3
+#undef _A4
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
new file mode 100644
index 00000000000..efffde9edc6
--- /dev/null
+++ b/arch/powerpc/xmon/spu-opc.c
@@ -0,0 +1,44 @@
+/* SPU opcode list
+
+ Copyright 2006 Free Software Foundation, Inc.
+
+ This file is part of GDB, GAS, and the GNU binutils.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "spu.h"
+
+/* This file holds the Spu opcode table */
+
+
+/*
+ Example contents of spu-insn.h
+ id_tag mode mode type opcode mnemonic asmtype dependency FPU L/S? branch? instruction
+ QUAD WORD (0,RC,RB,RA,RT) latency
+ APUOP(M_LQD, 1, 0, RI9, 0x1f8, "lqd", ASM_RI9IDX, 00012, FXU, 1, 0) Load Quadword d-form
+ */
+
+const struct spu_opcode spu_opcodes[] = {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+ { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+ { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+};
+
+const int spu_num_opcodes =
+ sizeof (spu_opcodes) / sizeof (spu_opcodes[0]);
diff --git a/arch/powerpc/xmon/spu.h b/arch/powerpc/xmon/spu.h
new file mode 100644
index 00000000000..c761fc8f35d
--- /dev/null
+++ b/arch/powerpc/xmon/spu.h
@@ -0,0 +1,126 @@
+/* SPU ELF support for BFD.
+
+ Copyright 2006 Free Software Foundation, Inc.
+
+ This file is part of GDB, GAS, and the GNU binutils.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+/* These two enums are from rel_apu/common/spu_asm_format.h */
+/* definition of instruction format */
+typedef enum {
+ RRR,
+ RI18,
+ RI16,
+ RI10,
+ RI8,
+ RI7,
+ RR,
+ LBT,
+ LBTI,
+ IDATA,
+ UNKNOWN_IFORMAT
+} spu_iformat;
+
+/* These values describe assembly instruction arguments.  They indicate
+ * how to encode each operand, what range checking applies and which
+ * relocation to use. */
+typedef enum {
+ A_T, /* register at pos 0 */
+ A_A, /* register at pos 7 */
+ A_B, /* register at pos 14 */
+ A_C, /* register at pos 21 */
+ A_S, /* special purpose register at pos 7 */
+ A_H, /* channel register at pos 7 */
+ A_P, /* parenthesis, this has to separate regs from immediates */
+ A_S3,
+ A_S6,
+ A_S7N,
+ A_S7,
+ A_U7A,
+ A_U7B,
+ A_S10B,
+ A_S10,
+ A_S11,
+ A_S11I,
+ A_S14,
+ A_S16,
+ A_S18,
+ A_R18,
+ A_U3,
+ A_U5,
+ A_U6,
+ A_U7,
+ A_U14,
+ A_X16,
+ A_U18,
+ A_MAX
+} spu_aformat;
+
+enum spu_insns {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+ TAG,
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+ TAG,
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+ M_SPU_MAX
+};
+
+struct spu_opcode
+{
+ spu_iformat insn_type;
+ unsigned int opcode;
+ char *mnemonic;
+ int arg[5];
+};
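+
+/* For illustration: with the APUOP definition used in spu-opc.c, the table line
+ *     APUOP(M_BRSL, RI16, 0x198, "brsl", _A2(A_T,A_R18), 00002, BR)
+ * becomes roughly the initializer
+ *     { RI16, 0x198, "brsl", { 2, A_T, A_R18 } }
+ * where arg[0] holds the operand count that print_insn_spu() iterates over. */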
+
+#define SIGNED_EXTRACT(insn,size,pos) (((int)((insn) << (32-size-pos))) >> (32-size))
+#define UNSIGNED_EXTRACT(insn,size,pos) (((insn) >> pos) & ((1 << size)-1))
+
+#define DECODE_INSN_RT(insn) (insn & 0x7f)
+#define DECODE_INSN_RA(insn) ((insn >> 7) & 0x7f)
+#define DECODE_INSN_RB(insn) ((insn >> 14) & 0x7f)
+#define DECODE_INSN_RC(insn) ((insn >> 21) & 0x7f)
+
+#define DECODE_INSN_I10(insn) SIGNED_EXTRACT(insn,10,14)
+#define DECODE_INSN_U10(insn) UNSIGNED_EXTRACT(insn,10,14)
+
+/* For branching, immediate loads, hbr and lqa/stqa. */
+#define DECODE_INSN_I16(insn) SIGNED_EXTRACT(insn,16,7)
+#define DECODE_INSN_U16(insn) UNSIGNED_EXTRACT(insn,16,7)
+
+/* for stop */
+#define DECODE_INSN_U14(insn) UNSIGNED_EXTRACT(insn,14,0)
+
+/* For ila */
+#define DECODE_INSN_I18(insn) SIGNED_EXTRACT(insn,18,7)
+#define DECODE_INSN_U18(insn) UNSIGNED_EXTRACT(insn,18,7)
+
+/* For rotate and shift and generate control mask */
+#define DECODE_INSN_I7(insn) SIGNED_EXTRACT(insn,7,14)
+#define DECODE_INSN_U7(insn) UNSIGNED_EXTRACT(insn,7,14)
+
+/* For float <-> int conversion */
+#define DECODE_INSN_I8(insn) SIGNED_EXTRACT(insn,8,14)
+#define DECODE_INSN_U8(insn) UNSIGNED_EXTRACT(insn,8,14)
+
+/* For hbr */
+#define DECODE_INSN_I9a(insn) ((SIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_I9b(insn) ((SIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9a(insn) ((UNSIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9b(insn) ((UNSIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index f56ffef4def..a34ed49e035 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -37,13 +37,18 @@
#include <asm/sstep.h>
#include <asm/bug.h>
#include <asm/irq_regs.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
#include <asm/paca.h>
+#include <asm/iseries/it_lp_reg_save.h>
#endif
#include "nonstdio.h"
+#include "dis-asm.h"
#define scanhex xmon_scanhex
#define skipbl xmon_skipbl
@@ -107,7 +112,6 @@ static int bsesc(void);
static void dump(void);
static void prdump(unsigned long, long);
static int ppc_inst_dump(unsigned long, long, int);
-void print_address(unsigned long);
static void backtrace(struct pt_regs *);
static void excprint(struct pt_regs *);
static void prregs(struct pt_regs *);
@@ -147,9 +151,9 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after);
static const char *getvecname(unsigned long vec);
-int xmon_no_auto_backtrace;
+static int do_spu_cmd(void);
-extern int print_insn_powerpc(unsigned long, unsigned long, int);
+int xmon_no_auto_backtrace;
extern void xmon_enter(void);
extern void xmon_leave(void);
@@ -209,8 +213,15 @@ Commands:\n\
mi show information about memory allocation\n\
p call a procedure\n\
r print registers\n\
- s single step\n\
- S print special registers\n\
+ s single step\n"
+#ifdef CONFIG_SPU_BASE
+" ss stop execution on all spus\n\
+ sr restore execution on stopped spus\n\
+ sf # dump spu fields for spu # (in hex)\n\
+ sd # dump spu local store for spu # (in hex)\n\
+ sdi # disassemble spu local store for spu # (in hex)\n"
+#endif
+" S print special registers\n\
t print backtrace\n\
x exit monitor and recover\n\
X exit monitor and dont recover\n"
@@ -518,6 +529,7 @@ int xmon(struct pt_regs *excp)
xmon_save_regs(&regs);
excp = &regs;
}
+
return xmon_core(excp, 0);
}
EXPORT_SYMBOL(xmon);
@@ -809,6 +821,8 @@ cmds(struct pt_regs *excp)
cacheflush();
break;
case 's':
+ if (do_spu_cmd() == 0)
+ break;
if (do_step(excp))
return cmd;
break;
@@ -1555,11 +1569,6 @@ void super_regs(void)
{
int cmd;
unsigned long val;
-#ifdef CONFIG_PPC_ISERIES
- struct paca_struct *ptrPaca = NULL;
- struct lppaca *ptrLpPaca = NULL;
- struct ItLpRegSave *ptrLpRegSave = NULL;
-#endif
cmd = skipbl();
if (cmd == '\n') {
@@ -1576,26 +1585,32 @@ void super_regs(void)
printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
#ifdef CONFIG_PPC_ISERIES
- // Dump out relevant Paca data areas.
- printf("Paca: \n");
- ptrPaca = get_paca();
-
- printf(" Local Processor Control Area (LpPaca): \n");
- ptrLpPaca = ptrPaca->lppaca_ptr;
- printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n",
- ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
- printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
- ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
- printf(" Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
-
- printf(" Local Processor Register Save Area (LpRegSave): \n");
- ptrLpRegSave = ptrPaca->reg_save_ptr;
- printf(" Saved Sprg0=%.16lx Saved Sprg1=%.16lx \n",
- ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG0);
- printf(" Saved Sprg2=%.16lx Saved Sprg3=%.16lx \n",
- ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
- printf(" Saved Msr =%.16lx Saved Nia =%.16lx \n",
- ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ struct paca_struct *ptrPaca;
+ struct lppaca *ptrLpPaca;
+ struct ItLpRegSave *ptrLpRegSave;
+
+ /* Dump out relevant Paca data areas. */
+ printf("Paca: \n");
+ ptrPaca = get_paca();
+
+ printf(" Local Processor Control Area (LpPaca): \n");
+ ptrLpPaca = ptrPaca->lppaca_ptr;
+ printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n",
+ ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
+ printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
+ ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
+ printf(" Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
+
+ printf(" Local Processor Register Save Area (LpRegSave): \n");
+ ptrLpRegSave = ptrPaca->reg_save_ptr;
+ printf(" Saved Sprg0=%.16lx Saved Sprg1=%.16lx \n",
+ ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG0);
+ printf(" Saved Sprg2=%.16lx Saved Sprg3=%.16lx \n",
+ ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
+ printf(" Saved Msr =%.16lx Saved Nia =%.16lx \n",
+ ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+ }
#endif
return;
@@ -2053,8 +2068,11 @@ prdump(unsigned long adrs, long ndump)
}
}
+typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr);
+
int
-ppc_inst_dump(unsigned long adr, long count, int praddr)
+generic_inst_dump(unsigned long adr, long count, int praddr,
+ instruction_dump_func dump_func)
{
int nr, dotted;
unsigned long first_adr;
@@ -2084,12 +2102,18 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
if (praddr)
printf(REG" %.8x", adr, inst);
printf("\t");
- print_insn_powerpc(inst, adr, 0); /* always returns 4 */
+ dump_func(inst, adr);
printf("\n");
}
return adr - first_adr;
}
+int
+ppc_inst_dump(unsigned long adr, long count, int praddr)
+{
+ return generic_inst_dump(adr, count, praddr, print_insn_powerpc);
+}
+
void
print_address(unsigned long addr)
{
@@ -2557,6 +2581,10 @@ void dump_segments(void)
void xmon_init(int enable)
{
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return;
+#endif
if (enable) {
__debugger = xmon;
__debugger_ipi = xmon_ipi;
@@ -2594,6 +2622,10 @@ static struct sysrq_key_op sysrq_xmon_op =
static int __init setup_xmon_sysrq(void)
{
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES))
+ return 0;
+#endif
register_sysrq_key('x', &sysrq_xmon_op);
return 0;
}
@@ -2630,3 +2662,263 @@ void __init xmon_setup(void)
if (xmon_early)
debugger(NULL);
}
+
+#ifdef CONFIG_SPU_BASE
+
+struct spu_info {
+ struct spu *spu;
+ u64 saved_mfc_sr1_RW;
+ u32 saved_spu_runcntl_RW;
+ unsigned long dump_addr;
+ u8 stopped_ok;
+};
+
+#define XMON_NUM_SPUS 16 /* Enough for current hardware */
+
+static struct spu_info spu_info[XMON_NUM_SPUS];
+
+void xmon_register_spus(struct list_head *list)
+{
+ struct spu *spu;
+
+ list_for_each_entry(spu, list, full_list) {
+ if (spu->number >= XMON_NUM_SPUS) {
+ WARN_ON(1);
+ continue;
+ }
+
+ spu_info[spu->number].spu = spu;
+ spu_info[spu->number].stopped_ok = 0;
+ spu_info[spu->number].dump_addr = (unsigned long)
+ spu_info[spu->number].spu->local_store;
+ }
+}
+
+static void stop_spus(void)
+{
+ struct spu *spu;
+ int i;
+ u64 tmp;
+
+ for (i = 0; i < XMON_NUM_SPUS; i++) {
+ if (!spu_info[i].spu)
+ continue;
+
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+
+ spu = spu_info[i].spu;
+
+ spu_info[i].saved_spu_runcntl_RW =
+ in_be32(&spu->problem->spu_runcntl_RW);
+
+ tmp = spu_mfc_sr1_get(spu);
+ spu_info[i].saved_mfc_sr1_RW = tmp;
+
+ tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+ spu_mfc_sr1_set(spu, tmp);
+
+ sync();
+ __delay(200);
+
+ spu_info[i].stopped_ok = 1;
+
+ printf("Stopped spu %.2d (was %s)\n", i,
+ spu_info[i].saved_spu_runcntl_RW ?
+ "running" : "stopped");
+ } else {
+ catch_memory_errors = 0;
+ printf("*** Error stopping spu %.2d\n", i);
+ }
+ catch_memory_errors = 0;
+ }
+}
+
+static void restart_spus(void)
+{
+ struct spu *spu;
+ int i;
+
+ for (i = 0; i < XMON_NUM_SPUS; i++) {
+ if (!spu_info[i].spu)
+ continue;
+
+ if (!spu_info[i].stopped_ok) {
+ printf("*** Error, spu %d was not successfully stopped"
+ ", not restarting\n", i);
+ continue;
+ }
+
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+
+ spu = spu_info[i].spu;
+ spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
+ out_be32(&spu->problem->spu_runcntl_RW,
+ spu_info[i].saved_spu_runcntl_RW);
+
+ sync();
+ __delay(200);
+
+ printf("Restarted spu %.2d\n", i);
+ } else {
+ catch_memory_errors = 0;
+ printf("*** Error restarting spu %.2d\n", i);
+ }
+ catch_memory_errors = 0;
+ }
+}
+
+#define DUMP_WIDTH 23
+#define DUMP_VALUE(format, field, value) \
+do { \
+ if (setjmp(bus_error_jmp) == 0) { \
+ catch_memory_errors = 1; \
+ sync(); \
+ printf(" %-*s = "format"\n", DUMP_WIDTH, \
+ #field, value); \
+ sync(); \
+ __delay(200); \
+ } else { \
+ catch_memory_errors = 0; \
+ printf(" %-*s = *** Error reading field.\n", \
+ DUMP_WIDTH, #field); \
+ } \
+ catch_memory_errors = 0; \
+} while (0)
+
+#define DUMP_FIELD(obj, format, field) \
+ DUMP_VALUE(format, field, obj->field)
+
+static void dump_spu_fields(struct spu *spu)
+{
+ printf("Dumping spu fields at address %p:\n", spu);
+
+ DUMP_FIELD(spu, "0x%x", number);
+ DUMP_FIELD(spu, "%s", name);
+ DUMP_FIELD(spu, "0x%lx", local_store_phys);
+ DUMP_FIELD(spu, "0x%p", local_store);
+ DUMP_FIELD(spu, "0x%lx", ls_size);
+ DUMP_FIELD(spu, "0x%x", node);
+ DUMP_FIELD(spu, "0x%lx", flags);
+ DUMP_FIELD(spu, "0x%lx", dar);
+ DUMP_FIELD(spu, "0x%lx", dsisr);
+ DUMP_FIELD(spu, "%d", class_0_pending);
+ DUMP_FIELD(spu, "0x%lx", irqs[0]);
+ DUMP_FIELD(spu, "0x%lx", irqs[1]);
+ DUMP_FIELD(spu, "0x%lx", irqs[2]);
+ DUMP_FIELD(spu, "0x%x", slb_replace);
+ DUMP_FIELD(spu, "%d", pid);
+ DUMP_FIELD(spu, "%d", prio);
+ DUMP_FIELD(spu, "0x%p", mm);
+ DUMP_FIELD(spu, "0x%p", ctx);
+ DUMP_FIELD(spu, "0x%p", rq);
+ DUMP_FIELD(spu, "0x%p", timestamp);
+ DUMP_FIELD(spu, "0x%lx", problem_phys);
+ DUMP_FIELD(spu, "0x%p", problem);
+ DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
+ in_be32(&spu->problem->spu_runcntl_RW));
+ DUMP_VALUE("0x%x", problem->spu_status_R,
+ in_be32(&spu->problem->spu_status_R));
+ DUMP_VALUE("0x%x", problem->spu_npc_RW,
+ in_be32(&spu->problem->spu_npc_RW));
+ DUMP_FIELD(spu, "0x%p", priv2);
+ DUMP_FIELD(spu, "0x%p", pdata);
+}
+
+int
+spu_inst_dump(unsigned long adr, long count, int praddr)
+{
+ return generic_inst_dump(adr, count, praddr, print_insn_spu);
+}
+
+static void dump_spu_ls(unsigned long num, int subcmd)
+{
+ unsigned long offset, addr, ls_addr;
+
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+ ls_addr = (unsigned long)spu_info[num].spu->local_store;
+ sync();
+ __delay(200);
+ } else {
+ catch_memory_errors = 0;
+ printf("*** Error: accessing spu info for spu %d\n", num);
+ return;
+ }
+ catch_memory_errors = 0;
+
+ if (scanhex(&offset))
+ addr = ls_addr + offset;
+ else
+ addr = spu_info[num].dump_addr;
+
+ if (addr >= ls_addr + LS_SIZE) {
+ printf("*** Error: address outside of local store\n");
+ return;
+ }
+
+ switch (subcmd) {
+ case 'i':
+ addr += spu_inst_dump(addr, 16, 1);
+ last_cmd = "sdi\n";
+ break;
+ default:
+ prdump(addr, 64);
+ addr += 64;
+ last_cmd = "sd\n";
+ break;
+ }
+
+ spu_info[num].dump_addr = addr;
+}
+
+static int do_spu_cmd(void)
+{
+ static unsigned long num = 0;
+ int cmd, subcmd = 0;
+
+ cmd = inchar();
+ switch (cmd) {
+ case 's':
+ stop_spus();
+ break;
+ case 'r':
+ restart_spus();
+ break;
+ case 'd':
+ subcmd = inchar();
+ if (isxdigit(subcmd) || subcmd == '\n')
+ termch = subcmd;
+ case 'f':
+ scanhex(&num);
+ if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
+ printf("*** Error: invalid spu number\n");
+ return 0;
+ }
+
+ switch (cmd) {
+ case 'f':
+ dump_spu_fields(spu_info[num].spu);
+ break;
+ default:
+ dump_spu_ls(num, subcmd);
+ break;
+ }
+
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+#else /* ! CONFIG_SPU_BASE */
+static int do_spu_cmd(void)
+{
+ return -1;
+}
+#endif
diff --git a/arch/ppc/.gitignore b/arch/ppc/.gitignore
new file mode 100644
index 00000000000..a1a869c8c84
--- /dev/null
+++ b/arch/ppc/.gitignore
@@ -0,0 +1 @@
+include
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e2781..709952c25f2 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@ struct fcc_enet_private {
phy_info_t *phy;
struct work_struct phy_relink;
struct work_struct phy_display_config;
+ struct net_device *dev;
uint sequence_done;
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
NULL
};
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private, phy_relink);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
printk(".\n");
}
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private,
+ phy_display_config);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
cep->phy_id_done = 0;
cep->phy_addr = fip->fc_phyaddr;
mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
- INIT_WORK(&cep->phy_relink, mii_display_status, dev);
- INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+ INIT_WORK(&cep->phy_relink, mii_display_status);
+ INIT_WORK(&cep->phy_display_config, mii_display_config);
+ cep->dev = dev;
#endif /* CONFIG_USE_MDIO */
fip++;
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e3d33..e6c28fb423b 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@ struct fec_enet_private {
uint phy_speed;
phy_info_t *phy;
struct work_struct phy_task;
+ struct net_device *dev;
uint sequence_done;
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
printk(".\n");
}
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
volatile uint *s = &(fep->phy_status);
printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
fep->sequence_done = 1;
}
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
int duplex;
fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_relink);
schedule_work(&fep->phy_task);
}
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_display_config);
schedule_work(&fep->phy_task);
}
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 077711e6310..edf71a4ecc9 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -77,9 +77,11 @@ config 6xx
config 40x
bool "40x"
+ select PPC_DCR_NATIVE
config 44x
bool "44x"
+ select PPC_DCR_NATIVE
config 8xx
bool "8xx"
@@ -95,6 +97,15 @@ endchoice
config PPC_FPU
bool
+config PPC_DCR_NATIVE
+ bool
+ default n
+
+config PPC_DCR
+ bool
+ depends on PPC_DCR_NATIVE
+ default y
+
config BOOKE
bool
depends on E200 || E500
@@ -724,7 +735,7 @@ config MPC834x_SYS
Be aware that PCI buses can only function when SYS board is plugged
into the PIB (Platform IO Board) board from Freescale which provide
3 PCI slots. The PIBs PCI initialization is the bootloader's
- responsiblilty.
+ responsibility.
config EV64360
bool "Marvell-EV64360BP"
diff --git a/arch/ppc/boot/images/.gitignore b/arch/ppc/boot/images/.gitignore
new file mode 100644
index 00000000000..21c2dc5b6b7
--- /dev/null
+++ b/arch/ppc/boot/images/.gitignore
@@ -0,0 +1,6 @@
+sImage
+vmapus
+vmlinux*
+miboot*
+zImage*
+uImage
diff --git a/arch/ppc/boot/lib/.gitignore b/arch/ppc/boot/lib/.gitignore
new file mode 100644
index 00000000000..1629a616775
--- /dev/null
+++ b/arch/ppc/boot/lib/.gitignore
@@ -0,0 +1,3 @@
+inffast.c
+inflate.c
+inftrees.c
diff --git a/arch/ppc/boot/utils/.gitignore b/arch/ppc/boot/utils/.gitignore
new file mode 100644
index 00000000000..bbdfb3b9c53
--- /dev/null
+++ b/arch/ppc/boot/utils/.gitignore
@@ -0,0 +1,3 @@
+mkprep
+mkbugboot
+mktree
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 27faeca2c7a..3c506af1988 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -313,7 +313,7 @@ early_init(int r3, int r4, int r5)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- spec = identify_cpu(offset);
+ spec = identify_cpu(offset, mfspr(SPRN_PVR));
do_feature_fixups(spec->cpu_features,
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 9661a91183b..2f835b9e95e 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -316,7 +316,7 @@ void machine_check_exception(struct pt_regs *regs)
if (reason & MCSR_BUS_RBERR)
printk("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_WBERR)
- printk("Bus - Read Data Bus Error\n");
+ printk("Bus - Write Data Bus Error\n");
if (reason & MCSR_BUS_IPERR)
printk("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 16e8661e1fe..61921268a0d 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -31,6 +31,7 @@ SECTIONS
.plt : { *(.plt) }
.text :
{
+ _text = .;
*(.text)
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c
index 4009f4983ca..75857b38e89 100644
--- a/arch/ppc/platforms/4xx/bubinga.c
+++ b/arch/ppc/platforms/4xx/bubinga.c
@@ -116,6 +116,7 @@ bubinga_early_serial_map(void)
void __init
bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
{
+#ifdef CONFIG_PCI
unsigned int bar_response, bar;
/*
@@ -212,6 +213,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
#endif
+#endif
}
void __init
diff --git a/arch/ppc/platforms/4xx/cpci405.c b/arch/ppc/platforms/4xx/cpci405.c
index 367430998fc..8474b05b795 100644
--- a/arch/ppc/platforms/4xx/cpci405.c
+++ b/arch/ppc/platforms/4xx/cpci405.c
@@ -126,6 +126,7 @@ cpci405_setup_arch(void)
void __init
bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
{
+#ifdef CONFIG_PCI
unsigned int bar_response, bar;
/* Disable region first */
@@ -167,6 +168,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
PCI_FUNC(hose->first_busno), bar,
&bar_response);
}
+#endif
}
void __init
diff --git a/arch/ppc/platforms/4xx/ep405.c b/arch/ppc/platforms/4xx/ep405.c
index ae5c82081c9..e5adf9ba1fc 100644
--- a/arch/ppc/platforms/4xx/ep405.c
+++ b/arch/ppc/platforms/4xx/ep405.c
@@ -68,6 +68,7 @@ ep405_setup_arch(void)
void __init
bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
{
+#ifdef CONFIG_PCI
unsigned int bar_response, bar;
/*
* Expected PCI mapping:
@@ -130,6 +131,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
PCI_FUNC(hose->first_busno), bar, bar_response);
}
/* end work arround */
+#endif
}
void __init
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
index 3397f0de159..b84f8df325c 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
@@ -121,8 +121,8 @@ mpc834x_sys_setup_arch(void)
mdata->irq[0] = MPC83xx_IRQ_EXT1;
mdata->irq[1] = MPC83xx_IRQ_EXT2;
- mdata->irq[2] = -1;
- mdata->irq[31] = -1;
+ mdata->irq[2] = PHY_POLL;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index 4f839da6782..00a3ba57063 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -92,9 +92,9 @@ mpc8540ads_setup_arch(void)
mdata->irq[0] = MPC85xx_IRQ_EXT5;
mdata->irq[1] = MPC85xx_IRQ_EXT5;
- mdata->irq[2] = -1;
+ mdata->irq[2] = PHY_POLL;
mdata->irq[3] = MPC85xx_IRQ_EXT5;
- mdata->irq[31] = -1;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 14ecec7bbed..3a060468dd9 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -156,9 +156,9 @@ mpc8560ads_setup_arch(void)
mdata->irq[0] = MPC85xx_IRQ_EXT5;
mdata->irq[1] = MPC85xx_IRQ_EXT5;
- mdata->irq[2] = -1;
+ mdata->irq[2] = PHY_POLL;
mdata->irq[3] = MPC85xx_IRQ_EXT5;
- mdata->irq[31] = -1;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 5ce0f69c1db..2d59eb776c9 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -451,9 +451,9 @@ mpc85xx_cds_setup_arch(void)
mdata->irq[0] = MPC85xx_IRQ_EXT5;
mdata->irq[1] = MPC85xx_IRQ_EXT5;
- mdata->irq[2] = -1;
- mdata->irq[3] = -1;
- mdata->irq[31] = -1;
+ mdata->irq[2] = PHY_POLL;
+ mdata->irq[3] = PHY_POLL;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 764d580ff53..1d10ab98f66 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -129,7 +129,7 @@ sbc8560_setup_arch(void)
mdata->irq[25] = MPC85xx_IRQ_EXT6;
mdata->irq[26] = MPC85xx_IRQ_EXT7;
- mdata->irq[31] = -1;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 4bb18ab2767..b1f5b737c70 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -123,7 +123,7 @@ gp3_setup_arch(void)
mdata->irq[2] = MPC85xx_IRQ_EXT5;
mdata->irq[4] = MPC85xx_IRQ_EXT5;
- mdata->irq[31] = -1;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/85xx/tqm85xx.c b/arch/ppc/platforms/85xx/tqm85xx.c
index dd45f2e1844..4ee2bd156dc 100644
--- a/arch/ppc/platforms/85xx/tqm85xx.c
+++ b/arch/ppc/platforms/85xx/tqm85xx.c
@@ -137,9 +137,9 @@ tqm85xx_setup_arch(void)
mdata->irq[0] = MPC85xx_IRQ_EXT8;
mdata->irq[1] = MPC85xx_IRQ_EXT8;
- mdata->irq[2] = -1;
+ mdata->irq[2] = PHY_POLL;
mdata->irq[3] = MPC85xx_IRQ_EXT8;
- mdata->irq[31] = -1;
+ mdata->irq[31] = PHY_POLL;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
diff --git a/arch/ppc/platforms/mpc8272ads_setup.c b/arch/ppc/platforms/mpc8272ads_setup.c
index 1f9ea36837b..0bc06768cf2 100644
--- a/arch/ppc/platforms/mpc8272ads_setup.c
+++ b/arch/ppc/platforms/mpc8272ads_setup.c
@@ -266,10 +266,10 @@ static void __init mpc8272ads_fixup_mdio_pdata(struct platform_device *pdev,
int idx)
{
m82xx_mii_bb_pdata.irq[0] = PHY_INTERRUPT;
- m82xx_mii_bb_pdata.irq[1] = -1;
- m82xx_mii_bb_pdata.irq[2] = -1;
+ m82xx_mii_bb_pdata.irq[1] = PHY_POLL;
+ m82xx_mii_bb_pdata.irq[2] = PHY_POLL;
m82xx_mii_bb_pdata.irq[3] = PHY_INTERRUPT;
- m82xx_mii_bb_pdata.irq[31] = -1;
+ m82xx_mii_bb_pdata.irq[31] = PHY_POLL;
m82xx_mii_bb_pdata.mdio_dat.offset =
diff --git a/arch/ppc/platforms/mpc866ads_setup.c b/arch/ppc/platforms/mpc866ads_setup.c
index e95d2c11174..8a0c07eb444 100644
--- a/arch/ppc/platforms/mpc866ads_setup.c
+++ b/arch/ppc/platforms/mpc866ads_setup.c
@@ -361,7 +361,7 @@ int __init mpc866ads_init(void)
fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
/* No PHY interrupt line here */
- fmpi->irq[0xf] = -1;
+ fmpi->irq[0xf] = PHY_POLL;
/* Since either of the uarts could be used as console, they need to ready */
#ifdef CONFIG_SERIAL_CPM_SMC1
@@ -380,7 +380,7 @@ int __init mpc866ads_init(void)
fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
/* No PHY interrupt line here */
- fmpi->irq[0xf] = -1;
+ fmpi->irq[0xf] = PHY_POLL;
return 0;
}
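(Editorial sketch, not part of the patch.) The board hunks above all make the same substitution: entries in the PHY interrupt maps that used the bare value -1 now use phylib's PHY_POLL constant (defined as -1 in <linux/phy.h>), which tells the PHY layer to poll the link instead of waiting for an interrupt. A minimal sketch of the idiom, assuming a hypothetical board where only the PHY at address 0 has an interrupt line:

#include <linux/phy.h>		/* PHY_POLL, PHY_MAX_ADDR */

static int board_phy_irq[PHY_MAX_ADDR];

static void example_init_phy_irqs(int phy_irq_line)
{
	int i;

	/* default every PHY address to polling ... */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		board_phy_irq[i] = PHY_POLL;
	/* ... and wire up the one address that really has an IRQ */
	board_phy_irq[0] = phy_irq_line;
}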
diff --git a/arch/ppc/syslib/mpc8xx_devices.c b/arch/ppc/syslib/mpc8xx_devices.c
index cf5ab47487a..31fb56593d1 100644
--- a/arch/ppc/syslib/mpc8xx_devices.c
+++ b/arch/ppc/syslib/mpc8xx_devices.c
@@ -78,7 +78,7 @@ struct platform_device ppc_sys_platform_devices[] = {
{
.name = "pram",
.start = 0x3c00,
- .end = 0x3c80,
+ .end = 0x3c7f,
.flags = IORESOURCE_MEM,
},
{
@@ -103,7 +103,7 @@ struct platform_device ppc_sys_platform_devices[] = {
{
.name = "pram",
.start = 0x3d00,
- .end = 0x3d80,
+ .end = 0x3d7f,
.flags = IORESOURCE_MEM,
},
@@ -129,7 +129,7 @@ struct platform_device ppc_sys_platform_devices[] = {
{
.name = "pram",
.start = 0x3e00,
- .end = 0x3e80,
+ .end = 0x3e7f,
.flags = IORESOURCE_MEM,
},
@@ -155,7 +155,7 @@ struct platform_device ppc_sys_platform_devices[] = {
{
.name = "pram",
.start = 0x3f00,
- .end = 0x3f80,
+ .end = 0x3f7f,
.flags = IORESOURCE_MEM,
},
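(Editorial sketch, not part of the patch.) The four pram hunks above are off-by-one fixes: struct resource describes an inclusive [start, end] range, so a 0x80-byte window that starts at 0x3c00 must end at 0x3c7f, not 0x3c80. An illustration of the convention:

#include <linux/ioport.h>

/* An N-byte memory window is start .. start + N - 1, because .end is the
 * last valid address, not one past it. */
static struct resource example_pram_resource = {
	.name	= "pram",
	.start	= 0x3c00,
	.end	= 0x3c00 + 0x80 - 1,	/* == 0x3c7f */
	.flags	= IORESOURCE_MEM,
};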
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 245b81bc715..583d9ff0a57 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -33,9 +33,6 @@ config GENERIC_CALIBRATE_DELAY
config GENERIC_TIME
def_bool y
-config GENERIC_BUST_SPINLOCK
- bool
-
mainmenu "Linux Kernel Configuration"
config S390
@@ -181,7 +178,7 @@ config PACK_STACK
config SMALL_STACK
bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
- depends on PACK_STACK
+ depends on PACK_STACK && !LOCKDEP
help
If you say Y here and the compiler supports the -mkernel-backchain
option the kernel will use a smaller kernel stack size. For 31 bit
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 5deb9f7544a..6598e526857 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,6 +35,9 @@ cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
+# KBUILD_IMAGE is necessary for make rpm
+KBUILD_IMAGE :=arch/s390/boot/image
+
#
# Prevent tail-call optimizations, to get clearer backtraces:
#
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc7d98..b8c23729026 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@ static int appldata_timer_active;
* Work queue
*/
static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
/*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
*
* call data gathering function for each (active) module
*/
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
@@ -561,7 +561,6 @@ appldata_offline_cpu(int cpu)
spin_unlock(&appldata_timer_lock);
}
-#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit
appldata_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -582,7 +581,6 @@ appldata_cpu_notify(struct notifier_block *self,
static struct notifier_block appldata_nb = {
.notifier_call = appldata_cpu_notify,
};
-#endif
/*
* appldata_init()
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index aa978978d3d..a81881c9b29 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,7 +4,7 @@
EXTRA_AFLAGS := -traditional
-obj-y := bitmap.o traps.o time.o process.o \
+obj-y := bitmap.o traps.o time.o process.o reset.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 9565a2dcfad..5c46054195c 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -176,7 +176,6 @@ struct elf_prpsinfo32
#include <linux/highuid.h>
-#define elf_addr_t u32
/*
#define init_elf_binfmt init_elf32_binfmt
*/
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 1eae74e72f9..a5972f1541f 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -21,14 +21,15 @@ static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
/*
- * the caller of __cpcmd has to ensure that the response buffer is below 2 GB
+ * __cpcmd has some restrictions compared to cpcmd:
+ * - the response buffer, if any, must reside below 2GB
+ * - __cpcmd does not take the cpcmd lock and is therefore not SMP-safe
*/
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
- unsigned long flags, cmdlen;
+ unsigned cmdlen;
int return_code, return_len;
- spin_lock_irqsave(&cpcmd_lock, flags);
cmdlen = strlen(cmd);
BUG_ON(cmdlen > 240);
memcpy(cpcmd_buf, cmd, cmdlen);
@@ -74,7 +75,6 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
: "+d" (reg3) : "d" (reg2) : "cc");
return_code = (int) reg3;
}
- spin_unlock_irqrestore(&cpcmd_lock, flags);
if (response_code != NULL)
*response_code = return_code;
return return_len;
@@ -82,15 +82,18 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
EXPORT_SYMBOL(__cpcmd);
-#ifdef CONFIG_64BIT
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
char *lowbuf;
int len;
+ unsigned long flags;
if ((rlen == 0) || (response == NULL)
- || !((unsigned long)response >> 31))
+ || !((unsigned long)response >> 31)) {
+ spin_lock_irqsave(&cpcmd_lock, flags);
len = __cpcmd(cmd, response, rlen, response_code);
+ spin_unlock_irqrestore(&cpcmd_lock, flags);
+ }
else {
lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
if (!lowbuf) {
@@ -98,7 +101,9 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
"cpcmd: could not allocate response buffer\n");
return -ENOMEM;
}
+ spin_lock_irqsave(&cpcmd_lock, flags);
len = __cpcmd(cmd, lowbuf, rlen, response_code);
+ spin_unlock_irqrestore(&cpcmd_lock, flags);
memcpy(response, lowbuf, rlen);
kfree(lowbuf);
}
@@ -106,4 +111,3 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
}
EXPORT_SYMBOL(cpcmd);
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 0cf59bb7a85..8f8c802f1bc 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -418,24 +418,6 @@ start:
.gotr:
l %r10,.tbl # EBCDIC to ASCII table
tr 0(240,%r8),0(%r10)
- stidp __LC_CPUID # Are we running on VM maybe
- cli __LC_CPUID,0xff
- bnz .test
- .long 0x83300060 # diag 3,0,x'0060' - storage size
- b .done
-.test:
- mvc 0x68(8),.pgmnw # set up pgm check handler
- l %r2,.fourmeg
- lr %r3,%r2
- bctr %r3,%r0 # 4M-1
-.loop: iske %r0,%r3
- ar %r3,%r2
-.pgmx:
- sr %r3,%r2
- la %r3,1(%r3)
-.done:
- l %r1,.memsize
- st %r3,ARCH_OFFSET(%r1)
slr %r0,%r0
st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
@@ -443,9 +425,6 @@ start:
.tbl: .long _ebcasc # translate table
.cmd: .long COMMAND_LINE # address of command line buffer
.parm: .long PARMAREA
-.memsize: .long memory_size
-.fourmeg: .long 0x00400000 # 4M
-.pgmnw: .long 0x00080000,.pgmx
.lowcase:
.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 0a2c929486a..4388b3309e0 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -131,10 +131,11 @@ startup_continue:
.long init_thread_union
.Lpmask:
.byte 0
-.align 8
+ .align 8
.Lpcext:.long 0x00080000,0x80000000
.Lcr:
.long 0x00 # place holder for cr0
+ .align 8
.Lwaitsclp:
.long 0x010a0000,0x80000000 + .Lsclph
.Lrcp:
@@ -156,7 +157,7 @@ startup_continue:
slr %r4,%r4 # set start of chunk to zero
slr %r5,%r5 # set end of chunk to zero
slr %r6,%r6 # set access code to zero
- la %r10, MEMORY_CHUNKS # number of chunks
+ la %r10,MEMORY_CHUNKS # number of chunks
.Lloop:
tprot 0(%r5),0 # test protection of first byte
ipm %r7
@@ -176,8 +177,6 @@ startup_continue:
st %r0,4(%r3) # store size of chunk
st %r6,8(%r3) # store type of chunk
la %r3,12(%r3)
- l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size
- st %r5,0(%r4) # store last end to memory size
ahi %r10,-1 # update chunk number
.Lchkloop:
lr %r6,%r7 # set access code to last cc
@@ -292,7 +291,6 @@ startup_continue:
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
-.Lmemsize:.long memory_size
.Lmchunk:.long memory_chunk
.Lmflags:.long machine_flags
.Lbss_bgn: .long __bss_start
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 42f54d48244..c526279e112 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -70,7 +70,20 @@ startup_continue:
sgr %r5,%r5 # set src,length and pad to zero
mvcle %r2,%r4,0 # clear mem
jo .-4 # branch back, if not finish
+ # set program check new psw mask
+ mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
+ larl %r1,.Lslowmemdetect # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ lghi %r1,0xc
+ diag %r0,%r1,0x260 # get memory size of virtual machine
+ cgr %r0,%r1 # different? -> old detection routine
+ jne .Lslowmemdetect
+ aghi %r1,1 # size is one more than end
+ larl %r2,memory_chunk
+ stg %r1,8(%r2) # store size of chunk
+ j .Ldonemem
+.Lslowmemdetect:
l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
.Lservicecall:
stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
@@ -139,8 +152,6 @@ startup_continue:
.int 0x100000
.Lfchunk:
- # set program check new psw mask
- mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
#
# find memory chunks.
@@ -175,8 +186,6 @@ startup_continue:
stg %r0,8(%r3) # store size of chunk
st %r6,20(%r3) # store type of chunk
la %r3,24(%r3)
- larl %r8,memory_size
- stg %r5,0(%r8) # store memory size
ahi %r10,-1 # update chunk number
.Lchkloop:
lr %r6,%r7 # set access code to last cc
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 1f5e782b3d0..a36bea1188d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -13,12 +13,21 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
+#include <linux/ctype.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/cio.h>
+#include <asm/ebcdic.h>
+#include <asm/reset.h>
#define IPL_PARM_BLOCK_VERSION 0
+#define LOADPARM_LEN 8
+
+extern char s390_readinfo_sccb[];
+#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010)
+#define SCCB_LOADPARM (&s390_readinfo_sccb[24])
+#define SCCB_FLAG (s390_readinfo_sccb[91])
enum ipl_type {
IPL_TYPE_NONE = 1,
@@ -289,9 +298,25 @@ static struct attribute_group ipl_fcp_attr_group = {
/* CCW ipl device attributes */
+static ssize_t ipl_ccw_loadparm_show(struct subsystem *subsys, char *page)
+{
+ char loadparm[LOADPARM_LEN + 1] = {};
+
+ if (!SCCB_VALID)
+ return sprintf(page, "#unknown#\n");
+ memcpy(loadparm, SCCB_LOADPARM, LOADPARM_LEN);
+ EBCASC(loadparm, LOADPARM_LEN);
+ strstrip(loadparm);
+ return sprintf(page, "%s\n", loadparm);
+}
+
+static struct subsys_attribute sys_ipl_ccw_loadparm_attr =
+ __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
+
static struct attribute *ipl_ccw_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
NULL,
};
@@ -348,8 +373,57 @@ static struct attribute_group reipl_fcp_attr_group = {
DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
reipl_block_ccw->ipl_info.ccw.devno);
+static void reipl_get_ascii_loadparm(char *loadparm)
+{
+ memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param,
+ LOADPARM_LEN);
+ EBCASC(loadparm, LOADPARM_LEN);
+ loadparm[LOADPARM_LEN] = 0;
+ strstrip(loadparm);
+}
+
+static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page)
+{
+ char buf[LOADPARM_LEN + 1];
+
+ reipl_get_ascii_loadparm(buf);
+ return sprintf(page, "%s\n", buf);
+}
+
+static ssize_t reipl_ccw_loadparm_store(struct subsystem *subsys,
+ const char *buf, size_t len)
+{
+ int i, lp_len;
+
+ /* ignore trailing newline */
+ lp_len = len;
+ if ((len > 0) && (buf[len - 1] == '\n'))
+ lp_len--;
+ /* loadparm can have max 8 characters and must not start with a blank */
+ if ((lp_len > LOADPARM_LEN) || ((lp_len > 0) && (buf[0] == ' ')))
+ return -EINVAL;
+ /* loadparm can only contain "a-z,A-Z,0-9,SP,." */
+ for (i = 0; i < lp_len; i++) {
+ if (isalpha(buf[i]) || isdigit(buf[i]) || (buf[i] == ' ') ||
+ (buf[i] == '.'))
+ continue;
+ return -EINVAL;
+ }
+ /* initialize loadparm with blanks */
+ memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN);
+ /* copy and convert to ebcdic */
+ memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len);
+ ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN);
+ return len;
+}
+
+static struct subsys_attribute sys_reipl_ccw_loadparm_attr =
+ __ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
+ reipl_ccw_loadparm_store);
+
static struct attribute *reipl_ccw_attrs[] = {
&sys_reipl_ccw_device_attr.attr,
+ &sys_reipl_ccw_loadparm_attr.attr,
NULL,
};
@@ -502,23 +576,6 @@ static struct subsys_attribute dump_type_attr =
static decl_subsys(dump, NULL, NULL);
-#ifdef CONFIG_SMP
-static void dump_smp_stop_all(void)
-{
- int cpu;
- preempt_disable();
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- while (signal_processor(cpu, sigp_stop) == sigp_busy)
- udelay(10);
- }
- preempt_enable();
-}
-#else
-#define dump_smp_stop_all() do { } while (0)
-#endif
-
/*
* Shutdown actions section
*/
@@ -571,11 +628,14 @@ void do_reipl(void)
{
struct ccw_dev_id devid;
static char buf[100];
+ char loadparm[LOADPARM_LEN + 1];
switch (reipl_type) {
case IPL_TYPE_CCW:
+ reipl_get_ascii_loadparm(loadparm);
printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n",
reipl_block_ccw->ipl_info.ccw.devno);
+ printk(KERN_EMERG "loadparm = '%s'\n", loadparm);
break;
case IPL_TYPE_FCP:
printk(KERN_EMERG "reboot on fcp device:\n");
@@ -588,12 +648,19 @@ void do_reipl(void)
switch (reipl_method) {
case IPL_METHOD_CCW_CIO:
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
+ if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno)
+ diag308(DIAG308_IPL, NULL);
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case IPL_METHOD_CCW_VM:
- sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno);
- cpcmd(buf, NULL, 0, NULL);
+ if (strlen(loadparm) == 0)
+ sprintf(buf, "IPL %X",
+ reipl_block_ccw->ipl_info.ccw.devno);
+ else
+ sprintf(buf, "IPL %X LOADPARM '%s'",
+ reipl_block_ccw->ipl_info.ccw.devno, loadparm);
+ __cpcmd(buf, NULL, 0, NULL);
break;
case IPL_METHOD_CCW_DIAG:
diag308(DIAG308_SET, reipl_block_ccw);
@@ -607,16 +674,17 @@ void do_reipl(void)
diag308(DIAG308_IPL, NULL);
break;
case IPL_METHOD_FCP_RO_VM:
- cpcmd("IPL", NULL, 0, NULL);
+ __cpcmd("IPL", NULL, 0, NULL);
break;
case IPL_METHOD_NONE:
default:
if (MACHINE_IS_VM)
- cpcmd("IPL", NULL, 0, NULL);
+ __cpcmd("IPL", NULL, 0, NULL);
diag308(DIAG308_IPL, NULL);
break;
}
- panic("reipl failed!\n");
+ printk(KERN_EMERG "reboot failed!\n");
+ signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
static void do_dump(void)
@@ -639,17 +707,17 @@ static void do_dump(void)
switch (dump_method) {
case IPL_METHOD_CCW_CIO:
- dump_smp_stop_all();
+ smp_send_stop();
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case IPL_METHOD_CCW_VM:
- dump_smp_stop_all();
+ smp_send_stop();
sprintf(buf, "STORE STATUS");
- cpcmd(buf, NULL, 0, NULL);
+ __cpcmd(buf, NULL, 0, NULL);
sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
- cpcmd(buf, NULL, 0, NULL);
+ __cpcmd(buf, NULL, 0, NULL);
break;
case IPL_METHOD_CCW_DIAG:
diag308(DIAG308_SET, dump_block_ccw);
@@ -746,6 +814,17 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ /* check if read scp info worked and set loadparm */
+ if (SCCB_VALID)
+ memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
+ SCCB_LOADPARM, LOADPARM_LEN);
+ else
+ /* read scp info failed: set empty loadparm (EBCDIC blanks) */
+ memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
+ LOADPARM_LEN);
+ /* FIXME: check for diag308_set_works when enabling diag ccw reipl */
+ if (!MACHINE_IS_VM)
+ sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
if (ipl_get_type() == IPL_TYPE_CCW)
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
reipl_capabilities |= IPL_TYPE_CCW;
@@ -827,13 +906,11 @@ static int __init dump_ccw_init(void)
return 0;
}
-extern char s390_readinfo_sccb[];
-
static int __init dump_fcp_init(void)
{
int rc;
- if(!(s390_readinfo_sccb[91] & 0x2))
+ if(!(SCCB_FLAG & 0x2) || !SCCB_VALID)
return 0; /* LDIPL DUMP is not installed */
if (!diag308_set_works)
return 0;
@@ -931,3 +1008,53 @@ static int __init s390_ipl_init(void)
}
__initcall(s390_ipl_init);
+
+static LIST_HEAD(rcall);
+static DEFINE_MUTEX(rcall_mutex);
+
+void register_reset_call(struct reset_call *reset)
+{
+ mutex_lock(&rcall_mutex);
+ list_add(&reset->list, &rcall);
+ mutex_unlock(&rcall_mutex);
+}
+EXPORT_SYMBOL_GPL(register_reset_call);
+
+void unregister_reset_call(struct reset_call *reset)
+{
+ mutex_lock(&rcall_mutex);
+ list_del(&reset->list);
+ mutex_unlock(&rcall_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_reset_call);
+
+static void do_reset_calls(void)
+{
+ struct reset_call *reset;
+
+ list_for_each_entry(reset, &rcall, list)
+ reset->fn();
+}
+
+extern void reset_mcck_handler(void);
+
+void s390_reset_system(void)
+{
+ struct _lowcore *lc;
+
+ /* Stack for interrupt/machine check handler */
+ lc = (struct _lowcore *)(unsigned long) store_prefix();
+ lc->panic_stack = S390_lowcore.panic_stack;
+
+ /* Disable prefixing */
+ set_prefix(0);
+
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+
+ /* Set new machine check handler */
+ S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+ S390_lowcore.mcck_new_psw.addr =
+ PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler;
+ do_reset_calls();
+}
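(Editorial sketch, not part of the patch.) The new register_reset_call()/unregister_reset_call() interface added at the end of the ipl.c hunk lets other code hook into s390_reset_system() before a re-IPL or kexec. A minimal, hypothetical user, assuming struct reset_call (from <asm/reset.h>) carries just the fn pointer and list head used above:

#include <linux/init.h>
#include <asm/reset.h>

/* Hypothetical driver hook: quiesce the adapter before the machine is reset. */
static void example_quiesce(void)
{
	/* stop DMA, mask adapter interrupts, ... */
}

static struct reset_call example_reset_call = {
	.fn = example_quiesce,
};

static int __init example_init(void)
{
	register_reset_call(&example_reset_call);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_reset_call(&example_reset_call);
}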
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 67914fe7f31..576368c4f60 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -200,7 +200,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, 0);
mutex_unlock(&kprobe_mutex);
}
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 60b1ea9f946..f6d9bcc0f75 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,15 +1,10 @@
/*
* arch/s390/kernel/machine_kexec.c
*
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005,2006
*
- * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
- *
- */
-
-/*
- * s390_machine_kexec.c - handle the transition of Linux booting another kernel
- * on the S390 architecture.
+ * Author(s): Rolf Adelsberger,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/device.h>
@@ -22,86 +17,49 @@
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/smp.h>
+#include <asm/reset.h>
-static void kexec_halt_all_cpus(void *);
-
-typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
+typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
-int
-machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
{
- unsigned long reboot_code_buffer;
+ void *reboot_code_buffer;
/* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL;
/* Get the destination where the assembler code should be copied to.*/
- reboot_code_buffer = page_to_pfn(image->control_code_page)<<PAGE_SHIFT;
+ reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
/* Then copy it */
- memcpy((void *) reboot_code_buffer, relocate_kernel,
- relocate_kernel_len);
+ memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
return 0;
}
-void
-machine_kexec_cleanup(struct kimage *image)
+void machine_kexec_cleanup(struct kimage *image)
{
}
-void
-machine_shutdown(void)
+void machine_shutdown(void)
{
printk(KERN_INFO "kexec: machine_shutdown called\n");
}
-NORET_TYPE void
-machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
{
- clear_all_subchannels();
- cio_reset_channel_paths();
-
- /* Disable lowcore protection */
- ctl_clear_bit(0,28);
-
- on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
- for (;;);
-}
-
-extern void pfault_fini(void);
-
-static void
-kexec_halt_all_cpus(void *kernel_image)
-{
- static atomic_t cpuid = ATOMIC_INIT(-1);
- int cpu;
- struct kimage *image;
relocate_kernel_t data_mover;
-#ifdef CONFIG_PFAULT
- if (MACHINE_IS_VM)
- pfault_fini();
-#endif
+ smp_send_stop();
+ pfault_fini();
+ s390_reset_system();
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
- signal_processor(smp_processor_id(), sigp_stop);
-
- /* Wait for all other cpus to enter stopped state */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- while (!smp_cpu_not_running(cpu))
- cpu_relax();
- }
-
- image = (struct kimage *) kernel_image;
- data_mover = (relocate_kernel_t)
- (page_to_pfn(image->control_code_page) << PAGE_SHIFT);
+ data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
/* Call the moving routine */
- (*data_mover) (&image->head, image->start);
+ (*data_mover)(&image->head, image->start);
+ for (;;);
}
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 0340477f3b0..f9434d42ce9 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -11,19 +11,10 @@
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
-
- # switch off lowcore protection
-
-.Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
- stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13)
- ni .Lctlsave1-.Lpg0(%r13),0xef
- lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
-
- # do store status of all registers
+.Lpg1: # do store status of all registers
stm %r0,%r15,__LC_GPREGS_SAVE_AREA
stctl %c0,%c15,__LC_CREGS_SAVE_AREA
- mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13)
stam %a0,%a15,__LC_AREGS_SAVE_AREA
stpx __LC_PREFIX_SAVE_AREA
stckc .Lclkcmp-.Lpg0(%r13)
@@ -56,8 +47,7 @@ do_reipl_asm: basr %r13,0
.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: spx .Lnull-.Lpg0(%r13)
- st %r1,__LC_SUBCHANNEL_ID
+.L003: st %r1,__LC_SUBCHANNEL_ID
lpsw 0
sigp 0,0,0(6)
.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
@@ -65,9 +55,6 @@ do_reipl_asm: basr %r13,0
.align 8
.Lclkcmp: .quad 0x0000000000000000
.Lall: .long 0xff000000
-.Lnull: .long 0x00000000
-.Lctlsave1: .long 0x00000000
-.Lctlsave2: .long 0x00000000
.align 8
.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
.Lpcnew: .long 0x00080000,0x80000000+.Lecs
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index de7435054f7..f18ef260ca2 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -10,10 +10,10 @@
#include <asm/lowcore.h>
.globl do_reipl_asm
do_reipl_asm: basr %r13,0
+.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
+.Lpg1: # do store status of all registers
- # do store status of all registers
-
-.Lpg0: stg %r1,.Lregsave-.Lpg0(%r13)
+ stg %r1,.Lregsave-.Lpg0(%r13)
lghi %r1,0x1000
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
lg %r0,.Lregsave-.Lpg0(%r13)
@@ -27,11 +27,7 @@ do_reipl_asm: basr %r13,0
stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
- lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- stctg %c0,%c0,.Lregsave-.Lpg0(%r13)
- ni .Lregsave+4-.Lpg0(%r13),0xef
- lctlg %c0,%c0,.Lregsave-.Lpg0(%r13)
+ lctlg %c6,%c6,.Lall-.Lpg0(%r13)
lgr %r1,%r2
mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
@@ -56,8 +52,7 @@ do_reipl_asm: basr %r13,0
.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: spx .Lnull-.Lpg0(%r13)
- st %r1,__LC_SUBCHANNEL_ID
+.L003: st %r1,__LC_SUBCHANNEL_ID
lhi %r1,0 # mode 0 = esa
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esa mode
@@ -70,7 +65,6 @@ do_reipl_asm: basr %r13,0
.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
.Lregsave: .quad 0x0000000000000000
-.Lnull: .long 0x0000000000000000
.align 16
/*
* These addresses have to be 31 bit otherwise
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f9899ff2e5b..3b456b80bce 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -26,8 +26,7 @@
relocate_kernel:
basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQ (external)
- spx zero64-.base(%r13) # absolute addressing mode
+ stnsm sys_msk-.base(%r13),0xfb # disable DAT
stctl %c0,%c15,ctlregs-.base(%r13)
stm %r0,%r15,gprregs-.base(%r13)
la %r1,load_psw-.base(%r13)
@@ -97,8 +96,6 @@
lpsw 0 # hopefully start new kernel...
.align 8
- zero64:
- .quad 0
load_psw:
.long 0x00080000,0x80000000
sys_msk:
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 4fb443042d9..1f9ea2067b5 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -27,8 +27,7 @@
relocate_kernel:
basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQs
- spx zero64-.base(%r13) # absolute addressing mode
+ stnsm sys_msk-.base(%r13),0xfb # disable DAT
stctg %c0,%c15,ctlregs-.base(%r13)
stmg %r0,%r15,gprregs-.base(%r13)
lghi %r0,3
@@ -100,8 +99,6 @@
lpsw 0 # hopefully start new kernel...
.align 8
- zero64:
- .quad 0
load_psw:
.long 0x00080000,0x80000000
sys_msk:
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
new file mode 100644
index 00000000000..be8688c0665
--- /dev/null
+++ b/arch/s390/kernel/reset.S
@@ -0,0 +1,48 @@
+/*
+ * arch/s390/kernel/reset.S
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <asm/ptrace.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_64BIT
+
+ .globl reset_mcck_handler
+reset_mcck_handler:
+ basr %r13,0
+0: lg %r15,__LC_PANIC_STACK # load panic stack
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ lg %r1,s390_reset_mcck_handler-0b(%r13)
+ ltgr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: la %r1,4095
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+ lpswe __LC_MCK_OLD_PSW
+
+ .globl s390_reset_mcck_handler
+s390_reset_mcck_handler:
+ .quad 0
+
+#else /* CONFIG_64BIT */
+
+ .globl reset_mcck_handler
+reset_mcck_handler:
+ basr %r13,0
+0: l %r15,__LC_PANIC_STACK # load panic stack
+ ahi %r15,-STACK_FRAME_OVERHEAD
+ l %r1,s390_reset_mcck_handler-0b(%r13)
+ ltr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
+ lpsw __LC_MCK_OLD_PSW
+
+ .globl s390_reset_mcck_handler
+s390_reset_mcck_handler:
+ .long 0
+
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2aa13e8e000..b928fecdc74 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -62,13 +62,9 @@ EXPORT_SYMBOL_GPL(uaccess);
unsigned int console_mode = 0;
unsigned int console_devno = -1;
unsigned int console_irq = -1;
-unsigned long memory_size = 0;
unsigned long machine_flags = 0;
-struct {
- unsigned long addr, size, type;
-} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY 1
+
+struct mem_chunk memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
unsigned long __initdata zholes_size[MAX_NR_ZONES];
static unsigned long __initdata memory_end;
@@ -229,11 +225,11 @@ static void __init conmode_default(void)
char *ptr;
if (MACHINE_IS_VM) {
- __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
+ cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
ptr = strstr(query_buffer, "SUBCHANNEL =");
console_irq = simple_strtoul(ptr + 13, NULL, 16);
- __cpcmd("QUERY TERM", query_buffer, 1024, NULL);
+ cpcmd("QUERY TERM", query_buffer, 1024, NULL);
ptr = strstr(query_buffer, "CONMODE");
/*
* Set the conmode to 3215 so that the device recognition
@@ -242,7 +238,7 @@ static void __init conmode_default(void)
* 3215 and the 3270 driver will try to access the console
* device (3215 as console and 3270 as normal tty).
*/
- __cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
+ cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
SET_CONSOLE_SCLP;
@@ -299,14 +295,14 @@ static void do_machine_restart_nonsmp(char * __unused)
static void do_machine_halt_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- cpcmd(vmhalt_cmd, NULL, 0, NULL);
+ __cpcmd(vmhalt_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
static void do_machine_power_off_nonsmp(void)
{
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- cpcmd(vmpoff_cmd, NULL, 0, NULL);
+ __cpcmd(vmpoff_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
@@ -489,6 +485,37 @@ setup_resources(void)
}
}
+static void __init setup_memory_end(void)
+{
+ unsigned long real_size, memory_size;
+ unsigned long max_mem, max_phys;
+ int i;
+
+ memory_size = real_size = 0;
+ max_phys = VMALLOC_END - VMALLOC_MIN_SIZE;
+ memory_end &= PAGE_MASK;
+
+ max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
+
+ for (i = 0; i < MEMORY_CHUNKS; i++) {
+ struct mem_chunk *chunk = &memory_chunk[i];
+
+ real_size = max(real_size, chunk->addr + chunk->size);
+ if (chunk->addr >= max_mem) {
+ memset(chunk, 0, sizeof(*chunk));
+ continue;
+ }
+ if (chunk->addr + chunk->size > max_mem)
+ chunk->size = max_mem - chunk->addr;
+ memory_size = max(memory_size, chunk->addr + chunk->size);
+ }
+ if (!memory_end)
+ memory_end = memory_size;
+ if (real_size > memory_end)
+ printk("More memory detected than supported. Unused: %luk\n",
+ (real_size - memory_end) >> 10);
+}
+
static void __init
setup_memory(void)
{
@@ -645,8 +672,6 @@ setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
- memory_end = memory_size;
-
if (MACHINE_HAS_MVCOS)
memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
else
@@ -654,20 +679,7 @@ setup_arch(char **cmdline_p)
parse_early_param();
-#ifndef CONFIG_64BIT
- memory_end &= ~0x400000UL;
-
- /*
- * We need some free virtual space to be able to do vmalloc.
- * On a machine with 2GB memory we make sure that we have at
- * least 128 MB free space for vmalloc.
- */
- if (memory_end > 1920*1024*1024)
- memory_end = 1920*1024*1024;
-#else /* CONFIG_64BIT */
- memory_end &= ~0x200000UL;
-#endif /* CONFIG_64BIT */
-
+ setup_memory_end();
setup_memory();
setup_resources();
setup_lowcore();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 62822245f9b..19090f7d4f5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -230,18 +230,37 @@ static inline void do_store_status(void)
}
}
+static inline void do_wait_for_stop(void)
+{
+ int cpu;
+
+ /* Wait for all other cpus to enter stopped state */
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ while(!smp_cpu_not_running(cpu))
+ cpu_relax();
+ }
+}
+
/*
* this function sends a 'stop' sigp to all other CPUs in the system.
* it goes straight through.
*/
void smp_send_stop(void)
{
+ /* Disable all interrupts/machine checks */
+ __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+
/* write magic number to zero page (absolute 0) */
lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
/* stop other processors. */
do_send_stop();
+ /* wait until other processors are stopped */
+ do_wait_for_stop();
+
/* store status of other processors. */
do_store_status();
}
@@ -250,88 +269,28 @@ void smp_send_stop(void)
* Reboot, halt and power_off routines for SMP.
*/
-static void do_machine_restart(void * __unused)
-{
- int cpu;
- static atomic_t cpuid = ATOMIC_INIT(-1);
-
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
- signal_processor(smp_processor_id(), sigp_stop);
-
- /* Wait for all other cpus to enter stopped state */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- while(!smp_cpu_not_running(cpu))
- cpu_relax();
- }
-
- /* Store status of other cpus. */
- do_store_status();
-
- /*
- * Finally call reipl. Because we waited for all other
- * cpus to enter this function we know that they do
- * not hold any s390irq-locks (the cpus have been
- * interrupted by an external interrupt and s390irq
- * locks are always held disabled).
- */
- do_reipl();
-}
-
void machine_restart_smp(char * __unused)
{
- on_each_cpu(do_machine_restart, NULL, 0, 0);
-}
-
-static void do_wait_for_stop(void)
-{
- unsigned long cr[16];
-
- __ctl_store(cr, 0, 15);
- cr[0] &= ~0xffff;
- cr[6] = 0;
- __ctl_load(cr, 0, 15);
- for (;;)
- enabled_wait();
-}
-
-static void do_machine_halt(void * __unused)
-{
- static atomic_t cpuid = ATOMIC_INIT(-1);
-
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- cpcmd(vmhalt_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(),
- sigp_stop_and_store_status);
- }
- do_wait_for_stop();
+ smp_send_stop();
+ do_reipl();
}
void machine_halt_smp(void)
{
- on_each_cpu(do_machine_halt, NULL, 0, 0);
-}
-
-static void do_machine_power_off(void * __unused)
-{
- static atomic_t cpuid = ATOMIC_INIT(-1);
-
- if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
- smp_send_stop();
- if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- cpcmd(vmpoff_cmd, NULL, 0, NULL);
- signal_processor(smp_processor_id(),
- sigp_stop_and_store_status);
- }
- do_wait_for_stop();
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+ __cpcmd(vmhalt_cmd, NULL, 0, NULL);
+ signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ for (;;);
}
void machine_power_off_smp(void)
{
- on_each_cpu(do_machine_power_off, NULL, 0, 0);
+ smp_send_stop();
+ if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+ __cpcmd(vmpoff_cmd, NULL, 0, NULL);
+ signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+ for (;;);
}
/*
@@ -501,8 +460,6 @@ __init smp_count_cpus(void)
*/
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
-extern int pfault_init(void);
-extern void pfault_fini(void);
int __devinit start_secondary(void *cpuvoid)
{
@@ -514,11 +471,9 @@ int __devinit start_secondary(void *cpuvoid)
#ifdef CONFIG_VIRT_TIMER
init_cpu_vtimer();
#endif
-#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
- if (MACHINE_IS_VM)
- pfault_init();
-#endif
+ pfault_init();
+
/* Mark this cpu as online */
cpu_set(smp_processor_id(), cpu_online_map);
/* Switch on interrupts */
@@ -708,11 +663,8 @@ __cpu_disable(void)
}
cpu_clear(cpu, cpu_online_map);
-#ifdef CONFIG_PFAULT
/* Disable pfault pseudo page faults on this cpu. */
- if (MACHINE_IS_VM)
- pfault_fini();
-#endif
+ pfault_fini();
memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
@@ -860,4 +812,3 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);
-
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 92ecffbc8d8..3cbb0dcf1f1 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -58,12 +58,6 @@ int sysctl_userprocess_debug = 0;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-extern void pfault_interrupt(__u16 error_code);
-static ext_int_info_t ext_int_pfault;
-#endif
extern pgm_check_handler_t do_monitor_call;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -135,7 +129,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
}
}
-void show_trace(struct task_struct *task, unsigned long * stack)
+void show_trace(struct task_struct *task, unsigned long *stack)
{
register unsigned long __r15 asm ("15");
unsigned long sp;
@@ -157,6 +151,9 @@ void show_trace(struct task_struct *task, unsigned long * stack)
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
printk("\n");
+ if (!task)
+ task = current;
+ debug_show_held_locks(task);
}
void show_stack(struct task_struct *task, unsigned long *sp)
@@ -739,22 +736,5 @@ void __init trap_init(void)
pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception;
pgm_check_table[0x40] = &do_monitor_call;
-
- if (MACHINE_IS_VM) {
-#ifdef CONFIG_PFAULT
- /*
- * Try to get pfault pseudo page faults going.
- */
- if (register_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault) != 0)
- panic("Couldn't request external interrupt 0x2603");
-
- if (pfault_init() == 0)
- return;
-
- /* Tough luck, no pfault. */
- unregister_early_external_interrupt(0x2603, pfault_interrupt,
- &ext_int_pfault);
-#endif
- }
+ pfault_irq_init();
}
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b0cfa6c4883..b5f94cf3bde 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
EXTRA_AFLAGS := -traditional
-lib-y += delay.o string.o uaccess_std.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
lib-$(CONFIG_32BIT) += div64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 121b2935a42..f9a23d57eb7 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -27,6 +27,9 @@
#define SLR "slgr"
#endif
+extern size_t copy_from_user_std(size_t, const void __user *, void *);
+extern size_t copy_to_user_std(size_t, void __user *, const void *);
+
size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
{
register unsigned long reg0 asm("0") = 0x81UL;
@@ -66,6 +69,13 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
return size;
}
+size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
+{
+ if (size <= 256)
+ return copy_from_user_std(size, ptr, x);
+ return copy_from_user_mvcos(size, ptr, x);
+}
+
size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
{
register unsigned long reg0 asm("0") = 0x810000UL;
@@ -95,6 +105,13 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
return size;
}
+size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x)
+{
+ if (size <= 256)
+ return copy_to_user_std(size, ptr, x);
+ return copy_to_user_mvcos(size, ptr, x);
+}
+
size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
{
register unsigned long reg0 asm("0") = 0x810081UL;
@@ -145,18 +162,16 @@ size_t clear_user_mvcos(size_t size, void __user *to)
return size;
}
-extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
-extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
extern size_t strnlen_user_std(size_t, const char __user *);
extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
extern int futex_atomic_op(int, int __user *, int, int *);
extern int futex_atomic_cmpxchg(int __user *, int, int);
struct uaccess_ops uaccess_mvcos = {
- .copy_from_user = copy_from_user_mvcos,
- .copy_from_user_small = copy_from_user_std_small,
- .copy_to_user = copy_to_user_mvcos,
- .copy_to_user_small = copy_to_user_std_small,
+ .copy_from_user = copy_from_user_mvcos_check,
+ .copy_from_user_small = copy_from_user_std,
+ .copy_to_user = copy_to_user_mvcos_check,
+ .copy_to_user_small = copy_to_user_std,
.copy_in_user = copy_in_user_mvcos,
.clear_user = clear_user_mvcos,
.strnlen_user = strnlen_user_std,
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
new file mode 100644
index 00000000000..8741bdc0929
--- /dev/null
+++ b/arch/s390/lib/uaccess_pt.c
@@ -0,0 +1,153 @@
+/*
+ * arch/s390/lib/uaccess_pt.c
+ *
+ * User access functions based on page table walks.
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/mm.h>
+#include <asm/futex.h>
+
+static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
+ int write_access)
+{
+ struct vm_area_struct *vma;
+ int ret = -EFAULT;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (unlikely(!vma))
+ goto out;
+ if (unlikely(vma->vm_start > address)) {
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto out;
+ if (expand_stack(vma, address))
+ goto out;
+ }
+
+ if (!write_access) {
+ /* page not present, check vm flags */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ goto out;
+ } else {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto out;
+ }
+
+survive:
+ switch (handle_mm_fault(mm, vma, address, write_access)) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto out_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+ ret = 0;
+out:
+ up_read(&mm->mmap_sem);
+ return ret;
+
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (current->pid == 1) {
+ yield();
+ goto survive;
+ }
+ printk("VM: killing process %s\n", current->comm);
+ return ret;
+
+out_sigbus:
+ up_read(&mm->mmap_sem);
+ current->thread.prot_addr = address;
+ current->thread.trap_no = 0x11;
+ force_sig(SIGBUS, current);
+ return ret;
+}
+
+static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
+ size_t n, int write_user)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long offset, pfn, done, size;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ void *from, *to;
+
+ done = 0;
+retry:
+ spin_lock(&mm->page_table_lock);
+ do {
+ pgd = pgd_offset(mm, uaddr);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ goto fault;
+
+ pmd = pmd_offset(pgd, uaddr);
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ goto fault;
+
+ pte = pte_offset_map(pmd, uaddr);
+ if (!pte || !pte_present(*pte) ||
+ (write_user && !pte_write(*pte)))
+ goto fault;
+
+ pfn = pte_pfn(*pte);
+ if (!pfn_valid(pfn))
+ goto out;
+
+ offset = uaddr & (PAGE_SIZE - 1);
+ size = min(n - done, PAGE_SIZE - offset);
+ if (write_user) {
+ to = (void *)((pfn << PAGE_SHIFT) + offset);
+ from = kptr + done;
+ } else {
+ from = (void *)((pfn << PAGE_SHIFT) + offset);
+ to = kptr + done;
+ }
+ memcpy(to, from, size);
+ done += size;
+ uaddr += size;
+ } while (done < n);
+out:
+ spin_unlock(&mm->page_table_lock);
+ return n - done;
+fault:
+ spin_unlock(&mm->page_table_lock);
+ if (__handle_fault(mm, uaddr, write_user))
+ return n - done;
+ goto retry;
+}
+
+size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
+{
+ size_t rc;
+
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ memcpy(to, (void __kernel __force *) from, n);
+ return 0;
+ }
+ rc = __user_copy_pt((unsigned long) from, to, n, 0);
+ if (unlikely(rc))
+ memset(to + n - rc, 0, rc);
+ return rc;
+}
+
+size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
+{
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ memcpy((void __kernel __force *) to, from, n);
+ return 0;
+ }
+ return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
+}
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index f44f0078b35..bbaca66fa29 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -11,7 +11,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/futex.h>
#ifndef __s390x__
@@ -28,6 +28,9 @@
#define SLR "slgr"
#endif
+extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
+extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
+
size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
{
unsigned long tmp1, tmp2;
@@ -69,34 +72,11 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
return size;
}
-size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x)
{
- unsigned long tmp1, tmp2;
-
- tmp1 = 0UL;
- asm volatile(
- "0: mvcp 0(%0,%2),0(%1),%3\n"
- " "SLR" %0,%0\n"
- " j 5f\n"
- "1: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "2: mvcp 0(%4,%2),0(%1),%3\n"
- " "SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "3:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,4f\n"
- " xc 0(1,%2),0(%2)\n"
- "4: ex %4,0(%3)\n"
- "5:\n"
- EX_TABLE(0b,1b) EX_TABLE(2b,3b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
+ if (size <= 1024)
+ return copy_from_user_std(size, ptr, x);
+ return copy_from_user_pt(size, ptr, x);
}
size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
@@ -130,28 +110,11 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
return size;
}
-size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x)
{
- unsigned long tmp1, tmp2;
-
- tmp1 = 0UL;
- asm volatile(
- "0: mvcs 0(%0,%1),0(%2),%3\n"
- " "SLR" %0,%0\n"
- " j 3f\n"
- "1: la %4,255(%1)\n" /* ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* (ptr + 255) & -4096UL */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 3f\n"
- "2: mvcs 0(%4,%1),0(%2),%3\n"
- " "SLR" %0,%4\n"
- "3:\n"
- EX_TABLE(0b,1b) EX_TABLE(2b,3b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
+ if (size <= 1024)
+ return copy_to_user_std(size, ptr, x);
+ return copy_to_user_pt(size, ptr, x);
}
size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
@@ -295,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
{
int oldval = 0, newval, ret;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -321,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
default:
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
*old = oldval;
return ret;
}
@@ -343,10 +306,10 @@ int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
}
struct uaccess_ops uaccess_std = {
- .copy_from_user = copy_from_user_std,
- .copy_from_user_small = copy_from_user_std_small,
- .copy_to_user = copy_to_user_std,
- .copy_to_user_small = copy_to_user_std_small,
+ .copy_from_user = copy_from_user_std_check,
+ .copy_from_user_small = copy_from_user_std,
+ .copy_to_user = copy_to_user_std_check,
+ .copy_to_user_small = copy_to_user_std,
.copy_in_user = copy_in_user_std,
.clear_user = clear_user_std,
.strnlen_user = strnlen_user_std,
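(Editorial sketch, not part of the patch.) The futex hunks above replace the open-coded inc_preempt_count()/dec_preempt_count() pair with pagefault_disable()/pagefault_enable() from the <linux/uaccess.h> header now included at the top of the file. A hedged sketch of the pattern, using a hypothetical in-atomic user read:

#include <linux/uaccess.h>

/* Illustration only: while pagefaults are disabled, a faulting user access
 * returns an error instead of sleeping in the fault handler. */
static int example_atomic_get_user(int __user *uaddr, int *val)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*val, uaddr);	/* fails fast on a fault, never sleeps here */
	pagefault_enable();
	return ret;
}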
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 226275d5c4f..9e9bc48463a 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -14,12 +14,13 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bootmem.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
#include <asm/cpcmd.h>
-#include <linux/ctype.h>
+#include <asm/setup.h>
#define DCSS_DEBUG /* Debug messages on/off */
@@ -77,15 +78,11 @@ struct dcss_segment {
int segcnt;
};
-static DEFINE_SPINLOCK(dcss_lock);
+static DEFINE_MUTEX(dcss_lock);
static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
"EW/EN-MIXED" };
-extern struct {
- unsigned long addr, size, type;
-} memory_chunk[MEMORY_CHUNKS];
-
/*
* Create the 8 bytes, ebcdic VM segment name from
* an ascii name.
@@ -117,7 +114,7 @@ segment_by_name (char *name)
struct list_head *l;
struct dcss_segment *tmp, *retval = NULL;
- assert_spin_locked(&dcss_lock);
+ BUG_ON(!mutex_is_locked(&dcss_lock));
dcss_mkname (name, dcss_name);
list_for_each (l, &dcss_list) {
tmp = list_entry (l, struct dcss_segment, list);
@@ -249,8 +246,8 @@ segment_overlaps_storage(struct dcss_segment *seg)
{
int i;
- for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
- if (memory_chunk[i].type != 0)
+ for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+ if (memory_chunk[i].type != CHUNK_READ_WRITE)
continue;
if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
continue;
@@ -272,7 +269,7 @@ segment_overlaps_others (struct dcss_segment *seg)
struct list_head *l;
struct dcss_segment *tmp;
- assert_spin_locked(&dcss_lock);
+ BUG_ON(!mutex_is_locked(&dcss_lock));
list_for_each(l, &dcss_list) {
tmp = list_entry(l, struct dcss_segment, list);
if ((tmp->start_addr >> 20) > (seg->end >> 20))
@@ -429,7 +426,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
if (!MACHINE_IS_VM)
return -ENOSYS;
- spin_lock (&dcss_lock);
+ mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL)
rc = __segment_load (name, do_nonshared, addr, end);
@@ -444,7 +441,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
rc = -EPERM;
}
}
- spin_unlock (&dcss_lock);
+ mutex_unlock(&dcss_lock);
return rc;
}
@@ -467,7 +464,7 @@ segment_modify_shared (char *name, int do_nonshared)
unsigned long dummy;
int dcss_command, rc, diag_cc;
- spin_lock (&dcss_lock);
+ mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
rc = -EINVAL;
@@ -508,7 +505,7 @@ segment_modify_shared (char *name, int do_nonshared)
&dummy, &dummy);
kfree(seg);
out_unlock:
- spin_unlock(&dcss_lock);
+ mutex_unlock(&dcss_lock);
return rc;
}
@@ -526,7 +523,7 @@ segment_unload(char *name)
if (!MACHINE_IS_VM)
return;
- spin_lock(&dcss_lock);
+ mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
PRINT_ERR ("could not find segment %s in segment_unload, "
@@ -540,7 +537,7 @@ segment_unload(char *name)
kfree(seg);
}
out_unlock:
- spin_unlock(&dcss_lock);
+ mutex_unlock(&dcss_lock);
}
/*
@@ -559,12 +556,13 @@ segment_save(char *name)
if (!MACHINE_IS_VM)
return;
- spin_lock(&dcss_lock);
+ mutex_lock(&dcss_lock);
seg = segment_by_name (name);
if (seg == NULL) {
- PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name);
- return;
+ PRINT_ERR("could not find segment %s in segment_save, please "
+ "report to linux390@de.ibm.com\n", name);
+ goto out;
}
startpfn = seg->start_addr >> PAGE_SHIFT;
@@ -591,7 +589,7 @@ segment_save(char *name)
goto out;
}
out:
- spin_unlock(&dcss_lock);
+ mutex_unlock(&dcss_lock);
}
EXPORT_SYMBOL(segment_load);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1c323bbfda9..cd85e34d870 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,6 +31,7 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/kdebug.h>
+#include <asm/s390_ext.h>
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
@@ -394,6 +395,7 @@ void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
/*
* 'pfault' pseudo page faults routines.
*/
+static ext_int_info_t ext_int_pfault;
static int pfault_disable = 0;
static int __init nopfault(char *str)
@@ -422,7 +424,7 @@ int pfault_init(void)
__PF_RES_FIELD };
int rc;
- if (pfault_disable)
+ if (!MACHINE_IS_VM || pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
@@ -440,7 +442,7 @@ void pfault_fini(void)
pfault_refbk_t refbk =
{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
- if (pfault_disable)
+ if (!MACHINE_IS_VM || pfault_disable)
return;
__ctl_clear_bit(0,9);
asm volatile(
@@ -500,5 +502,25 @@ pfault_interrupt(__u16 error_code)
set_tsk_need_resched(tsk);
}
}
-#endif
+void __init pfault_irq_init(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+
+ /*
+ * Try to get pfault pseudo page faults going.
+ */
+ if (register_early_external_interrupt(0x2603, pfault_interrupt,
+ &ext_int_pfault) != 0)
+ panic("Couldn't request external interrupt 0x2603");
+
+ if (pfault_init() == 0)
+ return;
+
+ /* Tough luck, no pfault. */
+ pfault_disable = 1;
+ unregister_early_external_interrupt(0x2603, pfault_interrupt,
+ &ext_int_pfault);
+}
+#endif
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6a461d4caef..d83d64af31f 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -51,6 +51,14 @@ config GENERIC_TIME
config ARCH_MAY_HAVE_PC_FDC
bool
+config STACKTRACE_SUPPORT
+ bool
+ default y
+
+config LOCKDEP_SUPPORT
+ bool
+ default y
+
source "init/Kconfig"
menu "System type"
@@ -217,7 +225,21 @@ config SH_SHMIN
bool "SHMIN"
select CPU_SUBTYPE_SH7706
help
- Select SHMIN if configureing for the SHMIN board
+ Select SHMIN if configuring for the SHMIN board.
+
+config SH_7206_SOLUTION_ENGINE
+ bool "SolutionEngine7206"
+ select CPU_SUBTYPE_SH7206
+ help
+ Select 7206 SolutionEngine if configuring for a Hitachi SH7206
+ evaluation board.
+
+config SH_7619_SOLUTION_ENGINE
+ bool "SolutionEngine7619"
+ select CPU_SUBTYPE_SH7619
+ help
+ Select 7619 SolutionEngine if configuring for a Hitachi SH7619
+ evaluation board.
config SH_UNKNOWN
bool "BareCPU"
@@ -280,12 +302,20 @@ config CF_BASE_ADDR
menu "Processor features"
-config CPU_LITTLE_ENDIAN
- bool "Little Endian"
+choice
+ prompt "Endianess selection"
+ default CPU_LITTLE_ENDIAN
help
Some SuperH machines can be configured for either little or big
- endian byte order. These modes require different kernels. Say Y if
- your machine is little endian, N if it's a big endian machine.
+ endian byte order. These modes require different kernels.
+
+config CPU_LITTLE_ENDIAN
+ bool "Little Endian"
+
+config CPU_BIG_ENDIAN
+ bool "Big Endian"
+
+endchoice
config SH_FPU
bool "FPU support"
@@ -345,6 +375,9 @@ config CPU_HAS_MASKREG_IRQ
config CPU_HAS_INTC2_IRQ
bool
+config CPU_HAS_IPR_IRQ
+ bool
+
config CPU_HAS_SR_RB
bool "CPU has SR.RB"
depends on CPU_SH3 || CPU_SH4
@@ -357,6 +390,9 @@ config CPU_HAS_SR_RB
See <file:Documentation/sh/register-banks.txt> for further
information on SR.RB and register banking in the kernel in general.
+config CPU_HAS_PTEA
+ bool
+
endmenu
menu "Timer support"
@@ -364,10 +400,25 @@ depends on !GENERIC_TIME
config SH_TMU
bool "TMU timer support"
+ depends on CPU_SH3 || CPU_SH4
default y
help
This enables the use of the TMU as the system timer.
+config SH_CMT
+ bool "CMT timer support"
+ depends on CPU_SH2
+ default y
+ help
+ This enables the use of the CMT as the system timer.
+
+config SH_MTU2
+ bool "MTU2 timer support"
+ depends on CPU_SH2A
+ default n
+ help
+ This enables the use of the MTU2 as the system timer.
+
endmenu
source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
@@ -376,19 +427,52 @@ source "arch/sh/boards/renesas/rts7751r2d/Kconfig"
source "arch/sh/boards/renesas/r7780rp/Kconfig"
+config SH_TIMER_IRQ
+ int
+ default "28" if CPU_SUBTYPE_SH7780
+ default "86" if CPU_SUBTYPE_SH7619
+ default "140" if CPU_SUBTYPE_SH7206
+ default "16"
+
+config NO_IDLE_HZ
+ bool "Dynamic tick timer"
+ help
+ Select this option if you want to disable continuous timer ticks
+ and have them programmed to occur as required. This option saves
+ power as the system can remain in idle state for longer.
+
+ By default dynamic tick is disabled during the boot, and can be
+ manually enabled with:
+
+ echo 1 > /sys/devices/system/timer/timer0/dyn_tick
+
+ Alternatively, if you want dynamic tick automatically enabled
+ during boot, pass "dyntick=enable" via the kernel command string.
+
+ Please note that dynamic tick may affect the accuracy of
+ timekeeping on some platforms depending on the implementation.
+
config SH_PCLK_FREQ
int "Peripheral clock frequency (in Hz)"
+ default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
+ default "31250000" if CPU_SUBTYPE_SH7619
+ default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
+ CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
+ CPU_SUBTYPE_SH7206
default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780
default "60000000" if CPU_SUBTYPE_SH7751
- default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
- CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705
- default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
default "66000000" if CPU_SUBTYPE_SH4_202
help
This option is used to specify the peripheral clock frequency.
This is necessary for determining the reference clock value on
platforms lacking an RTC.
+config SH_CLK_MD
+ int "CPU Mode Pin Setting"
+ depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
+ help
+ MD2 - MD0 Setting.
+
menu "CPU Frequency scaling"
source "drivers/cpufreq/Kconfig"
@@ -421,6 +505,8 @@ config HEARTBEAT
behavior is platform-dependent, but normally the flash frequency is
a hyperbolic function of the 5-minute load average.
+source "arch/sh/drivers/Kconfig"
+
endmenu
config ISA_DMA_API
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 48479e014da..66a25ef4ef1 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -1,5 +1,9 @@
menu "Kernel hacking"
+config TRACE_IRQFLAGS_SUPPORT
+ bool
+ default y
+
source "lib/Kconfig.debug"
config SH_STANDARD_BIOS
@@ -17,7 +21,18 @@ config SH_STANDARD_BIOS
config EARLY_SCIF_CONSOLE
bool "Use early SCIF console"
- depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS
+ help
+ This enables an early console using a fixed SCIF port. This can
+ be used by platforms that are either not running the SH
+ standard BIOS, or do not wish to use the BIOS callbacks for the
+ serial I/O.
+
+config EARLY_SCIF_CONSOLE_PORT
+ hex "SCIF port for early console"
+ depends on EARLY_SCIF_CONSOLE
+ default "0xffe00000" if CPU_SUBTYPE_SH7780
+ default "0xfffe9800" if CPU_SUBTYPE_SH72060
+ default "0xffe80000" if CPU_SH4
config EARLY_PRINTK
bool "Early printk support"
@@ -30,6 +45,11 @@ config EARLY_PRINTK
when the kernel may crash or hang before the serial console is
initialised. If unsure, say N.
+ On devices that are running SH-IPL and want to keep the port
+ initialization consistent while not using the BIOS callbacks,
+ select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
+ the kernel command line option to toggle back and forth.
+
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 26d62ff51a6..d10bba5e107 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -13,10 +13,6 @@
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
-
-cflags-y := -mb
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN) := -ml
-
isa-y := any
isa-$(CONFIG_SH_DSP) := sh
isa-$(CONFIG_CPU_SH2) := sh2
@@ -38,13 +34,16 @@ isa-y := $(isa-y)-nofpu
endif
endif
-cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),)
-
-cflags-$(CONFIG_CPU_SH2) += -m2
-cflags-$(CONFIG_CPU_SH3) += -m3
-cflags-$(CONFIG_CPU_SH4) += -m4 \
+cflags-$(CONFIG_CPU_SH2) := -m2
+cflags-$(CONFIG_CPU_SH3) := -m3
+cflags-$(CONFIG_CPU_SH4) := -m4 \
$(call cc-option,-mno-implicit-fp,-m4-nofpu)
-cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a-nofpu,)
+cflags-$(CONFIG_CPU_SH4A) := -m4a $(call cc-option,-m4a-nofpu,)
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mb
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -ml
+
+cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding
cflags-$(CONFIG_SH_DSP) += -Wa,-dsp
cflags-$(CONFIG_SH_KGDB) += -g
@@ -59,7 +58,9 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -R .stab -R .stabstr -S
# never be used by anyone. Use a board-specific defconfig that has a
# reasonable chance of being current instead.
#
-KBUILD_DEFCONFIG := rts7751r2d_defconfig
+KBUILD_DEFCONFIG := r7780rp_defconfig
+
+KBUILD_IMAGE := arch/sh/boot/zImage
#
# Choosing incompatible machines durings configuration will result in
@@ -109,6 +110,8 @@ machdir-$(CONFIG_SH_SH4202_MICRODEV) := superh/microdev
machdir-$(CONFIG_SH_LANDISK) := landisk
machdir-$(CONFIG_SH_TITAN) := titan
machdir-$(CONFIG_SH_SHMIN) := shmin
+machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE) := se/7206
+machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE) := se/7619
machdir-$(CONFIG_SH_UNKNOWN) := unknown
incdir-y := $(notdir $(machdir-y))
@@ -124,6 +127,7 @@ core-$(CONFIG_HD64465) += arch/sh/cchips/hd6446x/hd64465/
core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/
cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2
+cpuincdir-$(CONFIG_CPU_SH2A) := cpu-sh2a
cpuincdir-$(CONFIG_CPU_SH3) := cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4) := cpu-sh4
diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile
index f1776d02797..574b0316ed5 100644
--- a/arch/sh/boards/renesas/r7780rp/Makefile
+++ b/arch/sh/boards/renesas/r7780rp/Makefile
@@ -3,4 +3,6 @@
#
obj-y := setup.o io.o irq.o
-obj-$(CONFIG_HEARTBEAT) += led.o
+
+obj-$(CONFIG_HEARTBEAT) += led.o
+obj-$(CONFIG_PUSH_SWITCH) += psw.o
diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c
index aa15ec5bc69..cc381e19778 100644
--- a/arch/sh/boards/renesas/r7780rp/irq.c
+++ b/arch/sh/boards/renesas/r7780rp/irq.c
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/r7780rp.h>
diff --git a/arch/sh/boards/renesas/r7780rp/psw.c b/arch/sh/boards/renesas/r7780rp/psw.c
new file mode 100644
index 00000000000..c844dfa5d58
--- /dev/null
+++ b/arch/sh/boards/renesas/r7780rp/psw.c
@@ -0,0 +1,122 @@
+/*
+ * arch/sh/boards/renesas/r7780rp/psw.c
+ *
+ * push switch support for RDBRP-1/RDBREVRP-1 debug boards.
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/mach/r7780rp.h>
+#include <asm/push-switch.h>
+
+static irqreturn_t psw_irq_handler(int irq, void *arg)
+{
+ struct platform_device *pdev = arg;
+ struct push_switch *psw = platform_get_drvdata(pdev);
+ struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+ unsigned int l, mask;
+ int ret = 0;
+
+ l = ctrl_inw(PA_DBSW);
+
+ /* Nothing to do if there's no state change */
+ if (psw->state) {
+ ret = 1;
+ goto out;
+ }
+
+ mask = l & 0x70;
+ /* Figure out who raised it */
+ if (mask & (1 << psw_info->bit)) {
+ psw->state = !!(mask & (1 << psw_info->bit));
+ if (psw->state) /* debounce */
+ mod_timer(&psw->debounce, jiffies + 50);
+
+ ret = 1;
+ }
+
+out:
+ /* Clear the switch IRQs */
+ l |= (0x7 << 12);
+ ctrl_outw(l, PA_DBSW);
+
+ return IRQ_RETVAL(ret);
+}
+
+static struct resource psw_resources[] = {
+ [0] = {
+ .start = IRQ_PSW,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct push_switch_platform_info s2_platform_data = {
+ .name = "s2",
+ .bit = 6,
+ .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_SHARED,
+ .irq_handler = psw_irq_handler,
+};
+
+static struct platform_device s2_switch_device = {
+ .name = "push-switch",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(psw_resources),
+ .resource = psw_resources,
+ .dev = {
+ .platform_data = &s2_platform_data,
+ },
+};
+
+static struct push_switch_platform_info s3_platform_data = {
+ .name = "s3",
+ .bit = 5,
+ .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_SHARED,
+ .irq_handler = psw_irq_handler,
+};
+
+static struct platform_device s3_switch_device = {
+ .name = "push-switch",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(psw_resources),
+ .resource = psw_resources,
+ .dev = {
+ .platform_data = &s3_platform_data,
+ },
+};
+
+static struct push_switch_platform_info s4_platform_data = {
+ .name = "s4",
+ .bit = 4,
+ .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_SHARED,
+ .irq_handler = psw_irq_handler,
+};
+
+static struct platform_device s4_switch_device = {
+ .name = "push-switch",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(psw_resources),
+ .resource = psw_resources,
+ .dev = {
+ .platform_data = &s4_platform_data,
+ },
+};
+
+static struct platform_device *psw_devices[] = {
+ &s2_switch_device, &s3_switch_device, &s4_switch_device,
+};
+
+static int __init psw_init(void)
+{
+ return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
+}
+module_init(psw_init);
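Note: psw_irq_handler() above only relies on a handful of fields from the push-switch framework, whose declarations live in asm/push-switch.h and are not part of this diff. A minimal sketch of that contract, with the layout treated as an assumption inferred from the code above rather than the real header:

    #include <linux/timer.h>
    #include <linux/interrupt.h>

    struct push_switch {
            unsigned int      state;     /* current switch state, later reported via sysfs */
            struct timer_list debounce;  /* debounce timer armed by the IRQ handler */
    };

    struct push_switch_platform_info {
            char          *name;        /* switch name, e.g. "s2" */
            unsigned int  bit;          /* bit position probed in PA_DBSW */
            unsigned long irq_flags;    /* flags used when the framework requests the IRQ */
            irqreturn_t   (*irq_handler)(int irq, void *data);
    };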
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index c331caeb694..9f89c8de9db 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -44,8 +44,37 @@ static struct platform_device m66596_usb_host_device = {
.resource = m66596_usb_host_resources,
};
+static struct resource cf_ide_resources[] = {
+ [0] = {
+ .start = 0x1f0,
+ .end = 0x1f0 + 8,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = 0x1f0 + 0x206,
+ .end = 0x1f0 + 8 + 0x206 + 8,
+ .flags = IORESOURCE_IO,
+ },
+ [2] = {
+#ifdef CONFIG_SH_R7780MP
+ .start = 1,
+#else
+ .start = 4,
+#endif
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device cf_ide_device = {
+ .name = "pata_platform",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(cf_ide_resources),
+ .resource = cf_ide_resources,
+};
+
static struct platform_device *r7780rp_devices[] __initdata = {
&m66596_usb_host_device,
+ &cf_ide_device,
};
static int __init r7780rp_devices_setup(void)
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile
new file mode 100644
index 00000000000..63950f4f245
--- /dev/null
+++ b/arch/sh/boards/se/7206/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the 7206 SolutionEngine specific parts of the kernel
+#
+
+obj-y := setup.o io.o irq.o
+obj-$(CONFIG_HEARTBEAT) += led.o
+
diff --git a/arch/sh/boards/se/7206/io.c b/arch/sh/boards/se/7206/io.c
new file mode 100644
index 00000000000..b557273e0cb
--- /dev/null
+++ b/arch/sh/boards/se/7206/io.c
@@ -0,0 +1,123 @@
+/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
+ *
+ * linux/arch/sh/boards/se/7206/io.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7206 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7206.h>
+
+
+static inline void delay(void)
+{
+ ctrl_inw(0x20000000); /* P2 ROM Area */
+}
+
+/* MS7750 requires special versions of the in*, out* routines, since
+ PC-like I/O ports are located in the upper half byte of a 16-bit word,
+ which can only be accessed 16 bits wide. */
+
+static inline volatile __u16 *
+port2adr(unsigned int port)
+{
+ if (port >= 0x2000)
+ return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
+ else if (port >= 0x300 && port < 0x310)
+ return (volatile __u16 *) (PA_SMSC + (port - 0x300));
+}
+
+unsigned char se7206_inb(unsigned long port)
+{
+ return (*port2adr(port))&0xff;
+}
+
+unsigned char se7206_inb_p(unsigned long port)
+{
+ unsigned long v;
+
+ v = (*port2adr(port))&0xff;
+ delay();
+ return v;
+}
+
+unsigned short se7206_inw(unsigned long port)
+{
+ return *port2adr(port);
+}
+
+unsigned int se7206_inl(unsigned long port)
+{
+ maybebadio(port);
+ return 0;
+}
+
+void se7206_outb(unsigned char value, unsigned long port)
+{
+ *(port2adr(port)) = value;
+}
+
+void se7206_outb_p(unsigned char value, unsigned long port)
+{
+ *(port2adr(port)) = value;
+ delay();
+}
+
+void se7206_outw(unsigned short value, unsigned long port)
+{
+ *port2adr(port) = value;
+}
+
+void se7206_outl(unsigned int value, unsigned long port)
+{
+ maybebadio(port);
+}
+
+void se7206_insb(unsigned long port, void *addr, unsigned long count)
+{
+ volatile __u16 *p = port2adr(port);
+ __u8 *ap = addr;
+
+ while (count--)
+ *ap++ = *p;
+}
+
+void se7206_insw(unsigned long port, void *addr, unsigned long count)
+{
+ volatile __u16 *p = port2adr(port);
+ __u16 *ap = addr;
+ while (count--)
+ *ap++ = *p;
+}
+
+void se7206_insl(unsigned long port, void *addr, unsigned long count)
+{
+ maybebadio(port);
+}
+
+void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
+{
+ volatile __u16 *p = port2adr(port);
+ const __u8 *ap = addr;
+
+ while (count--)
+ *p = *ap++;
+}
+
+void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
+{
+ volatile __u16 *p = port2adr(port);
+ const __u16 *ap = addr;
+ while (count--)
+ *p = *ap++;
+}
+
+void se7206_outsl(unsigned long port, const void *addr, unsigned long count)
+{
+ maybebadio(port);
+}
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c
new file mode 100644
index 00000000000..3fb0c5f5b23
--- /dev/null
+++ b/arch/sh/boards/se/7206/irq.c
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/sh/boards/se/7206/irq.c
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/se7206.h>
+
+#define INTSTS0 0x31800000
+#define INTSTS1 0x31800002
+#define INTMSK0 0x31800004
+#define INTMSK1 0x31800006
+#define INTSEL 0x31800008
+
+static void disable_se7206_irq(unsigned int irq)
+{
+ unsigned short val;
+ unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
+ unsigned short msk0,msk1;
+
+ /* Set the priority in IPR to 0 */
+ val = ctrl_inw(INTC_IPR01);
+ val &= mask;
+ ctrl_outw(val, INTC_IPR01);
+ /* FPGA mask set */
+ msk0 = ctrl_inw(INTMSK0);
+ msk1 = ctrl_inw(INTMSK1);
+
+ switch (irq) {
+ case IRQ0_IRQ:
+ msk0 |= 0x0010;
+ break;
+ case IRQ1_IRQ:
+ msk0 |= 0x000f;
+ break;
+ case IRQ2_IRQ:
+ msk0 |= 0x0f00;
+ msk1 |= 0x00ff;
+ break;
+ }
+ ctrl_outw(msk0, INTMSK0);
+ ctrl_outw(msk1, INTMSK1);
+}
+
+static void enable_se7206_irq(unsigned int irq)
+{
+ unsigned short val;
+ unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
+ unsigned short msk0,msk1;
+
+ /* Set priority in IPR back to original value */
+ val = ctrl_inw(INTC_IPR01);
+ val |= value;
+ ctrl_outw(val, INTC_IPR01);
+
+ /* FPGA mask reset */
+ msk0 = ctrl_inw(INTMSK0);
+ msk1 = ctrl_inw(INTMSK1);
+
+ switch (irq) {
+ case IRQ0_IRQ:
+ msk0 &= ~0x0010;
+ break;
+ case IRQ1_IRQ:
+ msk0 &= ~0x000f;
+ break;
+ case IRQ2_IRQ:
+ msk0 &= ~0x0f00;
+ msk1 &= ~0x00ff;
+ break;
+ }
+ ctrl_outw(msk0, INTMSK0);
+ ctrl_outw(msk1, INTMSK1);
+}
+
+static void eoi_se7206_irq(unsigned int irq)
+{
+ unsigned short sts0,sts1;
+
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ enable_se7206_irq(irq);
+ /* FPGA isr clear */
+ sts0 = ctrl_inw(INTSTS0);
+ sts1 = ctrl_inw(INTSTS1);
+
+ switch (irq) {
+ case IRQ0_IRQ:
+ sts0 &= ~0x0010;
+ break;
+ case IRQ1_IRQ:
+ sts0 &= ~0x000f;
+ break;
+ case IRQ2_IRQ:
+ sts0 &= ~0x0f00;
+ sts1 &= ~0x00ff;
+ break;
+ }
+ ctrl_outw(sts0, INTSTS0);
+ ctrl_outw(sts1, INTSTS1);
+}
+
+static struct irq_chip se7206_irq_chip __read_mostly = {
+ .name = "SE7206-FPGA-IRQ",
+ .mask = disable_se7206_irq,
+ .unmask = enable_se7206_irq,
+ .mask_ack = disable_se7206_irq,
+ .eoi = eoi_se7206_irq,
+};
+
+static void make_se7206_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
+ handle_level_irq, "level");
+ disable_se7206_irq(irq);
+}
+
+/*
+ * Initialize IRQ setting
+ */
+void __init init_se7206_IRQ(void)
+{
+ make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
+ make_se7206_irq(IRQ1_IRQ); /* ATA */
+ make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
+ ctrl_outw(ctrl_inw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
+
+ /* FPGA System register setup*/
+ ctrl_outw(0x0000,INTSTS0); /* Clear INTSTS0 */
+ ctrl_outw(0x0000,INTSTS1); /* Clear INTSTS1 */
+ /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
+ ctrl_outw(0x0001,INTSEL);
+}
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c
new file mode 100644
index 00000000000..ef794601ab8
--- /dev/null
+++ b/arch/sh/boards/se/7206/led.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/sh/boards/se/7206/led.c
+ *
+ * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * This file contains Solution Engine specific LED code.
+ */
+
+#include <linux/config.h>
+#include <asm/se7206.h>
+
+#ifdef CONFIG_HEARTBEAT
+
+#include <linux/sched.h>
+
+/* Cycle the LEDs in the classic Knightrider/Sun pattern */
+void heartbeat_se(void)
+{
+ static unsigned int cnt = 0, period = 0;
+ volatile unsigned short* p = (volatile unsigned short*)PA_LED;
+ static unsigned bit = 0, up = 1;
+
+ cnt += 1;
+ if (cnt < period) {
+ return;
+ }
+
+ cnt = 0;
+
+ /* Go through the points (roughly!):
+ * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
+ */
+ period = 110 - ( (300<<FSHIFT)/
+ ((avenrun[0]/5) + (3<<FSHIFT)) );
+
+ if (up) {
+ if (bit == 7) {
+ bit--;
+ up=0;
+ } else {
+ bit ++;
+ }
+ } else {
+ if (bit == 0) {
+ bit++;
+ up=1;
+ } else {
+ bit--;
+ }
+ }
+ *p = 1<<(bit+8);
+
+}
+#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c
new file mode 100644
index 00000000000..0f42e91a323
--- /dev/null
+++ b/arch/sh/boards/se/7206/setup.c
@@ -0,0 +1,79 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7206/setup.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Hitachi 7206 SolutionEngine Support.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/se7206.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x300,
+ .end = 0x300 + 0x020 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 64,
+ .end = 64,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static int __init se7206_devices_setup(void)
+{
+ return platform_device_register(&smc91x_device);
+}
+
+__initcall(se7206_devices_setup);
+
+void heartbeat_se(void);
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+ .mv_name = "SolutionEngine",
+ .mv_nr_irqs = 256,
+ .mv_inb = se7206_inb,
+ .mv_inw = se7206_inw,
+ .mv_inl = se7206_inl,
+ .mv_outb = se7206_outb,
+ .mv_outw = se7206_outw,
+ .mv_outl = se7206_outl,
+
+ .mv_inb_p = se7206_inb_p,
+ .mv_inw_p = se7206_inw,
+ .mv_inl_p = se7206_inl,
+ .mv_outb_p = se7206_outb_p,
+ .mv_outw_p = se7206_outw,
+ .mv_outl_p = se7206_outl,
+
+ .mv_insb = se7206_insb,
+ .mv_insw = se7206_insw,
+ .mv_insl = se7206_insl,
+ .mv_outsb = se7206_outsb,
+ .mv_outsw = se7206_outsw,
+ .mv_outsl = se7206_outsl,
+
+ .mv_init_irq = init_se7206_IRQ,
+#ifdef CONFIG_HEARTBEAT
+ .mv_heartbeat = heartbeat_se,
+#endif
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile
new file mode 100644
index 00000000000..3666eca8a65
--- /dev/null
+++ b/arch/sh/boards/se/7619/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the 7619 SolutionEngine specific parts of the kernel
+#
+
+obj-y := setup.o io.o
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c
new file mode 100644
index 00000000000..176f1f39cd9
--- /dev/null
+++ b/arch/sh/boards/se/7619/io.c
@@ -0,0 +1,102 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7619/io.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7619 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/irq.h>
+
+/* FIXME: M3A-ZAB7 Compact Flash Slot support */
+
+static inline void delay(void)
+{
+ ctrl_inw(0xa0000000); /* Uncached ROM area (P2) */
+}
+
+#define badio(name,port) \
+ printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \
+ #name, (port), (__u32) __builtin_return_address(0))
+
+unsigned char se7619___inb(unsigned long port)
+{
+ badio(inb, port);
+ return 0;
+}
+
+unsigned char se7619___inb_p(unsigned long port)
+{
+ badio(inb_p, port);
+ delay();
+ return 0;
+}
+
+unsigned short se7619___inw(unsigned long port)
+{
+ badio(inw, port);
+ return 0;
+}
+
+unsigned int se7619___inl(unsigned long port)
+{
+ badio(inl, port);
+ return 0;
+}
+
+void se7619___outb(unsigned char value, unsigned long port)
+{
+ badio(outb, port);
+}
+
+void se7619___outb_p(unsigned char value, unsigned long port)
+{
+ badio(outb_p, port);
+ delay();
+}
+
+void se7619___outw(unsigned short value, unsigned long port)
+{
+ badio(outw, port);
+}
+
+void se7619___outl(unsigned int value, unsigned long port)
+{
+ badio(outl, port);
+}
+
+void se7619___insb(unsigned long port, void *addr, unsigned long count)
+{
+ badio(insb, port);
+}
+
+void se7619___insw(unsigned long port, void *addr, unsigned long count)
+{
+ badio(insw, port);
+}
+
+void se7619___insl(unsigned long port, void *addr, unsigned long count)
+{
+ badio(insl, port);
+}
+
+void se7619___outsb(unsigned long port, const void *addr, unsigned long count)
+{
+ badio(outsb, port);
+}
+
+void se7619___outsw(unsigned long port, const void *addr, unsigned long count)
+{
+ badio(outsw, port);
+}
+
+void se7619___outsl(unsigned long port, const void *addr, unsigned long count)
+{
+ badio(outsl, port);
+}
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c
new file mode 100644
index 00000000000..e627b26de0d
--- /dev/null
+++ b/arch/sh/boards/se/7619/setup.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/boards/se/7619/setup.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Hitachi SH7619 SolutionEngine Support.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/machvec.h>
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+ .mv_name = "SolutionEngine",
+ .mv_nr_irqs = 108,
+ .mv_inb = se7619___inb,
+ .mv_inw = se7619___inw,
+ .mv_inl = se7619___inl,
+ .mv_outb = se7619___outb,
+ .mv_outw = se7619___outw,
+ .mv_outl = se7619___outl,
+
+ .mv_inb_p = se7619___inb_p,
+ .mv_inw_p = se7619___inw,
+ .mv_inl_p = se7619___inl,
+ .mv_outb_p = se7619___outb_p,
+ .mv_outw_p = se7619___outw,
+ .mv_outl_p = se7619___outl,
+
+ .mv_insb = se7619___insb,
+ .mv_insw = se7619___insw,
+ .mv_insl = se7619___insl,
+ .mv_outsb = se7619___outsb,
+ .mv_outsw = se7619___outsw,
+ .mv_outsl = se7619___outsl,
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/titan/setup.c b/arch/sh/boards/titan/setup.c
index a6046d93758..6bcd939bfae 100644
--- a/arch/sh/boards/titan/setup.c
+++ b/arch/sh/boards/titan/setup.c
@@ -1,26 +1,30 @@
/*
- * Setup for Titan
+ * arch/sh/boards/titan/setup.c - Setup for Titan
+ *
+ * Copyright (C) 2006 Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
-
#include <linux/init.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
#include <asm/titan.h>
#include <asm/io.h>
-extern void __init pcibios_init_platform(void);
-
static struct ipr_data titan_ipr_map[] = {
- { TITAN_IRQ_WAN, IRL0_IPR_ADDR, IRL0_IPR_POS, IRL0_PRIORITY },
- { TITAN_IRQ_LAN, IRL1_IPR_ADDR, IRL1_IPR_POS, IRL1_PRIORITY },
- { TITAN_IRQ_MPCIA, IRL2_IPR_ADDR, IRL2_IPR_POS, IRL2_PRIORITY },
- { TITAN_IRQ_USB, IRL3_IPR_ADDR, IRL3_IPR_POS, IRL3_PRIORITY },
+ /* IRQ, IPR idx, shift, prio */
+ { TITAN_IRQ_WAN, 3, 12, 8 }, /* eth0 (WAN) */
+ { TITAN_IRQ_LAN, 3, 8, 8 }, /* eth1 (LAN) */
+ { TITAN_IRQ_MPCIA, 3, 4, 8 }, /* mPCI A (top) */
+ { TITAN_IRQ_USB, 3, 0, 8 }, /* mPCI B (bottom), USB */
};
static void __init init_titan_irq(void)
{
/* enable individual interrupt mode for externals */
- ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
-
+ ipr_irq_enable_irlm();
+ /* register ipr irqs */
make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map));
}
@@ -47,6 +51,5 @@ struct sh_machine_vector mv_titan __initmv = {
.mv_ioport_map = titan_ioport_map,
.mv_init_irq = init_titan_irq,
- .mv_init_pci = pcibios_init_platform,
};
ALIAS_MV(titan)
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index f2fed5ce5cc..35452d85b7f 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -12,6 +12,7 @@
*/
#include <asm/uaccess.h>
+#include <asm/addrspace.h>
#ifdef CONFIG_SH_STANDARD_BIOS
#include <asm/sh_bios.h>
#endif
@@ -228,7 +229,7 @@ long* stack_start = &user_stack[STACK_SIZE];
void decompress_kernel(void)
{
output_data = 0;
- output_ptr = (unsigned long)&_text+0x20001000;
+ output_ptr = P2SEGADDR((unsigned long)&_text+0x1000);
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
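Note: P2SEGADDR() comes from asm/addrspace.h and rebases the link address into the uncached P2 segment, replacing the previous hard-coded 0x20001000 offset. Roughly, and only as an approximation of the real header definition:

    #define P2SEG           0xa0000000
    #define P2SEGADDR(a)    ((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))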
diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig
index 34e2046c321..2b75b4896ba 100644
--- a/arch/sh/configs/r7780rp_defconfig
+++ b/arch/sh/configs/r7780rp_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc3
-# Tue Oct 31 12:32:06 2006
+# Linux kernel version: 2.6.19
+# Wed Dec 6 11:59:38 2006
#
CONFIG_SUPERH=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -11,6 +11,8 @@ CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
# CONFIG_GENERIC_TIME is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -37,6 +39,7 @@ CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -118,6 +121,8 @@ CONFIG_SH_R7780RP=y
# CONFIG_SH_LANDISK is not set
# CONFIG_SH_TITAN is not set
# CONFIG_SH_SHMIN is not set
+# CONFIG_SH_7206_SOLUTION_ENGINE is not set
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
# CONFIG_SH_UNKNOWN is not set
#
@@ -130,6 +135,12 @@ CONFIG_CPU_SH4A=y
# SH-2 Processor Support
#
# CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
#
# SH-3 Processor Support
@@ -165,6 +176,7 @@ CONFIG_CPU_SH4A=y
#
# CONFIG_CPU_SUBTYPE_SH7770 is not set
CONFIG_CPU_SUBTYPE_SH7780=y
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
#
# SH4AL-DSP Processor Support
@@ -181,8 +193,14 @@ CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x08000000
# CONFIG_32BIT is not set
CONFIG_VSYSCALL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -204,12 +222,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# Processor features
#
CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_SH_FPU=y
# CONFIG_SH_DSP is not set
CONFIG_SH_STORE_QUEUES=y
CONFIG_CPU_HAS_INTEVT=y
CONFIG_CPU_HAS_INTC2_IRQ=y
CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
#
# Timer support
@@ -220,6 +240,8 @@ CONFIG_SH_TMU=y
# R7780RP options
#
CONFIG_SH_R7780MP=y
+CONFIG_SH_TIMER_IRQ=28
+CONFIG_NO_IDLE_HZ=y
CONFIG_SH_PCLK_FREQ=32000000
#
@@ -238,13 +260,18 @@ CONFIG_SH_PCLK_FREQ=32000000
# CONFIG_HD6446X_SERIES is not set
#
+# Additional SuperH Device Drivers
+#
+CONFIG_PUSH_SWITCH=y
+
+#
# Kernel features
#
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_KEXEC is not set
+CONFIG_KEXEC=y
# CONFIG_SMP is not set
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
@@ -278,10 +305,7 @@ CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
#
# PCI Hotplug Support
#
-CONFIG_HOTPLUG_PCI=y
-# CONFIG_HOTPLUG_PCI_FAKE is not set
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_HOTPLUG_PCI is not set
#
# Executable file formats
@@ -341,6 +365,7 @@ CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
@@ -556,6 +581,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_JMICRON is not set
# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
@@ -572,6 +598,7 @@ CONFIG_SATA_SIL=y
# CONFIG_PATA_SIS is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_PLATFORM=y
#
# Multi-device support (RAID and LVM)
@@ -688,6 +715,7 @@ CONFIG_R8169=y
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
#
# Token Ring devices
@@ -830,10 +858,6 @@ CONFIG_HW_RANDOM=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
@@ -1020,7 +1044,7 @@ CONFIG_INOTIFY_USER=y
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_FUSE_FS=m
#
# CD-ROM/DVD Filesystems
@@ -1052,7 +1076,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
#
# Miscellaneous filesystems
@@ -1153,28 +1177,33 @@ CONFIG_NLS_ISO8859_1=y
#
# Profiling support
#
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
#
# Kernel hacking
#
-# CONFIG_PRINTK_TIME is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_LIST is not set
@@ -1184,7 +1213,7 @@ CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_4KSTACKS is not set
# CONFIG_KGDB is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
new file mode 100644
index 00000000000..36cec0b6e7c
--- /dev/null
+++ b/arch/sh/configs/se7206_defconfig
@@ -0,0 +1,826 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Sun Nov 5 16:20:10 2006
+#
+CONFIG_SUPERH=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
+# System type
+#
+# CONFIG_SH_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SOLUTION_ENGINE is not set
+# CONFIG_SH_7300_SOLUTION_ENGINE is not set
+# CONFIG_SH_7343_SOLUTION_ENGINE is not set
+# CONFIG_SH_73180_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SYSTEMH is not set
+# CONFIG_SH_HP6XX is not set
+# CONFIG_SH_EC3104 is not set
+# CONFIG_SH_SATURN is not set
+# CONFIG_SH_DREAMCAST is not set
+# CONFIG_SH_BIGSUR is not set
+# CONFIG_SH_MPC1211 is not set
+# CONFIG_SH_SH03 is not set
+# CONFIG_SH_SECUREEDGE5410 is not set
+# CONFIG_SH_HS7751RVOIP is not set
+# CONFIG_SH_7710VOIPGW is not set
+# CONFIG_SH_RTS7751R2D is not set
+# CONFIG_SH_R7780RP is not set
+# CONFIG_SH_EDOSK7705 is not set
+# CONFIG_SH_SH4202_MICRODEV is not set
+# CONFIG_SH_LANDISK is not set
+# CONFIG_SH_TITAN is not set
+# CONFIG_SH_SHMIN is not set
+CONFIG_SH_7206_SOLUTION_ENGINE=y
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
+# CONFIG_SH_UNKNOWN is not set
+
+#
+# Processor selection
+#
+CONFIG_CPU_SH2=y
+CONFIG_CPU_SH2A=y
+
+#
+# SH-2 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+CONFIG_CPU_SUBTYPE_SH7206=y
+
+#
+# SH-3 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7300 is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+
+#
+# SH-4 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+
+#
+# ST40 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
+# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+
+#
+# SH-4A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+
+#
+# SH4AL-DSP Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH73180 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+
+#
+# Memory management options
+#
+CONFIG_PAGE_OFFSET=0x00000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x02000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+# CONFIG_SH_WRITETHROUGH is not set
+# CONFIG_SH_OCRAM is not set
+
+#
+# Processor features
+#
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+# CONFIG_SH_FPU is not set
+# CONFIG_SH_FPU_EMU is not set
+# CONFIG_SH_DSP is not set
+
+#
+# Timer support
+#
+CONFIG_SH_CMT=y
+# CONFIG_SH_MTU2 is not set
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_SH_CLK_MD=6
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+# CONFIG_HD6446X_SERIES is not set
+
+#
+# Kernel features
+#
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_KEXEC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+# CONFIG_UBC_WAKEUP is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Bus options
+#
+# CONFIG_PCI is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+
+#
+# PCI Hotplug Support
+#
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options (EXPERIMENTAL)
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x20000000
+CONFIG_MTD_PHYSMAP_LEN=0x1000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=4
+# CONFIG_MTD_SOLUTIONENGINE is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+# CONFIG_NETDEVICES is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=4
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_SYSFS is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_UNWIND_INFO is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig
new file mode 100644
index 00000000000..c54c758e624
--- /dev/null
+++ b/arch/sh/drivers/Kconfig
@@ -0,0 +1,9 @@
+menu "Additional SuperH Device Drivers"
+
+config PUSH_SWITCH
+ tristate "Push switch support"
+ help
+ This enables support for the push switch framework, a simple
+ framework that allows for sysfs-driven switch status reporting.
+
+endmenu
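Note: once PUSH_SWITCH and a board-level user such as the R7780RP psw.c above are enabled, switch state is reported through sysfs. A small userspace sketch of how that state might be read follows; the attribute path is an assumption for illustration only, since the actual location is defined by the push-switch driver, which is not part of this hunk:

    #include <stdio.h>

    int main(void)
    {
            char buf[16];
            /* illustrative path only */
            FILE *f = fopen("/sys/devices/platform/push-switch.0/switch", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("switch state: %s", buf);
            fclose(f);
            return 0;
    }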
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index 338c3729d27..bf18dbfb678 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_SH_DMA) += dma/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
-
+obj-$(CONFIG_PUSH_SWITCH) += push-switch.o
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile
index 065d4c90970..db1295d3226 100644
--- a/arch/sh/drivers/dma/Makefile
+++ b/arch/sh/drivers/dma/Makefile
@@ -2,8 +2,8 @@
# Makefile for the SuperH DMA specific kernel interface routines under Linux.
#
-obj-y += dma-api.o dma-isa.o
+obj-y += dma-api.o
+obj-$(CONFIG_ISA_DMA_API) += dma-isa.o
obj-$(CONFIG_SYSFS) += dma-sysfs.o
obj-$(CONFIG_SH_DMA) += dma-sh.o
obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o
-
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 47c3e837599..e062067edd2 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -11,61 +11,27 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/platform_device.h>
+#include <linux/mm.h>
#include <asm/dma.h>
DEFINE_SPINLOCK(dma_spin_lock);
static LIST_HEAD(registered_dmac_list);
-/*
- * A brief note about the reasons for this API as it stands.
- *
- * For starters, the old ISA DMA API didn't work for us for a number of
- * reasons, for one, the vast majority of channels on the SH DMAC are
- * dual-address mode only, and both the new and the old DMA APIs are after the
- * concept of managing a DMA buffer, which doesn't overly fit this model very
- * well. In addition to which, the new API is largely geared at IOMMUs and
- * GARTs, and doesn't even support the channel notion very well.
- *
- * The other thing that's a marginal issue, is the sheer number of random DMA
- * engines that are present (ie, in boards like the Dreamcast), some of which
- * cascade off of the SH DMAC, and others do not. As such, there was a real
- * need for a scalable subsystem that could deal with both single and
- * dual-address mode usage, in addition to interoperating with cascaded DMACs.
- *
- * There really isn't any reason why this needs to be SH specific, though I'm
- * not aware of too many other processors (with the exception of some MIPS)
- * that have the same concept of a dual address mode, or any real desire to
- * actually make use of the DMAC even if such a subsystem were exposed
- * elsewhere.
- *
- * The idea for this was derived from the ARM port, which acted as an excellent
- * reference when trying to address these issues.
- *
- * It should also be noted that the decision to add Yet Another DMA API(tm) to
- * the kernel wasn't made easily, and was only decided upon after conferring
- * with jejb with regards to the state of the old and new APIs as they applied
- * to these circumstances. Philip Blundell was also a great help in figuring
- * out some single-address mode DMA semantics that were otherwise rather
- * confusing.
- */
-
struct dma_info *get_dma_info(unsigned int chan)
{
struct dma_info *info;
- unsigned int total = 0;
/*
* Look for each DMAC's range to determine who the owner of
* the channel is.
*/
list_for_each_entry(info, &registered_dmac_list, list) {
- total += info->nr_channels;
- if (chan > total)
+ if ((chan < info->first_channel_nr) ||
+ (chan >= info->first_channel_nr + info->nr_channels))
continue;
return info;
@@ -73,6 +39,22 @@ struct dma_info *get_dma_info(unsigned int chan)
return NULL;
}
+EXPORT_SYMBOL(get_dma_info);
+
+struct dma_info *get_dma_info_by_name(const char *dmac_name)
+{
+ struct dma_info *info;
+
+ list_for_each_entry(info, &registered_dmac_list, list) {
+ if (dmac_name && (strcmp(dmac_name, info->name) != 0))
+ continue;
+ else
+ return info;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(get_dma_info_by_name);
static unsigned int get_nr_channels(void)
{
@@ -91,63 +73,161 @@ static unsigned int get_nr_channels(void)
struct dma_channel *get_dma_channel(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel;
+ int i;
- if (!info)
+ if (unlikely(!info))
return ERR_PTR(-EINVAL);
- return info->channels + chan;
+ for (i = 0; i < info->nr_channels; i++) {
+ channel = &info->channels[i];
+ if (channel->chan == chan)
+ return channel;
+ }
+
+ return NULL;
}
+EXPORT_SYMBOL(get_dma_channel);
int get_dma_residue(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->get_residue)
return info->ops->get_residue(channel);
return 0;
}
+EXPORT_SYMBOL(get_dma_residue);
-int request_dma(unsigned int chan, const char *dev_id)
+static int search_cap(const char **haystack, const char *needle)
{
- struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ const char **p;
+
+ for (p = haystack; *p; p++)
+ if (strcmp(*p, needle) == 0)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * request_dma_bycap - Allocate a DMA channel based on its capabilities
+ * @dmac: List of DMA controllers to search
+ * @caps: List of capabilites
+ *
+ * Search all channels of all DMA controllers to find a channel which
+ * matches the requested capabilities. The result is the channel
+ * number if a match is found, or a negative error code otherwise.
+ *
+ * Note that not all DMA controllers export capabilities, in which
+ * case they can never be allocated using this API, and so
+ * request_dma() must be used specifying the channel number.
+ */
+int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
+{
+ unsigned int found = 0;
+ struct dma_info *info;
+ const char **p;
+ int i;
+
+ BUG_ON(!dmac || !caps);
+
+ list_for_each_entry(info, &registered_dmac_list, list)
+ if (strcmp(*dmac, info->name) == 0) {
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ for (i = 0; i < info->nr_channels; i++) {
+ struct dma_channel *channel = &info->channels[i];
+
+ if (unlikely(!channel->caps))
+ continue;
+
+ for (p = caps; *p; p++) {
+ if (!search_cap(channel->caps, *p))
+ break;
+ if (request_dma(channel->chan, dev_id) == 0)
+ return channel->chan;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(request_dma_bycap);
+
+int dmac_search_free_channel(const char *dev_id)
+{
+ struct dma_channel *channel = NULL;
+ struct dma_info *info = get_dma_info(0);
+ int i;
+
+ for (i = 0; i < info->nr_channels; i++) {
+ channel = &info->channels[i];
+ if (unlikely(!channel))
+ return -ENODEV;
+
+ if (atomic_read(&channel->busy) == 0)
+ break;
+ }
- down(&channel->sem);
+ if (info->ops->request) {
+ int result = info->ops->request(channel);
+ if (result)
+ return result;
- if (!info->ops || chan >= MAX_DMA_CHANNELS) {
- up(&channel->sem);
- return -EINVAL;
+ atomic_set(&channel->busy, 1);
+ return channel->chan;
}
- atomic_set(&channel->busy, 1);
+ return -ENOSYS;
+}
+
+int request_dma(unsigned int chan, const char *dev_id)
+{
+ struct dma_channel *channel = NULL;
+ struct dma_info *info = get_dma_info(chan);
+ int result;
+
+ channel = get_dma_channel(chan);
+ if (atomic_xchg(&channel->busy, 1))
+ return -EBUSY;
strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
- up(&channel->sem);
+ if (info->ops->request) {
+ result = info->ops->request(channel);
+ if (result)
+ atomic_set(&channel->busy, 0);
- if (info->ops->request)
- return info->ops->request(channel);
+ return result;
+ }
return 0;
}
+EXPORT_SYMBOL(request_dma);
void free_dma(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->free)
info->ops->free(channel);
atomic_set(&channel->busy, 0);
}
+EXPORT_SYMBOL(free_dma);
void dma_wait_for_completion(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ struct dma_channel *channel = get_dma_channel(chan);
if (channel->flags & DMA_TEI_CAPABLE) {
wait_event(channel->wait_queue,
@@ -158,21 +238,52 @@ void dma_wait_for_completion(unsigned int chan)
while (info->ops->get_residue(channel))
cpu_relax();
}
+EXPORT_SYMBOL(dma_wait_for_completion);
+
+int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
+{
+ struct dma_info *info;
+ unsigned int found = 0;
+ int i;
+
+ list_for_each_entry(info, &registered_dmac_list, list)
+ if (strcmp(dmac, info->name) == 0) {
+ found = 1;
+ break;
+ }
+
+ if (unlikely(!found))
+ return -ENODEV;
+
+ for (i = 0; i < info->nr_channels; i++, caps++) {
+ struct dma_channel *channel;
+
+ if ((info->first_channel_nr + i) != caps->ch_num)
+ return -EINVAL;
+
+ channel = &info->channels[i];
+ channel->caps = caps->caplist;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(register_chan_caps);
void dma_configure_channel(unsigned int chan, unsigned long flags)
{
struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->configure)
info->ops->configure(channel, flags);
}
+EXPORT_SYMBOL(dma_configure_channel);
int dma_xfer(unsigned int chan, unsigned long from,
unsigned long to, size_t size, unsigned int mode)
{
struct dma_info *info = get_dma_info(chan);
- struct dma_channel *channel = &info->channels[chan];
+ struct dma_channel *channel = get_dma_channel(chan);
channel->sar = from;
channel->dar = to;
@@ -181,8 +292,20 @@ int dma_xfer(unsigned int chan, unsigned long from,
return info->ops->xfer(channel);
}
+EXPORT_SYMBOL(dma_xfer);
+
+int dma_extend(unsigned int chan, unsigned long op, void *param)
+{
+ struct dma_info *info = get_dma_info(chan);
+ struct dma_channel *channel = get_dma_channel(chan);
+
+ if (info->ops->extend)
+ return info->ops->extend(channel, op, param);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(dma_extend);
-#ifdef CONFIG_PROC_FS
static int dma_read_proc(char *buf, char **start, off_t off,
int len, int *eof, void *data)
{
@@ -214,8 +337,6 @@ static int dma_read_proc(char *buf, char **start, off_t off,
return p - buf;
}
-#endif
-
int register_dmac(struct dma_info *info)
{
@@ -224,8 +345,7 @@ int register_dmac(struct dma_info *info)
INIT_LIST_HEAD(&info->list);
printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
- info->name, info->nr_channels,
- info->nr_channels > 1 ? "s" : "");
+ info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");
BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
@@ -242,28 +362,26 @@ int register_dmac(struct dma_info *info)
size = sizeof(struct dma_channel) * info->nr_channels;
- info->channels = kmalloc(size, GFP_KERNEL);
+ info->channels = kzalloc(size, GFP_KERNEL);
if (!info->channels)
return -ENOMEM;
-
- memset(info->channels, 0, size);
}
total_channels = get_nr_channels();
for (i = 0; i < info->nr_channels; i++) {
- struct dma_channel *chan = info->channels + i;
+ struct dma_channel *chan = &info->channels[i];
+
+ atomic_set(&chan->busy, 0);
- chan->chan = i;
- chan->vchan = i + total_channels;
+ chan->chan = info->first_channel_nr + i;
+ chan->vchan = info->first_channel_nr + i + total_channels;
memcpy(chan->dev_id, "Unused", 7);
if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
chan->flags |= DMA_TEI_CAPABLE;
- init_MUTEX(&chan->sem);
init_waitqueue_head(&chan->wait_queue);
-
dma_create_sysfs_files(chan, info);
}
@@ -271,6 +389,7 @@ int register_dmac(struct dma_info *info)
return 0;
}
+EXPORT_SYMBOL(register_dmac);
void unregister_dmac(struct dma_info *info)
{
@@ -285,31 +404,16 @@ void unregister_dmac(struct dma_info *info)
list_del(&info->list);
platform_device_unregister(info->pdev);
}
+EXPORT_SYMBOL(unregister_dmac);
static int __init dma_api_init(void)
{
- printk("DMA: Registering DMA API.\n");
-
-#ifdef CONFIG_PROC_FS
+ printk(KERN_NOTICE "DMA: Registering DMA API.\n");
create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
-#endif
-
return 0;
}
-
subsys_initcall(dma_api_init);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("DMA API for SuperH");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
-EXPORT_SYMBOL(register_dmac);
-EXPORT_SYMBOL(get_dma_residue);
-EXPORT_SYMBOL(get_dma_info);
-EXPORT_SYMBOL(get_dma_channel);
-EXPORT_SYMBOL(dma_xfer);
-EXPORT_SYMBOL(dma_wait_for_completion);
-EXPORT_SYMBOL(dma_configure_channel);
-
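For context, a driver written against the reworked API above would typically try the capability-based allocator first and only fall back to a fixed channel number. The fragment below is an illustrative sketch, not part of the patch: the controller name, capability string, addresses, size and mode value are all invented, and the prototypes are assumed to come in via <asm/dma.h> with the rest of the SH DMA API.

#include <linux/errno.h>
#include <asm/dma.h>

static int example_setup_dma(void)
{
	/* NULL-terminated lists; both names are hypothetical */
	const char *dmac[] = { "DMAC 0", NULL };
	const char *caps[] = { "example_tx", NULL };
	int chan;

	chan = request_dma_bycap(dmac, caps, "example-driver");
	if (chan < 0) {
		/* controller exports no capabilities: fall back to channel 0 */
		int ret = request_dma(0, "example-driver");
		if (ret)
			return ret;
		chan = 0;
	}

	dma_configure_channel(chan, 0);

	/* placeholder physical addresses and size; the mode flags are
	 * controller-specific */
	dma_xfer(chan, 0x0c000000, 0x0c100000, 4096, 0);
	dma_wait_for_completion(chan);

	free_dma(chan);
	return 0;
}

Note that request_dma() itself now claims the channel with atomic_xchg() on channel->busy, so a second request for the same channel fails with -EBUSY rather than silently sharing it.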
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 66078601335..f63721ed86c 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -94,20 +94,13 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
if (unlikely(!chan->flags & DMA_TEI_CAPABLE))
return 0;
- chan->name = kzalloc(32, GFP_KERNEL);
- if (unlikely(chan->name == NULL))
- return -ENOMEM;
- snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)",
- chan->chan);
-
return request_irq(get_dmte_irq(chan->chan), dma_tei,
- IRQF_DISABLED, chan->name, chan);
+ IRQF_DISABLED, chan->dev_id, chan);
}
static void sh_dmac_free_dma(struct dma_channel *chan)
{
free_irq(get_dmte_irq(chan->chan), chan);
- kfree(chan->name);
}
static void
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 29b8ef9873d..eebcd4768bb 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs interface for SH DMA API
*
- * Copyright (C) 2004, 2005 Paul Mundt
+ * Copyright (C) 2004 - 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -21,7 +21,6 @@
static struct sysdev_class dma_sysclass = {
set_kset_name("dma"),
};
-
EXPORT_SYMBOL(dma_sysclass);
static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
@@ -31,7 +30,10 @@ static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct dma_info *info = get_dma_info(i);
- struct dma_channel *channel = &info->channels[i];
+ struct dma_channel *channel = get_dma_channel(i);
+
+ if (unlikely(!info) || !channel)
+ continue;
len += sprintf(buf + len, "%2d: %14s %s\n",
channel->chan, info->name,
@@ -125,11 +127,16 @@ int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
if (ret)
return ret;
- sysdev_create_file(dev, &attr_dev_id);
- sysdev_create_file(dev, &attr_count);
- sysdev_create_file(dev, &attr_mode);
- sysdev_create_file(dev, &attr_flags);
- sysdev_create_file(dev, &attr_config);
+ ret |= sysdev_create_file(dev, &attr_dev_id);
+ ret |= sysdev_create_file(dev, &attr_count);
+ ret |= sysdev_create_file(dev, &attr_mode);
+ ret |= sysdev_create_file(dev, &attr_flags);
+ ret |= sysdev_create_file(dev, &attr_config);
+
+ if (unlikely(ret)) {
+ dev_err(&info->pdev->dev, "Failed creating attrs\n");
+ return ret;
+ }
snprintf(name, sizeof(name), "dma%d", chan->chan);
return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c
index cd56d53375e..ac8ee2312cd 100644
--- a/arch/sh/drivers/pci/ops-titan.c
+++ b/arch/sh/drivers/pci/ops-titan.c
@@ -15,25 +15,21 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/titan.h>
#include "pci-sh4.h"
+static char titan_irq_tab[] __initdata = {
+ TITAN_IRQ_WAN,
+ TITAN_IRQ_LAN,
+ TITAN_IRQ_MPCIA,
+ TITAN_IRQ_MPCIB,
+ TITAN_IRQ_USB,
+};
+
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
- int irq = -1;
-
- switch (slot) {
- case 0: irq = TITAN_IRQ_WAN; break; /* eth0 (WAN) */
- case 1: irq = TITAN_IRQ_LAN; break; /* eth1 (LAN) */
- case 2: irq = TITAN_IRQ_MPCIA; break; /* mPCI A */
- case 3: irq = TITAN_IRQ_MPCIB; break; /* mPCI B */
- case 4: irq = TITAN_IRQ_USB; break; /* USB */
- default:
- printk(KERN_INFO "PCI: Bad IRQ mapping "
- "request for slot %d\n", slot);
- return -1;
- }
+ int irq = titan_irq_tab[slot];
printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
slot, pin - 1 + 'A', irq);
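One side effect of the table conversion above is that the old default case, and with it the slot bounds check, is gone; any slot outside the table now indexes past titan_irq_tab. If that check matters, a guarded variant of the function would look roughly like this (illustrative sketch only, not part of the patch; ARRAY_SIZE comes from <linux/kernel.h>):

int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
	int irq;

	if (slot >= ARRAY_SIZE(titan_irq_tab)) {
		printk(KERN_INFO "PCI: Bad IRQ mapping request for slot %d\n",
		       slot);
		return -1;
	}

	irq = titan_irq_tab[slot];
	printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
	       slot, pin - 1 + 'A', irq);

	return irq;
}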
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index d6e63529653..602b644c35a 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -22,6 +22,20 @@
#include <linux/delay.h>
#include "pci-sh4.h"
+#define INTC_BASE 0xffd00000
+#define INTC_ICR0 (INTC_BASE+0x0)
+#define INTC_ICR1 (INTC_BASE+0x1c)
+#define INTC_INTPRI (INTC_BASE+0x10)
+#define INTC_INTREQ (INTC_BASE+0x24)
+#define INTC_INTMSK0 (INTC_BASE+0x44)
+#define INTC_INTMSK1 (INTC_BASE+0x48)
+#define INTC_INTMSK2 (INTC_BASE+0x40080)
+#define INTC_INTMSKCLR0 (INTC_BASE+0x64)
+#define INTC_INTMSKCLR1 (INTC_BASE+0x68)
+#define INTC_INTMSKCLR2 (INTC_BASE+0x40084)
+#define INTC_INT2MSKR (INTC_BASE+0x40038)
+#define INTC_INT2MSKCR (INTC_BASE+0x4003c)
+
/*
* Initialization. Try all known PCI access methods. Note that we support
* using both PCI BIOS and direct access: in such cases, we use I/O ports
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
new file mode 100644
index 00000000000..f2b9157c314
--- /dev/null
+++ b/arch/sh/drivers/push-switch.c
@@ -0,0 +1,138 @@
+/*
+ * Generic push-switch framework
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/push-switch.h>
+
+#define DRV_NAME "push-switch"
+#define DRV_VERSION "0.1.0"
+
+static ssize_t switch_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct push_switch_platform_info *psw_info = dev->platform_data;
+ return sprintf(buf, "%s\n", psw_info->name);
+}
+static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
+
+static void switch_timer(unsigned long data)
+{
+ struct push_switch *psw = (struct push_switch *)data;
+
+ schedule_work(&psw->work);
+}
+
+static void switch_work_handler(void *data)
+{
+ struct platform_device *pdev = data;
+ struct push_switch *psw = platform_get_drvdata(pdev);
+
+ psw->state = 0;
+
+ kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int switch_drv_probe(struct platform_device *pdev)
+{
+ struct push_switch_platform_info *psw_info;
+ struct push_switch *psw;
+ int ret, irq;
+
+ psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
+ if (unlikely(!psw))
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ psw_info = pdev->dev.platform_data;
+ BUG_ON(!psw_info);
+
+ ret = request_irq(irq, psw_info->irq_handler,
+ IRQF_DISABLED | psw_info->irq_flags,
+ psw_info->name ? psw_info->name : DRV_NAME, pdev);
+ if (unlikely(ret < 0))
+ goto err;
+
+ if (psw_info->name) {
+ ret = device_create_file(&pdev->dev, &dev_attr_switch);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev, "Failed creating device attrs\n");
+ ret = -EINVAL;
+ goto err_irq;
+ }
+ }
+
+ INIT_WORK(&psw->work, switch_work_handler, pdev);
+ init_timer(&psw->debounce);
+
+ psw->debounce.function = switch_timer;
+ psw->debounce.data = (unsigned long)psw;
+
+ platform_set_drvdata(pdev, psw);
+
+ return 0;
+
+err_irq:
+ free_irq(irq, pdev);
+err:
+ kfree(psw);
+ return ret;
+}
+
+static int switch_drv_remove(struct platform_device *pdev)
+{
+ struct push_switch *psw = platform_get_drvdata(pdev);
+ struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (psw_info->name)
+ device_remove_file(&pdev->dev, &dev_attr_switch);
+
+ platform_set_drvdata(pdev, NULL);
+ flush_scheduled_work();
+ del_timer_sync(&psw->debounce);
+ free_irq(irq, pdev);
+
+ kfree(psw);
+
+ return 0;
+}
+
+static struct platform_driver switch_driver = {
+ .probe = switch_drv_probe,
+ .remove = switch_drv_remove,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init switch_init(void)
+{
+ printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
+ return platform_driver_register(&switch_driver);
+}
+
+static void __exit switch_exit(void)
+{
+ platform_driver_unregister(&switch_driver);
+}
+module_init(switch_init);
+module_exit(switch_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt");
+MODULE_LICENSE("GPL v2");
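To show how the new framework is meant to be consumed, here is a hedged sketch of the board-side half: a platform device named "push-switch" whose platform_data supplies the interrupt handler and debounce kick. The IRQ number, trigger type and switch name are invented for the example, and the handler signature follows the request_irq() calling convention of this kernel series.

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <asm/push-switch.h>

#define EXAMPLE_SWITCH_IRQ	99	/* invented IRQ number */

static irqreturn_t example_switch_handler(int irq, void *data)
{
	struct push_switch *psw = platform_get_drvdata(data);

	/* mark the switch as pressed and (re)arm the debounce timer */
	psw->state = 1;
	mod_timer(&psw->debounce, jiffies + 50);

	return IRQ_HANDLED;
}

static struct resource example_switch_resources[] = {
	[0] = {
		.start	= EXAMPLE_SWITCH_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct push_switch_platform_info example_switch_info = {
	.name		= "example_sw",	/* shows up in the sysfs "switch" attr */
	.irq_handler	= example_switch_handler,
	.irq_flags	= IRQF_TRIGGER_LOW,	/* assumed trigger type */
};

static struct platform_device example_switch_device = {
	.name		= "push-switch",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_switch_resources),
	.resource	= example_switch_resources,
	.dev		= {
		.platform_data = &example_switch_info,
	},
};

Registering example_switch_device with platform_device_register() from the board setup code then binds it to the "push-switch" driver by name; the driver's probe routine picks up the IRQ from the resource and the handler from the platform data, as shown above.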
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 5da88a43d35..99c7e5249f7 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -4,7 +4,7 @@
extra-y := head.o init_task.o vmlinux.lds
-obj-y := process.o signal.o entry.o traps.o irq.o \
+obj-y := process.o signal.o traps.o irq.o \
ptrace.o setup.o time.o sys_sh.o semaphore.o \
io.o io_generic.o sh_ksyms.o syscalls.o
@@ -21,3 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index fb5dac06938..0582e6712b7 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -2,11 +2,12 @@
# Makefile for the Linux/SuperH CPU-specific backends.
#
-obj-y += irq/ init.o clock.o
-
-obj-$(CONFIG_CPU_SH2) += sh2/
-obj-$(CONFIG_CPU_SH3) += sh3/
-obj-$(CONFIG_CPU_SH4) += sh4/
+obj-$(CONFIG_CPU_SH2) = sh2/
+obj-$(CONFIG_CPU_SH2A) = sh2a/
+obj-$(CONFIG_CPU_SH3) = sh3/
+obj-$(CONFIG_CPU_SH4) = sh4/
obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
+
+obj-y += irq/ init.o clock.o
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 51ec64cdf34..abb586b1256 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -5,9 +5,11 @@
*
* This clock framework is derived from the OMAP version by:
*
- * Copyright (C) 2004 Nokia Corporation
+ * Copyright (C) 2004 - 2005 Nokia Corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
+ * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -20,6 +22,7 @@
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/err.h>
+#include <linux/platform_device.h>
#include <asm/clock.h>
#include <asm/timer.h>
@@ -195,17 +198,37 @@ void clk_recalc_rate(struct clk *clk)
propagate_rate(clk);
}
-struct clk *clk_get(const char *id)
+/*
+ * Returns a clock. Note that we first try to match both the device id
+ * on the bus and the clock name; if that fails, we fall back to matching
+ * the clock name only.
+ */
+struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
+ int idno;
+
+ if (dev == NULL || dev->bus != &platform_bus_type)
+ idno = -1;
+ else
+ idno = to_platform_device(dev)->id;
mutex_lock(&clock_list_sem);
list_for_each_entry(p, &clock_list, node) {
+ if (p->id == idno &&
+ strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+ clk = p;
+ goto found;
+ }
+ }
+
+ list_for_each_entry(p, &clock_list, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
+
+found:
mutex_unlock(&clock_list_sem);
return clk;
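Since clk_get() now takes the consuming device, a caller that was probed as a platform device can let the lookup try its platform id before falling back to a plain name match. An illustrative sketch; "module_clk" is one of the standard SH clock names, everything else here is invented:

#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

static unsigned long example_module_rate(struct platform_device *pdev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(&pdev->dev, "module_clk");
	if (IS_ERR(clk))
		return 0;

	rate = clk_get_rate(clk);
	clk_put(clk);

	return rate;
}

Callers without a struct device (such as the clock-sh7709.c and clock-sh4-202.c hunks below) simply pass NULL and get the old name-only behaviour.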
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index bfb90eb0b7a..48121766e8d 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -68,12 +68,14 @@ static void __init cache_init(void)
waysize = cpu_data->dcache.sets;
+#ifdef CCR_CACHE_ORA
/*
* If the OC is already in RAM mode, we only have
* half of the entries to flush..
*/
if (ccr & CCR_CACHE_ORA)
waysize >>= 1;
+#endif
waysize <<= cpu_data->dcache.entry_shift;
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 1c034c283f5..0049d217561 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,8 +1,9 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-y += ipr.o imask.o
+obj-y += imask.o
+obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o
obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index a33ae3e0a5a..301b505c427 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -53,7 +53,10 @@ void static inline set_interrupt_registers(int ip)
{
unsigned long __dummy;
- asm volatile("ldc %2, r6_bank\n\t"
+ asm volatile(
+#ifdef CONFIG_CPU_HAS_SR_RB
+ "ldc %2, r6_bank\n\t"
+#endif
"stc sr, %0\n\t"
"and #0xf0, %0\n\t"
"shlr2 %0\n\t"
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c
index 74ca576a7ce..74defe76a05 100644
--- a/arch/sh/kernel/cpu/irq/intc2.c
+++ b/arch/sh/kernel/cpu/irq/intc2.c
@@ -11,22 +11,29 @@
* Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780.
*/
#include <linux/kernel.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/system.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7760)
+#define INTC2_BASE 0xfe080000
+#define INTC2_INTMSK (INTC2_BASE + 0x40)
+#define INTC2_INTMSKCLR (INTC2_BASE + 0x60)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
+#define INTC2_BASE 0xffd40000
+#define INTC2_INTMSK (INTC2_BASE + 0x38)
+#define INTC2_INTMSKCLR (INTC2_BASE + 0x3c)
+#endif
static void disable_intc2_irq(unsigned int irq)
{
struct intc2_data *p = get_irq_chip_data(irq);
- ctrl_outl(1 << p->msk_shift,
- INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset);
+ ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset);
}
static void enable_intc2_irq(unsigned int irq)
{
struct intc2_data *p = get_irq_chip_data(irq);
- ctrl_outl(1 << p->msk_shift,
- INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset);
+ ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset);
}
static struct irq_chip intc2_irq_chip = {
@@ -61,12 +68,10 @@ void make_intc2_irq(struct intc2_data *table, unsigned int nr_irqs)
/* Set the priority level */
local_irq_save(flags);
- ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET +
- p->ipr_offset);
+ ipr = ctrl_inl(INTC2_BASE + p->ipr_offset);
ipr &= ~(0xf << p->ipr_shift);
ipr |= p->priority << p->ipr_shift;
- ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET +
- p->ipr_offset);
+ ctrl_outl(ipr, INTC2_BASE + p->ipr_offset);
local_irq_restore(flags);
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index a0089563cbf..35eb5751a3a 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -19,25 +19,21 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/machvec.h>
-
+#include <linux/io.h>
+#include <linux/interrupt.h>
static void disable_ipr_irq(unsigned int irq)
{
struct ipr_data *p = get_irq_chip_data(irq);
- int shift = p->shift*4;
/* Set the priority in IPR to 0 */
- ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr);
+ ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
}
static void enable_ipr_irq(unsigned int irq)
{
struct ipr_data *p = get_irq_chip_data(irq);
- int shift = p->shift*4;
/* Set priority in IPR back to original value */
- ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr);
+ ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
}
static struct irq_chip ipr_irq_chip = {
@@ -53,6 +49,10 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs)
for (i = 0; i < nr_irqs; i++) {
unsigned int irq = table[i].irq;
+ table[i].addr = map_ipridx_to_addr(table[i].ipr_idx);
+ /* skip this entry if the IPR index could not be mapped */
+ if (table[i].addr == 0)
+ continue;
disable_irq_nosync(irq);
set_irq_chip_and_handler_name(irq, &ipr_irq_chip,
handle_level_irq, "level");
@@ -62,83 +62,6 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs)
}
EXPORT_SYMBOL(make_ipr_irq);
-static struct ipr_data sys_ipr_map[] = {
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
- { TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY },
- { TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY },
-#ifdef RTC_IRQ
- { RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY },
-#endif
-#ifdef SCI_ERI_IRQ
- { SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
- { SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
- { SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-#endif
-#ifdef SCIF1_ERI_IRQ
- { SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
- { SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
- { SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
- { SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
- { SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY },
- { DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
- { DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
- { VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY },
-#endif
-#ifdef SCIF_ERI_IRQ
- { SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
- { SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
- { SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
- { SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-#endif
-#ifdef IRDA_ERI_IRQ
- { IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
- { IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
- { IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
- { IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
- defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
- /*
- * Initialize the Interrupt Controller (INTC)
- * registers to their power on values
- */
-
- /*
- * Enable external irq (INTC IRQ mode).
- * You should set corresponding bits of PFC to "00"
- * to enable these interrupts.
- */
- { IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY },
- { IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY },
- { IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY },
- { IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY },
- { IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY },
- { IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY },
-#endif
-#endif
-};
-
-void __init init_IRQ(void)
-{
- make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map));
-
-#ifdef CONFIG_CPU_HAS_PINT_IRQ
- init_IRQ_pint();
-#endif
-
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
- init_IRQ_intc2();
-#endif
- /* Perform the machine specific initialisation */
- if (sh_mv.mv_init_irq != NULL)
- sh_mv.mv_init_irq();
-
- irq_ctx_init(smp_processor_id());
-}
-
#if !defined(CONFIG_CPU_HAS_PINT_IRQ)
int ipr_irq_demux(int irq)
{
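With the lookup now driven by ipr_idx, a CPU or board table only has to provide the index, shift and priority; the register address is filled in by map_ipridx_to_addr() at registration time, and entries whose index cannot be mapped are skipped. A hedged sketch of such a table (the field names follow the references in make_ipr_irq() above; the numeric values are made up):

static struct ipr_data example_ipr_map[] = {
	/*   IRQ        IPR index     bit shift      priority */
	{ .irq = 16, .ipr_idx = 0, .shift = 12, .priority = 2 },
	{ .irq = 17, .ipr_idx = 0, .shift =  8, .priority = 2 },
};

static void __init example_init_irq(void)
{
	make_ipr_irq(example_ipr_map, ARRAY_SIZE(example_ipr_map));
}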
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
index 389353fba60..f0f059acfcf 100644
--- a/arch/sh/kernel/cpu/sh2/Makefile
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -2,5 +2,6 @@
# Makefile for the Linux/SuperH SH-2 backends.
#
-obj-y := probe.o
+obj-y := ex.o probe.o entry.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
new file mode 100644
index 00000000000..d0440b26970
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/kernel/cpu/sh2/clock-sh7619.c
+ *
+ * SH7619 support for the clock framework
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2 };
+static const int pfc_divisors[] = { 1, 2, 0, 4 };
+
+#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#else
+#error "Illigal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7619_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7619_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7619_clk_ops[] = {
+ &sh7619_master_clk_ops,
+ &sh7619_module_clk_ops,
+ &sh7619_bus_clk_ops,
+ &sh7619_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7619_clk_ops))
+ *ops = sh7619_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
new file mode 100644
index 00000000000..34d51b3745e
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -0,0 +1,341 @@
+/*
+ * arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * The SH-2 exception entry
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ * Copyright (C) 2005 AXE,Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+
+/* Offsets to the stack */
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_PC = (16*4)
+OFF_SR = (16*4+2*4)
+OFF_TRA = (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+ ! already saved r0/r1
+ mov.l r2,@-sp
+ mov.l r3,@-sp
+ mov r0,r1
+ cli
+ mov.l $cpu_mode,r2
+ mov.l @r2,r0
+ mov.l @(5*4,r15),r3 ! previous SR
+ shll2 r3 ! set "S" flag
+ rotl r0 ! T <- "S" flag
+ rotl r0 ! "S" flag is LSB
+ rotcr r3 ! T -> r3:b30
+ shlr r3
+ shlr r0
+ bt/s 1f
+ mov.l r3,@(5*4,r15) ! copy cpu mode to SR
+ ! switch to kernel mode
+ mov #1,r0
+ rotr r0
+ rotr r0
+ mov.l r0,@r2 ! enter kernel mode
+ mov.l $current_thread_info,r2
+ mov.l @r2,r2
+ mov #0x20,r0
+ shll8 r0
+ add r2,r0
+ mov r15,r2 ! r2 = user stack top
+ mov r0,r15 ! switch kernel stack
+ add #-4,r15 ! dummy
+ mov.l r1,@-r15 ! TRA
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ mov.l @(4*4,r2),r0
+ mov.l @(5*4,r2),r1
+ mov.l r1,@-r15 ! original SR
+ sts.l pr,@-r15
+ mov.l r0,@-r15 ! original PC
+ mov r2,r3
+ add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame
+ mov.l r3,@-r15 ! original SP
+ mov.l r14,@-r15
+ mov.l r13,@-r15
+ mov.l r12,@-r15
+ mov.l r11,@-r15
+ mov.l r10,@-r15
+ mov.l r9,@-r15
+ mov.l r8,@-r15
+ mov.l r7,@-r15
+ mov.l r6,@-r15
+ mov.l r5,@-r15
+ mov.l r4,@-r15
+ mov r2,r8 ! copy user -> kernel stack
+ mov.l @r8+,r3
+ mov.l r3,@-r15
+ mov.l @r8+,r2
+ mov.l r2,@-r15
+ mov.l @r8+,r1
+ mov.l r1,@-r15
+ mov.l @r8+,r0
+ bra 2f
+ mov.l r0,@-r15
+1:
+ ! in kernel exception
+ mov #(22-4-4-1)*4+4,r0
+ mov r15,r2
+ sub r0,r15
+ mov.l @r2+,r0 ! old R3
+ mov.l r0,@-r15
+ mov.l @r2+,r0 ! old R2
+ mov.l r0,@-r15
+ mov.l @r2+,r0 ! old R1
+ mov.l r0,@-r15
+ mov.l @r2+,r0 ! old R0
+ mov.l r0,@-r15
+ mov.l @r2+,r3 ! old PC
+ mov.l @r2+,r0 ! old SR
+ add #-4,r2 ! exception frame stub (sr)
+ mov.l r1,@-r2 ! TRA
+ sts.l macl, @-r2
+ sts.l mach, @-r2
+ stc.l gbr, @-r2
+ mov.l r0,@-r2 ! save old SR
+ sts.l pr,@-r2
+ mov.l r3,@-r2 ! save old PC
+ mov r2,r0
+ add #8*4,r0
+ mov.l r0,@-r2 ! save old SP
+ mov.l r14,@-r2
+ mov.l r13,@-r2
+ mov.l r12,@-r2
+ mov.l r11,@-r2
+ mov.l r10,@-r2
+ mov.l r9,@-r2
+ mov.l r8,@-r2
+ mov.l r7,@-r2
+ mov.l r6,@-r2
+ mov.l r5,@-r2
+ mov.l r4,@-r2
+ mov.l @(OFF_R0,r15),r0
+ mov.l @(OFF_R1,r15),r1
+ mov.l @(OFF_R2,r15),r2
+ mov.l @(OFF_R3,r15),r3
+2:
+ mov #OFF_TRA,r8
+ add r15,r8
+ mov.l @r8,r9
+ mov #64,r8
+ cmp/hs r8,r9
+ bt interrupt_entry ! vec >= 64 is interrupt
+ mov #32,r8
+ cmp/hs r8,r9
+ bt trap_entry ! 64 > vec >= 32 is trap
+ mov.l 4f,r8
+ mov r9,r4
+ shll2 r9
+ add r9,r8
+ mov.l @r8,r8
+ mov #0,r9
+ cmp/eq r9,r8
+ bf 3f
+ mov.l 8f,r8 ! unhandled exception
+3:
+ mov.l 5f,r10
+ jmp @r8
+ lds r10,pr
+
+interrupt_entry:
+ mov r9,r4
+ mov.l 6f,r9
+ mov.l 7f,r8
+ jmp @r8
+ lds r9,pr
+
+ .align 2
+4: .long exception_handling_table
+5: .long ret_from_exception
+6: .long ret_from_irq
+7: .long do_IRQ
+8: .long do_exception_error
+
+trap_entry:
+ add #-0x10,r9
+ shll2 r9 ! TRA
+ mov #OFF_TRA,r8
+ add r15,r8
+ mov.l r9,@r8
+ mov r9,r8
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 5f, r9
+ jsr @r9
+ nop
+#endif
+ sti
+ bra system_call
+ nop
+
+ .align 2
+1: .long syscall_exit
+2: .long break_point_trap_software
+3: .long NR_syscalls
+4: .long sys_call_table
+#ifdef CONFIG_TRACE_IRQFLAGS
+5: .long trace_hardirqs_on
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+ /* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+ mov r15,r0
+ add #(22-4)*4-4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l @(OFF_SP,r0),r1
+ mov #OFF_SR,r2
+ mov.l @(r0,r2),r3
+ mov.l r3,@-r1
+ mov #OFF_SP,r2
+ mov.l @(r0,r2),r3
+ mov.l r3,@-r1
+ mov r15,r0
+ add #(22-4)*4-8,r0
+ mov.l 1f,r2
+ mov.l @r2,r2
+ stc sr,r3
+ mov.l r2,@r0
+ mov.l r3,@r0
+ mov.l r1,@(8,r0)
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ add #8,r15
+ lds.l @r15+, pr
+ rte
+ mov.l @r15+,r15
+ .align 2
+1: .long gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_handler)
+ mov r15,r4 ! regs
+ add #4,r4
+ mov #OFF_PC,r0
+ mov.l @(r0,r15),r6 ! pc
+ mov.l 1f,r0
+ jmp @r0
+ mov #0,r5 ! writeaccess is unknown
+ .align 2
+
+1: .long do_address_error
+
+restore_all:
+ cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 3f, r0
+ jsr @r0
+ nop
+#endif
+ mov r15,r0
+ mov.l $cpu_mode,r2
+ mov #OFF_SR,r3
+ mov.l @(r0,r3),r1
+ mov.l r1,@r2
+ shll2 r1 ! clear MD bit
+ shlr2 r1
+ mov.l @(OFF_SP,r0),r2
+ add #-8,r2
+ mov.l r2,@(OFF_SP,r0) ! point exception frame top
+ mov.l r1,@(4,r2) ! set sr
+ mov #OFF_PC,r3
+ mov.l @(r0,r3),r1
+ mov.l r1,@r2 ! set pc
+ add #4*16+4,r0
+ lds.l @r0+,pr
+ add #4,r0 ! skip sr
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ get_current_thread_info r0, r1
+ mov.l $current_thread_info,r1
+ mov.l r0,@r1
+ mov.l @r15+,r0
+ mov.l @r15+,r1
+ mov.l @r15+,r2
+ mov.l @r15+,r3
+ mov.l @r15+,r4
+ mov.l @r15+,r5
+ mov.l @r15+,r6
+ mov.l @r15+,r7
+ mov.l @r15+,r8
+ mov.l @r15+,r9
+ mov.l @r15+,r10
+ mov.l @r15+,r11
+ mov.l @r15+,r12
+ mov.l @r15+,r13
+ mov.l @r15+,r14
+ mov.l @r15,r15
+ rte
+ nop
+2:
+ mov.l 1f,r8
+ mov.l 2f,r9
+ jmp @r9
+ lds r8,pr
+
+ .align 2
+$current_thread_info:
+ .long __current_thread_info
+$cpu_mode:
+ .long __cpu_mode
+#ifdef CONFIG_TRACE_IRQFLAGS
+3: .long trace_hardirqs_off
+#endif
+
+! common exception handler
+#include "../../entry-common.S"
+
+ .data
+! cpu operation mode
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+ .long 0x40000000
+
+ .section .bss
+__current_thread_info:
+ .long 0
+
+ENTRY(exception_handling_table)
+ .space 4*32
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
new file mode 100644
index 00000000000..6d285af7846
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/ex.S
@@ -0,0 +1,46 @@
+/*
+ * arch/sh/kernel/cpu/sh2/ex.S
+ *
+ * The SH-2 exception vector table
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+exception_entry:
+no = 0
+ .rept 256
+ mov.l r0,@-sp
+ mov #no,r0
+ bra exception_trampoline
+ and #0xff,r0
+no = no + 1
+ .endr
+exception_trampoline:
+ mov.l r1,@-sp
+ mov.l $exception_handler,r1
+ jmp @r1
+
+ .align 2
+$exception_entry:
+ .long exception_entry
+$exception_handler:
+ .long exception_handler
+!
+! Exception Vector Base
+!
+ .align 2
+ENTRY(vbr_base)
+vector = 0
+ .rept 256
+ .long exception_entry + vector * 8
+vector = vector + 1
+ .endr
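Each iteration of the first .rept block (exception_entry) assembles to four fixed-width 16-bit SH instructions (mov.l, mov, bra, and), i.e. 8 bytes per vector, which is why the vbr_base table above generates its entries as exception_entry + vector * 8.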
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index f17a2a0d588..ba527d9b502 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -17,17 +17,23 @@
int __init detect_cpu_and_cache_system(void)
{
- /*
- * For now, assume SH7604 .. fix this later.
- */
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
cpu_data->type = CPU_SH7604;
cpu_data->dcache.ways = 4;
- cpu_data->dcache.way_shift = 6;
+ cpu_data->dcache.way_incr = (1<<10);
cpu_data->dcache.sets = 64;
cpu_data->dcache.entry_shift = 4;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
cpu_data->dcache.flags = 0;
-
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+ cpu_data->type = CPU_SH7619;
+ cpu_data->dcache.ways = 4;
+ cpu_data->dcache.way_incr = (1<<12);
+ cpu_data->dcache.sets = 256;
+ cpu_data->dcache.entry_shift = 4;
+ cpu_data->dcache.linesz = L1_CACHE_BYTES;
+ cpu_data->dcache.flags = 0;
+#endif
/*
* SH-2 doesn't have separate caches
*/
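A quick consistency check on the way_incr values above: with a 16-byte line (entry_shift of 4), way_incr should equal sets << entry_shift, giving 64 << 4 = 0x400 = 1 << 10 for the SH7604 and 256 << 4 = 0x1000 = 1 << 12 for the SH7619, matching the values set in each branch.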
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
new file mode 100644
index 00000000000..82c2d905152
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -0,0 +1,53 @@
+/*
+ * SH7619 Setup
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xf8400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 88, 89, 91, 90},
+ }, {
+ .mapbase = 0xf8410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 92, 93, 95, 94},
+ }, {
+ .mapbase = 0xf8420000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 96, 97, 99, 98},
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7619_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7619_devices_setup(void)
+{
+ return platform_add_devices(sh7619_devices,
+ ARRAY_SIZE(sh7619_devices));
+}
+__initcall(sh7619_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
new file mode 100644
index 00000000000..350972ae941
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux/SuperH SH-2A backends.
+#
+
+obj-y := common.o probe.o
+
+common-y += $(addprefix ../sh2/, ex.o)
+common-y += $(addprefix ../sh2/, entry.o)
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
new file mode 100644
index 00000000000..a9ad309c6a3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+ *
+ * SH7206 support for the clock framework
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[] = { 1, 2, 3, 4, 6, 8 };
+static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 7)
+#define PLL2 (1)
+#else
+#error "Illigal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7206_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (ctrl_inw(FREQCR) & 0x0007);
+ clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7206_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7206_clk_ops[] = {
+ &sh7206_master_clk_ops,
+ &sh7206_module_clk_ops,
+ &sh7206_bus_clk_ops,
+ &sh7206_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7206_clk_ops))
+ *ops = sh7206_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
new file mode 100644
index 00000000000..87c6c054208
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -0,0 +1,39 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/probe.c
+ *
+ * CPU Subtype Probing for SH-2A.
+ *
+ * Copyright (C) 2004, 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+ /* Just SH7206 for now .. */
+ cpu_data->type = CPU_SH7206;
+
+ cpu_data->dcache.ways = 4;
+ cpu_data->dcache.way_incr = (1 << 11);
+ cpu_data->dcache.sets = 128;
+ cpu_data->dcache.entry_shift = 4;
+ cpu_data->dcache.linesz = L1_CACHE_BYTES;
+ cpu_data->dcache.flags = 0;
+
+ /*
+ * The icache is the same as the dcache as far as this setup is
+ * concerned. The only real difference in hardware is that the icache
+ * lacks the U bit that the dcache has; none of this has any bearing
+ * on the cache info.
+ */
+ cpu_data->icache = cpu_data->dcache;
+
+ return 0;
+}
+
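The same arithmetic as for the SH-2 probe applies here: entry_shift of 4 means 16-byte lines, so way_incr = sets << entry_shift = 128 << 4 = 0x800 = 1 << 11 as set above, and 4 ways x 128 sets x 16 bytes works out to an 8 KiB cache on each side.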
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
new file mode 100644
index 00000000000..cdfeef49e62
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -0,0 +1,58 @@
+/*
+ * SH7206 Setup
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 240, 241, 242, 243},
+ }, {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 244, 245, 246, 247},
+ }, {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 248, 249, 250, 251},
+ }, {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 252, 253, 254, 255},
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7206_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7206_devices_setup(void)
+{
+ return platform_add_devices(sh7206_devices,
+ ARRAY_SIZE(sh7206_devices));
+}
+__initcall(sh7206_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 58d3815695f..83905e4e438 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux/SuperH SH-3 backends.
#
-obj-y := ex.o probe.o
+obj-y := ex.o probe.o entry.o
# CPU subtype setup
obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index 10461a745e5..b791a29fdb6 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void set_bus_parent(struct clk *clk)
{
- struct clk *bus_clk = clk_get("bus_clk");
+ struct clk *bus_clk = clk_get(NULL, "bus_clk");
clk->parent = bus_clk;
clk_put(bus_clk);
}
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 39aaefb2d83..8c0dc2700c6 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -1,5 +1,5 @@
/*
- * linux/arch/sh/entry.S
+ * arch/sh/kernel/entry.S
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 - 2006 Paul Mundt
@@ -7,15 +7,16 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -81,6 +82,8 @@ OFF_TRA = (16*4+6*4)
#define k_g_imask r6_bank /* r6_bank1 */
#define current r7 /* r7_bank1 */
+#include <asm/entry-macros.S>
+
/*
* Kernel mode register usage:
* k0 scratch
@@ -107,26 +110,6 @@ OFF_TRA = (16*4+6*4)
! this first version depends *much* on C implementation.
!
-#define CLI() \
- stc sr, r0; \
- or #0xf0, r0; \
- ldc r0, sr
-
-#define STI() \
- mov.l __INV_IMASK, r11; \
- stc sr, r10; \
- and r11, r10; \
- stc k_g_imask, r11; \
- or r11, r10; \
- ldc r10, sr
-
-#if defined(CONFIG_PREEMPT)
-# define preempt_stop() CLI()
-#else
-# define preempt_stop()
-# define resume_kernel restore_all
-#endif
-
#if defined(CONFIG_MMU)
.align 2
ENTRY(tlb_miss_load)
@@ -155,29 +138,14 @@ ENTRY(tlb_protection_violation_store)
call_dpf:
mov.l 1f, r0
- mov r5, r8
- mov.l @r0, r6
- mov r6, r9
- mov.l 2f, r0
- sts pr, r10
- jsr @r0
- mov r15, r4
- !
- tst r0, r0
- bf/s 0f
- lds r10, pr
- rts
- nop
-0: STI()
+ mov.l @r0, r6 ! address
mov.l 3f, r0
- mov r9, r6
- mov r8, r5
+
jmp @r0
- mov r15, r4
+ mov r15, r4 ! regs
.align 2
1: .long MMU_TEA
-2: .long __do_page_fault
3: .long do_page_fault
.align 2
@@ -203,32 +171,6 @@ call_dae:
2: .long do_address_error
#endif /* CONFIG_MMU */
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
-! If both are configured, handle the debug traps (breakpoints) in SW,
-! but still allow BIOS traps to FW.
-
- .align 2
-debug_kernel:
-#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
- /* Force BIOS call to FW (debug_trap put TRA in r8) */
- mov r8,r0
- shlr2 r0
- cmp/eq #0x3f,r0
- bt debug_kernel_fw
-#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
-
-debug_enter:
-#if defined(CONFIG_SH_KGDB)
- /* Jump to kgdb, pass stacked regs as arg */
-debug_kernel_sw:
- mov.l 3f, r0
- jmp @r0
- mov r15, r4
- .align 2
-3: .long kgdb_handle_exception
-#endif /* CONFIG_SH_KGDB */
-
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
debug_kernel_fw:
@@ -269,276 +211,6 @@ debug_kernel_fw:
2: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
-#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
-
-
- .align 2
-debug_trap:
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
- mov #OFF_SR, r0
- mov.l @(r0,r15), r0 ! get status register
- shll r0
- shll r0 ! kernel space?
- bt/s debug_kernel
-#endif
- mov.l @r15, r0 ! Restore R0 value
- mov.l 1f, r8
- jmp @r8
- nop
-
- .align 2
-ENTRY(exception_error)
- !
- STI()
- mov.l 2f, r0
- jmp @r0
- nop
-
-!
- .align 2
-1: .long break_point_trap_software
-2: .long do_exception_error
-
- .align 2
-ret_from_exception:
- preempt_stop()
-ENTRY(ret_from_irq)
- !
- mov #OFF_SR, r0
- mov.l @(r0,r15), r0 ! get status register
- shll r0
- shll r0 ! kernel space?
- bt/s resume_kernel ! Yes, it's from kernel, go back soon
- GET_THREAD_INFO(r8)
-
-#ifdef CONFIG_PREEMPT
- bra resume_userspace
- nop
-ENTRY(resume_kernel)
- mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
- tst r0, r0
- bf noresched
-need_resched:
- mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_NEED_RESCHED, r0 ! need_resched set?
- bt noresched
-
- mov #OFF_SR, r0
- mov.l @(r0,r15), r0 ! get status register
- and #0xf0, r0 ! interrupts off (exception path)?
- cmp/eq #0xf0, r0
- bt noresched
-
- mov.l 1f, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
-
- STI()
- mov.l 2f, r0
- jsr @r0
- nop
- mov #0, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
- CLI()
-
- bra need_resched
- nop
-noresched:
- bra restore_all
- nop
-
- .align 2
-1: .long PREEMPT_ACTIVE
-2: .long schedule
-#endif
-
-ENTRY(resume_userspace)
- ! r8: current_thread_info
- CLI()
- mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
- bt/s restore_all
- tst #_TIF_NEED_RESCHED, r0
-
- .align 2
-work_pending:
- ! r0: current_thread_info->flags
- ! r8: current_thread_info
- ! t: result of "tst #_TIF_NEED_RESCHED, r0"
- bf/s work_resched
- tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
-work_notifysig:
- bt/s restore_all
- mov r15, r4
- mov r12, r5 ! set arg1(save_r0)
- mov r0, r6
- mov.l 2f, r1
- mova restore_all, r0
- jmp @r1
- lds r0, pr
-work_resched:
-#ifndef CONFIG_PREEMPT
- ! gUSA handling
- mov.l @(OFF_SP,r15), r0 ! get user space stack pointer
- mov r0, r1
- shll r0
- bf/s 1f
- shll r0
- bf/s 1f
- mov #OFF_PC, r0
- ! SP >= 0xc0000000 : gUSA mark
- mov.l @(r0,r15), r2 ! get user space PC (program counter)
- mov.l @(OFF_R0,r15), r3 ! end point
- cmp/hs r3, r2 ! r2 >= r3?
- bt 1f
- add r3, r1 ! rewind point #2
- mov.l r1, @(r0,r15) ! reset PC to rewind point #2
- !
-1:
-#endif
- mov.l 1f, r1
- jsr @r1 ! schedule
- nop
- CLI()
- !
- mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
- bt restore_all
- bra work_pending
- tst #_TIF_NEED_RESCHED, r0
-
- .align 2
-1: .long schedule
-2: .long do_notify_resume
-
- .align 2
-syscall_exit_work:
- ! r0: current_thread_info->flags
- ! r8: current_thread_info
- tst #_TIF_SYSCALL_TRACE, r0
- bt/s work_pending
- tst #_TIF_NEED_RESCHED, r0
- STI()
- ! XXX setup arguments...
- mov.l 4f, r0 ! do_syscall_trace
- jsr @r0
- nop
- bra resume_userspace
- nop
-
- .align 2
-syscall_trace_entry:
- ! Yes it is traced.
- ! XXX setup arguments...
- mov.l 4f, r11 ! Call do_syscall_trace which notifies
- jsr @r11 ! superior (will chomp R[0-7])
- nop
- ! Reload R0-R4 from kernel stack, where the
- ! parent may have modified them using
- ! ptrace(POKEUSR). (Note that R0-R2 are
- ! used by the system call handler directly
- ! from the kernel stack anyway, so don't need
- ! to be reloaded here.) This allows the parent
- ! to rewrite system calls and args on the fly.
- mov.l @(OFF_R4,r15), r4 ! arg0
- mov.l @(OFF_R5,r15), r5
- mov.l @(OFF_R6,r15), r6
- mov.l @(OFF_R7,r15), r7 ! arg3
- mov.l @(OFF_R3,r15), r3 ! syscall_nr
- ! Arrange for do_syscall_trace to be called
- ! again as the system call returns.
- mov.l 2f, r10 ! Number of syscalls
- cmp/hs r10, r3
- bf syscall_call
- mov #-ENOSYS, r0
- bra syscall_exit
- mov.l r0, @(OFF_R0,r15) ! Return value
-
-/*
- * Syscall interface:
- *
- * Syscall #: R3
- * Arguments #0 to #3: R4--R7
- * Arguments #4 to #6: R0, R1, R2
- * TRA: (number of arguments + 0x10) x 4
- *
- * This code also handles delegating other traps to the BIOS/gdb stub
- * according to:
- *
- * Trap number
- * (TRA>>2) Purpose
- * -------- -------
- * 0x0-0xf old syscall ABI
- * 0x10-0x1f new syscall ABI
- * 0x20-0xff delegated through debug_trap to BIOS/gdb stub.
- *
- * Note: When we're first called, the TRA value must be shifted
- * right 2 bits in order to get the value that was used as the "trapa"
- * argument.
- */
-
- .align 2
- .globl ret_from_fork
-ret_from_fork:
- mov.l 1f, r8
- jsr @r8
- mov r0, r4
- bra syscall_exit
- nop
- .align 2
-1: .long schedule_tail
- !
-ENTRY(system_call)
- mov.l 1f, r9
- mov.l @r9, r8 ! Read from TRA (Trap Address) Register
- !
- ! Is the trap argument >= 0x20? (TRA will be >= 0x80)
- mov #0x7f, r9
- cmp/hi r9, r8
- bt/s 0f
- mov #OFF_TRA, r9
- add r15, r9
- !
- mov.l r8, @r9 ! set TRA value to tra
- STI()
- ! Call the system call handler through the table.
- ! First check for bad syscall number
- mov r3, r9
- mov.l 2f, r8 ! Number of syscalls
- cmp/hs r8, r9
- bf/s good_system_call
- GET_THREAD_INFO(r8)
-syscall_badsys: ! Bad syscall number
- mov #-ENOSYS, r0
- bra resume_userspace
- mov.l r0, @(OFF_R0,r15) ! Return value
- !
-0:
- bra debug_trap
- nop
- !
-good_system_call: ! Good syscall number
- mov.l @(TI_FLAGS,r8), r8
- mov #_TIF_SYSCALL_TRACE, r10
- tst r10, r8
- bf syscall_trace_entry
- !
-syscall_call:
- shll2 r9 ! x4
- mov.l 3f, r8 ! Load the address of sys_call_table
- add r8, r9
- mov.l @r9, r8
- jsr @r8 ! jump to specific syscall handler
- nop
- mov.l @(OFF_R0,r15), r12 ! save r0
- mov.l r0, @(OFF_R0,r15) ! save the return value
- !
-syscall_exit:
- CLI()
- !
- GET_THREAD_INFO(r8)
- mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_ALLWORK_MASK, r0
- bf syscall_exit_work
restore_all:
mov.l @r15+, r0
mov.l @r15+, r1
@@ -606,7 +278,9 @@ skip_restore:
!
! Calculate new SR value
mov k3, k2 ! original SR value
- mov.l 9f, k1
+ mov #0xf0, k1
+ extu.b k1, k1
+ not k1, k1
and k1, k2 ! Mask original SR value
!
mov k3, k0 ! Calculate IMASK-bits
@@ -632,16 +306,12 @@ skip_restore:
nop
.align 2
-1: .long TRA
-2: .long NR_syscalls
-3: .long sys_call_table
-4: .long do_syscall_trace
5: .long 0x00001000 ! DSP
7: .long 0x30000000
-9:
-__INV_IMASK:
- .long 0xffffff0f ! ~(IMASK)
+! common exception handler
+#include "../../entry-common.S"
+
! Exception Vector Base
!
! Should be aligned page boundary.
@@ -661,9 +331,176 @@ general_exception:
2: .long ret_from_exception
!
!
+
+/* This code makes some assumptions to improve performance.
+ * Make sure they are still true. */
+#if PTRS_PER_PGD != PTRS_PER_PTE
+#error PGD and PTE sizes don't match
+#endif
+
+/* gas doesn't flag impossible values for mov #immediate as an error */
+#if (_PAGE_PRESENT >> 2) > 0x7f
+#error cannot load PAGE_PRESENT as an immediate
+#endif
+#if _PAGE_DIRTY > 0x7f
+#error cannot load PAGE_DIRTY as an immediate
+#endif
+#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
+#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+#define ldmmupteh(r) mov.l 8f, r
+#else
+#define ldmmupteh(r) mov #MMU_PTEH, r
+#endif
+
.balign 1024,0,1024
tlb_miss:
- mov.l 1f, k2
+#ifdef COUNT_EXCEPTIONS
+ ! Increment the counts
+ mov.l 9f, k1
+ mov.l @k1, k2
+ add #1, k2
+ mov.l k2, @k1
+#endif
+
+ ! k0 scratch
+ ! k1 pgd and pte pointers
+ ! k2 faulting address
+ ! k3 pgd and pte index masks
+ ! k4 shift
+
+ ! Load up the pgd entry (k1)
+
+ ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
+
+ mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
+ mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
+
+ mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
+
+ mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
+
+ mov k2, k0 ! 5 MT (latency=0)
+ shld k4, k0 ! 99 EX
+
+ and k3, k0 ! 78 EX
+
+ mov.l @(k0, k1), k1 ! 21 LS (latency=2)
+ mov #-(PAGE_SHIFT-2), k4 ! 6 EX
+
+ ! Load up the pte entry (k2)
+
+ mov k2, k0 ! 5 MT (latency=0)
+ shld k4, k0 ! 99 EX
+
+ tst k1, k1 ! 86 MT
+
+ bt 20f ! 110 BR
+
+ and k3, k0 ! 78 EX
+ mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
+
+ mov.l @(k0, k1), k2 ! 21 LS (latency=2)
+ add k0, k1 ! 49 EX
+
+#ifdef CONFIG_CPU_HAS_PTEA
+ ! Test the entry for present and _PAGE_ACCESSED
+
+ mov #-28, k3 ! 6 EX
+ mov k2, k0 ! 5 MT (latency=0)
+
+ tst k4, k2 ! 68 MT
+ shld k3, k0 ! 99 EX
+
+ bt 20f ! 110 BR
+
+ ! Set PTEA register
+ ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
+ !
+ ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
+
+ and #0xe, k0 ! 79 EX
+
+ mov k0, k3 ! 5 MT (latency=0)
+ mov k2, k0 ! 5 MT (latency=0)
+
+ and #1, k0 ! 79 EX
+
+ or k0, k3 ! 82 EX
+
+ ldmmupteh(k0) ! 9 LS (latency=2)
+ shll2 k4 ! 101 EX _PAGE_ACCESSED
+
+ tst k4, k2 ! 68 MT
+
+ mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
+
+ mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
+
+ ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+#else
+
+ ! Test the entry for present and _PAGE_ACCESSED
+
+ mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
+ tst k4, k2 ! 68 MT
+
+ shll2 k4 ! 101 EX _PAGE_ACCESSED
+ ldmmupteh(k0) ! 9 LS (latency=2)
+
+ bt 20f ! 110 BR
+ tst k4, k2 ! 68 MT
+
+ ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+
+#endif
+
+ ! Set up the entry
+
+ and k2, k3 ! 78 EX
+ bt/s 10f ! 108 BR
+
+ mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
+
+ ldtlb ! 128 CO
+
+ ! At least one instruction between ldtlb and rte
+ nop ! 119 NOP
+
+ rte ! 126 CO
+
+ nop ! 119 NOP
+
+
+10: or k4, k2 ! 82 EX
+
+ ldtlb ! 128 CO
+
+ ! At least one instruction between ldtlb and rte
+ mov.l k2, @k1 ! 27 LS
+
+ rte ! 126 CO
+
+ ! Note we cannot execute mov here, because it is executed after
+ ! restoring SSR, so would be executed in user space.
+ nop ! 119 NOP
+
+
+ .align 5
+ ! One cache line if possible...
+1: .long swapper_pg_dir
+4: .short (PTRS_PER_PGD-1) << 2
+5: .short _PAGE_PRESENT
+7: .long _PAGE_FLAGS_HARDWARE_MASK
+8: .long MMU_PTEH
+#ifdef COUNT_EXCEPTIONS
+9: .long exception_count_miss
+#endif
+
+ ! Either pgd or pte not present
+20: mov.l 1f, k2
mov.l 4f, k3
bra handle_exception
mov.l @k2, k2
@@ -710,8 +547,9 @@ ENTRY(handle_exception)
bt/s 1f ! It's a kernel to kernel transition.
mov r15, k0 ! save original stack to k0
/* User space to kernel */
- mov #(THREAD_SIZE >> 8), k1
+ mov #(THREAD_SIZE >> 10), k1
shll8 k1 ! k1 := THREAD_SIZE
+ shll2 k1
add current, k1
mov k1, r15 ! change to kernel stack
!
@@ -761,7 +599,7 @@ skip_save:
! Save the user registers on the stack.
mov.l k2, @-r15 ! EXPEVT
- mov #-1, k4
+ mov #-1, k4
mov.l k4, @-r15 ! set TRA (default: -1)
!
sts.l macl, @-r15
@@ -813,6 +651,15 @@ skip_save:
bf interrupt_exception
shlr2 r8
shlr r8
+
+#ifdef COUNT_EXCEPTIONS
+ mov.l 5f, r9
+ add r8, r9
+ mov.l @r9, r10
+ add #1, r10
+ mov.l r10, @r9
+#endif
+
mov.l 4f, r9
add r8, r9
mov.l @r9, r9
@@ -826,6 +673,9 @@ skip_save:
2: .long 0x000080f0 ! FD=1, IMASK=15
3: .long 0xcfffffff ! RB=0, BL=0
4: .long exception_handling_table
+#ifdef COUNT_EXCEPTIONS
+5: .long exception_count_table
+#endif
interrupt_exception:
mov.l 1f, r9
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 8dbf3895ece..6e415baf04b 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -2,7 +2,8 @@
# Makefile for the Linux/SuperH SH-4 backends.
#
-obj-y := ex.o probe.o
+obj-y := ex.o probe.o common.o
+common-y += $(addprefix ../sh3/, entry.o)
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index bfdf5fe8d94..fa2019aabd7 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -97,7 +97,7 @@ static void shoc_clk_recalc(struct clk *clk)
static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
{
- struct clk *bclk = clk_get("bus_clk");
+ struct clk *bclk = clk_get(NULL, "bus_clk");
unsigned long bclk_rate = clk_get_rate(bclk);
clk_put(bclk);
@@ -151,7 +151,7 @@ static struct clk *sh4202_onchip_clocks[] = {
static int __init sh4202_clk_init(void)
{
- struct clk *clk = clk_get("master_clk");
+ struct clk *clk = clk_get(NULL, "master_clk");
int i;
for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
index 93ad367342c..9e6a216750c 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh7780.c
@@ -98,7 +98,7 @@ static struct clk *sh7780_onchip_clocks[] = {
static int __init sh7780_clk_init(void)
{
- struct clk *clk = clk_get("master_clk");
+ struct clk *clk = clk_get(NULL, "master_clk");
int i;
for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index f486c07e10e..7624677f662 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -282,11 +282,8 @@ ieee_fpe_handler (struct pt_regs *regs)
grab_fpu(regs);
restore_fpu(tsk);
set_tsk_thread_flag(tsk, TIF_USEDFPU);
- } else {
- tsk->thread.trap_no = 11;
- tsk->thread.error_code = 0;
+ } else
force_sig(SIGFPE, tsk);
- }
regs->pc = nextpc;
return 1;
@@ -296,29 +293,29 @@ ieee_fpe_handler (struct pt_regs *regs)
}
asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7, struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
- if (ieee_fpe_handler (&regs))
+ if (ieee_fpe_handler(regs))
return;
- regs.pc += 2;
- save_fpu(tsk, &regs);
- tsk->thread.trap_no = 11;
- tsk->thread.error_code = 0;
+ regs->pc += 2;
+ save_fpu(tsk, regs);
force_sig(SIGFPE, tsk);
}
asmlinkage void
do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7, struct pt_regs regs)
+ unsigned long r7, struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct task_struct *tsk = current;
- grab_fpu(&regs);
- if (!user_mode(&regs)) {
+ grab_fpu(regs);
+ if (!user_mode(regs)) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
return;
}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index c294de1e14a..afe0f1b1c03 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -79,16 +79,16 @@ int __init detect_cpu_and_cache_system(void)
case 0x205:
cpu_data->type = CPU_SH7750;
cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
- CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+ CPU_HAS_PERF_COUNTER;
break;
case 0x206:
cpu_data->type = CPU_SH7750S;
cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
- CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+ CPU_HAS_PERF_COUNTER;
break;
case 0x1100:
cpu_data->type = CPU_SH7751;
- cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+ cpu_data->flags |= CPU_HAS_FPU;
break;
case 0x2000:
cpu_data->type = CPU_SH73180;
@@ -126,23 +126,22 @@ int __init detect_cpu_and_cache_system(void)
break;
case 0x8000:
cpu_data->type = CPU_ST40RA;
- cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+ cpu_data->flags |= CPU_HAS_FPU;
break;
case 0x8100:
cpu_data->type = CPU_ST40GX1;
- cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+ cpu_data->flags |= CPU_HAS_FPU;
break;
case 0x700:
cpu_data->type = CPU_SH4_501;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
- cpu_data->flags |= CPU_HAS_PTEA;
break;
case 0x600:
cpu_data->type = CPU_SH4_202;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
- cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+ cpu_data->flags |= CPU_HAS_FPU;
break;
case 0x500 ... 0x501:
switch (prr) {
@@ -160,7 +159,7 @@ int __init detect_cpu_and_cache_system(void)
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
- cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+ cpu_data->flags |= CPU_HAS_FPU;
break;
default:
@@ -173,6 +172,10 @@ int __init detect_cpu_and_cache_system(void)
cpu_data->dcache.ways = 1;
#endif
+#ifdef CONFIG_CPU_HAS_PTEA
+ cpu_data->flags |= CPU_HAS_PTEA;
+#endif
+
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 50812d57c1c..bbcb06f18b0 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -2,6 +2,7 @@
* SH7750/SH7751 Setup
*
* Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 Jamie Lenehan
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -10,6 +11,7 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
+#include <linux/io.h>
#include <asm/sci.h>
static struct plat_sci_port sci_platform_data[] = {
@@ -46,3 +48,71 @@ static int __init sh7750_devices_setup(void)
ARRAY_SIZE(sh7750_devices));
}
__initcall(sh7750_devices_setup);
+
+static struct ipr_data sh7750_ipr_map[] = {
+ /* IRQ, IPR-idx, shift, priority */
+ { 16, 0, 12, 2 }, /* TMU0 TUNI*/
+ { 17, 0, 12, 2 }, /* TMU1 TUNI */
+ { 18, 0, 4, 2 }, /* TMU2 TUNI */
+ { 19, 0, 4, 2 }, /* TMU2 TIPCI */
+ { 27, 1, 12, 2 }, /* WDT ITI */
+ { 20, 0, 0, 2 }, /* RTC ATI (alarm) */
+ { 21, 0, 0, 2 }, /* RTC PRI (period) */
+ { 22, 0, 0, 2 }, /* RTC CUI (carry) */
+ { 23, 1, 4, 3 }, /* SCI ERI */
+ { 24, 1, 4, 3 }, /* SCI RXI */
+ { 25, 1, 4, 3 }, /* SCI TXI */
+ { 40, 2, 4, 3 }, /* SCIF ERI */
+ { 41, 2, 4, 3 }, /* SCIF RXI */
+ { 42, 2, 4, 3 }, /* SCIF BRI */
+ { 43, 2, 4, 3 }, /* SCIF TXI */
+ { 34, 2, 8, 7 }, /* DMAC DMTE0 */
+ { 35, 2, 8, 7 }, /* DMAC DMTE1 */
+ { 36, 2, 8, 7 }, /* DMAC DMTE2 */
+ { 37, 2, 8, 7 }, /* DMAC DMTE3 */
+ { 28, 2, 8, 7 }, /* DMAC DMAE */
+};
+
+static struct ipr_data sh7751_ipr_map[] = {
+ { 44, 2, 8, 7 }, /* DMAC DMTE4 */
+ { 45, 2, 8, 7 }, /* DMAC DMTE5 */
+ { 46, 2, 8, 7 }, /* DMAC DMTE6 */
+ { 47, 2, 8, 7 }, /* DMAC DMTE7 */
+ /* The following use INTC_INPRI00 for masking, which is a 32-bit
+ register, not a 16-bit register like the IPRx registers, so it
+ would need special support */
+ /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */
+ /*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */
+};
+
+static unsigned long ipr_offsets[] = {
+ 0xffd00004UL, /* 0: IPRA */
+ 0xffd00008UL, /* 1: IPRB */
+ 0xffd0000cUL, /* 2: IPRC */
+ 0xffd00010UL, /* 3: IPRD */
+};
+
+/* given the IPR index return the address of the IPR register */
+unsigned int map_ipridx_to_addr(int idx)
+{
+ if (idx >= ARRAY_SIZE(ipr_offsets))
+ return 0;
+ return ipr_offsets[idx];
+}
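+
+For illustration, one way an sh7750_ipr_map entry could be applied using the lookup above; the helper and its parameter names are assumptions made for the example, not part of the patch.
+
+	/* Sketch only: programming one { irq, ipr_idx, shift, priority } entry. */
+	static void __init program_one_ipr_entry(int ipr_idx, int shift, int priority)
+	{
+		unsigned int addr = map_ipridx_to_addr(ipr_idx);
+		u16 val;
+
+		if (!addr)
+			return;
+		val = ctrl_inw(addr);
+		val &= ~(0xf << shift);			/* clear the 4-bit field */
+		val |= (priority & 0xf) << shift;	/* e.g. IRQ 16: IPRA[15:12] = 2 */
+		ctrl_outw(val, addr);
+	}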
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1<<7)
+
+/* enable individual interrupt mode for external interrupts */
+void ipr_irq_enable_irlm(void)
+{
+ ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+}
+
+void __init init_IRQ_ipr(void)
+{
+ make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map));
+#ifdef CONFIG_CPU_SUBTYPE_SH7751
+ make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map));
+#endif
+}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
index 814ddb22653..9aeaa2ddaa2 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
@@ -79,25 +79,27 @@ static int __init sh7780_devices_setup(void)
__initcall(sh7780_devices_setup);
static struct intc2_data intc2_irq_table[] = {
- { TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 },
- { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
- { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY },
- { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY },
- { SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
- { SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
- { SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
- { SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
+ { 28, 0, 24, 0, 0, 2 }, /* TMU0 */
- { SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
- { SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
- { SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
- { SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
+ { 21, 1, 0, 0, 2, 2 },
+ { 22, 1, 1, 0, 2, 2 },
+ { 23, 1, 2, 0, 2, 2 },
- { PCIC0_IRQ, 0x10, 8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY },
- { PCIC1_IRQ, 0x10, 0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY },
- { PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY },
- { PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY },
- { PCIC4_IRQ, 0x14, 8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY },
+ { 40, 8, 24, 0, 3, 3 }, /* SCIF0 ERI */
+ { 41, 8, 24, 0, 3, 3 }, /* SCIF0 RXI */
+ { 42, 8, 24, 0, 3, 3 }, /* SCIF0 BRI */
+ { 43, 8, 24, 0, 3, 3 }, /* SCIF0 TXI */
+
+ { 76, 8, 16, 0, 4, 3 }, /* SCIF1 ERI */
+ { 77, 8, 16, 0, 4, 3 }, /* SCIF1 RXI */
+ { 78, 8, 16, 0, 4, 3 }, /* SCIF1 BRI */
+ { 79, 8, 16, 0, 4, 3 }, /* SCIF1 TXI */
+
+ { 64, 0x10, 8, 0, 14, 2 }, /* PCIC0 */
+ { 65, 0x10, 0, 0, 15, 2 }, /* PCIC1 */
+ { 66, 0x14, 24, 0, 16, 2 }, /* PCIC2 */
+ { 67, 0x14, 16, 0, 17, 2 }, /* PCIC3 */
+ { 68, 0x14, 8, 0, 18, 2 }, /* PCIC4 */
};
void __init init_IRQ_intc2(void)
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 7bcc73f9b8d..0c9ea38d2ca 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu/sq.h>
@@ -38,7 +38,7 @@ struct sq_mapping {
static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
-static kmem_cache_t *sq_cache;
+static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
#define store_queue_barrier() \
@@ -67,6 +67,7 @@ void sq_flush_range(unsigned long start, unsigned int len)
/* Wait for completion */
store_queue_barrier();
}
+EXPORT_SYMBOL(sq_flush_range);
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
@@ -166,7 +167,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
map->size = size;
map->name = name;
- page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+ page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
get_order(map->size));
if (unlikely(page < 0)) {
ret = -ENOSPC;
@@ -193,6 +194,7 @@ out:
kmem_cache_free(sq_cache, map);
return ret;
}
+EXPORT_SYMBOL(sq_remap);
/**
* sq_unmap - Unmap a Store Queue allocation
@@ -234,6 +236,7 @@ void sq_unmap(unsigned long vaddr)
kmem_cache_free(sq_cache, map);
}
+EXPORT_SYMBOL(sq_unmap);
/*
* Needlessly complex sysfs interface. Unfortunately it doesn't seem like
@@ -402,7 +405,3 @@ module_exit(sq_api_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sq_remap);
-EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_flush_range);
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index a00022722e9..60340823798 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -12,7 +12,7 @@
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
#ifdef CONFIG_SH_STANDARD_BIOS
#include <asm/sh_bios.h>
@@ -62,17 +62,9 @@ static struct console bios_console = {
#include <linux/serial_core.h>
#include "../../../drivers/serial/sh-sci.h"
-#ifdef CONFIG_CPU_SH4
-#define SCIF_REG 0xffe80000
-#elif defined(CONFIG_CPU_SUBTYPE_SH72060)
-#define SCIF_REG 0xfffe9800
-#else
-#error "Undefined SCIF for this subtype"
-#endif
-
static struct uart_port scif_port = {
- .mapbase = SCIF_REG,
- .membase = (char __iomem *)SCIF_REG,
+ .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
+ .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
};
static void scif_sercon_putc(int c)
@@ -113,23 +105,29 @@ static struct console scif_console = {
.index = -1,
};
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
+/*
+ * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
+ * devices that aren't using sh-ipl+g.
+ */
static void scif_sercon_init(int baud)
{
- ctrl_outw(0, SCIF_REG + 8);
- ctrl_outw(0, SCIF_REG);
+ ctrl_outw(0, scif_port.mapbase + 8);
+ ctrl_outw(0, scif_port.mapbase);
/* Set baud rate */
ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
- (32 * baud) - 1, SCIF_REG + 4);
-
- ctrl_outw(12, SCIF_REG + 24);
- ctrl_outw(8, SCIF_REG + 24);
- ctrl_outw(0, SCIF_REG + 32);
- ctrl_outw(0x60, SCIF_REG + 16);
- ctrl_outw(0, SCIF_REG + 36);
- ctrl_outw(0x30, SCIF_REG + 8);
+ (32 * baud) - 1, scif_port.mapbase + 4);
+
+ ctrl_outw(12, scif_port.mapbase + 24);
+ ctrl_outw(8, scif_port.mapbase + 24);
+ ctrl_outw(0, scif_port.mapbase + 32);
+ ctrl_outw(0x60, scif_port.mapbase + 16);
+ ctrl_outw(0, scif_port.mapbase + 36);
+ ctrl_outw(0x30, scif_port.mapbase + 8);
}
-#endif
+#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */
+#endif /* CONFIG_EARLY_SCIF_CONSOLE */
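The value written at offset +4 in scif_sercon_init() above is the usual SCIF bit-rate divisor with rounding; a worked example, assuming a 33.33 MHz peripheral clock (the real figure comes from CONFIG_SH_PCLK_FREQ):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pclk = 33333333;	/* assumed peripheral clock */
		unsigned long baud = 115200;
		unsigned long scbrr = (pclk + 16 * baud) / (32 * baud) - 1;

		printf("SCBRR = %lu\n", scbrr);	/* 8 for these inputs */
		return 0;
	}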
/*
* Setup a default console, if more than one is compiled in, rely on the
@@ -168,7 +166,7 @@ int __init setup_early_printk(char *opt)
if (!strncmp(buf, "serial", 6)) {
early_console = &scif_console;
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
scif_sercon_init(115200);
#endif
}
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
new file mode 100644
index 00000000000..29136a35d7c
--- /dev/null
+++ b/arch/sh/kernel/entry-common.S
@@ -0,0 +1,433 @@
+/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
+ *
+ * linux/arch/sh/entry.S
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the branch
+! target address is too far away, but this causes an illegal slot exception.
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ * jmp @k0 ! control-transfer instruction
+ * ldc k1, ssr ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in ptrace.c and ptrace.h
+ *
+ * r0
+ * ...
+ * r15 = stack pointer
+ * spc
+ * pr
+ * ssr
+ * gbr
+ * mach
+ * macl
+ * syscall #
+ *
+ */
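+
+Read from low to high addresses, the layout listed above corresponds roughly to the following C view of the saved frame; the member names are only an illustration of the ordering, not a definition taken from this patch.
+
+	/* Illustrative only: the ret_from_syscall stack frame, low to high. */
+	struct saved_frame_sketch {
+		unsigned long regs[16];		/* r0 .. r15 (r15 = stack pointer) */
+		unsigned long pc;		/* spc */
+		unsigned long pr;
+		unsigned long sr;		/* ssr */
+		unsigned long gbr;
+		unsigned long mach;
+		unsigned long macl;
+		long tra;			/* syscall # */
+	};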
+
+#if defined(CONFIG_PREEMPT)
+# define preempt_stop() cli
+#else
+# define preempt_stop()
+# define resume_kernel __restore_all
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
+! If both are configured, handle the debug traps (breakpoints) in SW,
+! but still allow BIOS traps to FW.
+
+ .align 2
+debug_kernel:
+#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
+ /* Force BIOS call to FW (debug_trap put TRA in r8) */
+ mov r8,r0
+ shlr2 r0
+ cmp/eq #0x3f,r0
+ bt debug_kernel_fw
+#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
+
+debug_enter:
+#if defined(CONFIG_SH_KGDB)
+ /* Jump to kgdb, pass stacked regs as arg */
+debug_kernel_sw:
+ mov.l 3f, r0
+ jmp @r0
+ mov r15, r4
+ .align 2
+3: .long kgdb_handle_exception
+#endif /* CONFIG_SH_KGDB */
+
+#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
+
+
+ .align 2
+debug_trap:
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
+ bt/s debug_kernel
+#endif
+ mov.l @r15, r0 ! Restore R0 value
+ mov.l 1f, r8
+ jmp @r8
+ nop
+
+ .align 2
+ENTRY(exception_error)
+ !
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 3f, r0
+ jsr @r0
+ nop
+#endif
+ sti
+ mov.l 2f, r0
+ jmp @r0
+ nop
+
+!
+ .align 2
+1: .long break_point_trap_software
+2: .long do_exception_error
+#ifdef CONFIG_TRACE_IRQFLAGS
+3: .long trace_hardirqs_on
+#endif
+
+ .align 2
+ret_from_exception:
+ preempt_stop()
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 4f, r0
+ jsr @r0
+ nop
+#endif
+ENTRY(ret_from_irq)
+ !
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
+ get_current_thread_info r8, r0
+ bt resume_kernel ! Yes, it's from kernel, go back soon
+
+#ifdef CONFIG_PREEMPT
+ bra resume_userspace
+ nop
+ENTRY(resume_kernel)
+ mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
+ tst r0, r0
+ bf noresched
+need_resched:
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #_TIF_NEED_RESCHED, r0 ! need_resched set?
+ bt noresched
+
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ and #0xf0, r0 ! interrupts off (exception path)?
+ cmp/eq #0xf0, r0
+ bt noresched
+
+ mov.l 1f, r0
+ mov.l r0, @(TI_PRE_COUNT,r8)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 3f, r0
+ jsr @r0
+ nop
+#endif
+ sti
+ mov.l 2f, r0
+ jsr @r0
+ nop
+ mov #0, r0
+ mov.l r0, @(TI_PRE_COUNT,r8)
+ cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 4f, r0
+ jsr @r0
+ nop
+#endif
+
+ bra need_resched
+ nop
+
+noresched:
+ bra __restore_all
+ nop
+
+ .align 2
+1: .long PREEMPT_ACTIVE
+2: .long schedule
+#ifdef CONFIG_TRACE_IRQFLAGS
+3: .long trace_hardirqs_on
+4: .long trace_hardirqs_off
+#endif
+#endif
+
+ENTRY(resume_userspace)
+ ! r8: current_thread_info
+ cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 5f, r0
+ jsr @r0
+ nop
+#endif
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #_TIF_WORK_MASK, r0
+ bt/s __restore_all
+ tst #_TIF_NEED_RESCHED, r0
+
+ .align 2
+work_pending:
+ ! r0: current_thread_info->flags
+ ! r8: current_thread_info
+ ! t: result of "tst #_TIF_NEED_RESCHED, r0"
+ bf/s work_resched
+ tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+work_notifysig:
+ bt/s __restore_all
+ mov r15, r4
+ mov r12, r5 ! set arg1(save_r0)
+ mov r0, r6
+ mov.l 2f, r1
+ mov.l 3f, r0
+ jmp @r1
+ lds r0, pr
+work_resched:
+#ifndef CONFIG_PREEMPT
+ ! gUSA handling
+ mov.l @(OFF_SP,r15), r0 ! get user space stack pointer
+ mov r0, r1
+ shll r0
+ bf/s 1f
+ shll r0
+ bf/s 1f
+ mov #OFF_PC, r0
+ ! SP >= 0xc0000000 : gUSA mark
+ mov.l @(r0,r15), r2 ! get user space PC (program counter)
+ mov.l @(OFF_R0,r15), r3 ! end point
+ cmp/hs r3, r2 ! r2 >= r3?
+ bt 1f
+ add r3, r1 ! rewind point #2
+ mov.l r1, @(r0,r15) ! reset PC to rewind point #2
+ !
+1:
+#endif
+ mov.l 1f, r1
+ jsr @r1 ! schedule
+ nop
+ cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 5f, r0
+ jsr @r0
+ nop
+#endif
+ !
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #_TIF_WORK_MASK, r0
+ bt __restore_all
+ bra work_pending
+ tst #_TIF_NEED_RESCHED, r0
+
+ .align 2
+1: .long schedule
+2: .long do_notify_resume
+3: .long restore_all
+#ifdef CONFIG_TRACE_IRQFLAGS
+4: .long trace_hardirqs_on
+5: .long trace_hardirqs_off
+#endif
+
+ .align 2
+syscall_exit_work:
+ ! r0: current_thread_info->flags
+ ! r8: current_thread_info
+ tst #_TIF_SYSCALL_TRACE, r0
+ bt/s work_pending
+ tst #_TIF_NEED_RESCHED, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 5f, r0
+ jsr @r0
+ nop
+#endif
+ sti
+ ! XXX setup arguments...
+ mov.l 4f, r0 ! do_syscall_trace
+ jsr @r0
+ nop
+ bra resume_userspace
+ nop
+
+ .align 2
+syscall_trace_entry:
+ ! Yes it is traced.
+ ! XXX setup arguments...
+ mov.l 4f, r11 ! Call do_syscall_trace which notifies
+ jsr @r11 ! superior (will chomp R[0-7])
+ nop
+ ! Reload R0-R4 from kernel stack, where the
+ ! parent may have modified them using
+ ! ptrace(POKEUSR). (Note that R0-R2 are
+ ! used by the system call handler directly
+ ! from the kernel stack anyway, so don't need
+ ! to be reloaded here.) This allows the parent
+ ! to rewrite system calls and args on the fly.
+ mov.l @(OFF_R4,r15), r4 ! arg0
+ mov.l @(OFF_R5,r15), r5
+ mov.l @(OFF_R6,r15), r6
+ mov.l @(OFF_R7,r15), r7 ! arg3
+ mov.l @(OFF_R3,r15), r3 ! syscall_nr
+ !
+ mov.l 2f, r10 ! Number of syscalls
+ cmp/hs r10, r3
+ bf syscall_call
+ mov #-ENOSYS, r0
+ bra syscall_exit
+ mov.l r0, @(OFF_R0,r15) ! Return value
+
+__restore_all:
+ mov.l 1f, r0
+ jmp @r0
+ nop
+
+ .align 2
+1: .long restore_all
+
+ .align 2
+not_syscall_tra:
+ bra debug_trap
+ nop
+
+ .align 2
+syscall_badsys: ! Bad syscall number
+ mov #-ENOSYS, r0
+ bra resume_userspace
+ mov.l r0, @(OFF_R0,r15) ! Return value
+
+
+/*
+ * Syscall interface:
+ *
+ * Syscall #: R3
+ * Arguments #0 to #3: R4--R7
+ * Arguments #4 to #6: R0, R1, R2
+ * TRA: (number of arguments + 0x10) x 4
+ *
+ * This code also handles delegating other traps to the BIOS/gdb stub
+ * according to:
+ *
+ * Trap number
+ * (TRA>>2) Purpose
+ * -------- -------
+ * 0x0-0xf old syscall ABI
+ * 0x10-0x1f new syscall ABI
+ * 0x20-0xff delegated through debug_trap to BIOS/gdb stub.
+ *
+ * Note: When we're first called, the TRA value must be shifted
+ * right 2 bits in order to get the value that was used as the "trapa"
+ * argument.
+ */
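+
+A worked reading of the encoding above, with illustrative numbers (a hypothetical three-argument call):
+
+	/* Worked example (illustrative): a 3-argument new-ABI syscall. */
+	#define TRAPA_IMM	(0x10 + 3)		/* "trapa #0x13" */
+	#define TRA_VALUE	(TRAPA_IMM << 2)	/* 0x4c as read back from TRA */
+	/* TRA_VALUE >> 2 == 0x13 -> 0x10-0x1f range, handled as a syscall; values
+	 * with TRA_VALUE > 0x7f (trap number >= 0x20) take the debug_trap path,
+	 * which is what the 0x7f compare in ENTRY(system_call) below checks. */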
+
+ .align 2
+ .globl ret_from_fork
+ret_from_fork:
+ mov.l 1f, r8
+ jsr @r8
+ mov r0, r4
+ bra syscall_exit
+ nop
+ .align 2
+1: .long schedule_tail
+ !
+ENTRY(system_call)
+#if !defined(CONFIG_CPU_SH2)
+ mov.l 1f, r9
+ mov.l @r9, r8 ! Read from TRA (Trap Address) Register
+#endif
+ !
+ ! Is the trap argument >= 0x20? (TRA will be >= 0x80)
+ mov #0x7f, r9
+ cmp/hi r9, r8
+ bt/s not_syscall_tra
+ mov #OFF_TRA, r9
+ add r15, r9
+ mov.l r8, @r9 ! set TRA value to tra
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 5f, r10
+ jsr @r10
+ nop
+#endif
+ sti
+
+ !
+ get_current_thread_info r8, r10
+ mov.l @(TI_FLAGS,r8), r8
+ mov #_TIF_SYSCALL_TRACE, r10
+ tst r10, r8
+ bf syscall_trace_entry
+ !
+ mov.l 2f, r8 ! Number of syscalls
+ cmp/hs r8, r3
+ bt syscall_badsys
+ !
+syscall_call:
+ shll2 r3 ! x4
+ mov.l 3f, r8 ! Load the address of sys_call_table
+ add r8, r3
+ mov.l @r3, r8
+ jsr @r8 ! jump to specific syscall handler
+ nop
+ mov.l @(OFF_R0,r15), r12 ! save r0
+ mov.l r0, @(OFF_R0,r15) ! save the return value
+ !
+syscall_exit:
+ cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+ mov.l 6f, r0
+ jsr @r0
+ nop
+#endif
+ !
+ get_current_thread_info r8, r0
+ mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
+ tst #_TIF_ALLWORK_MASK, r0
+ bf syscall_exit_work
+ bra __restore_all
+ nop
+ .align 2
+#if !defined(CONFIG_CPU_SH2)
+1: .long TRA
+#endif
+2: .long NR_syscalls
+3: .long sys_call_table
+4: .long do_syscall_trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+5: .long trace_hardirqs_on
+6: .long trace_hardirqs_off
+#endif
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f5f53d14f24..6aca4bc6ec5 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page)
.long 0x00360000 /* INITRD_START */
.long 0x000a0000 /* INITRD_SIZE */
.long 0
- .balign 4096,0,4096
+ .balign PAGE_SIZE,0,PAGE_SIZE
.text
/*
@@ -53,8 +53,10 @@ ENTRY(_stext)
ldc r0, sr
! Initialize global interrupt mask
mov #0, r0
+#ifdef CONFIG_CPU_HAS_SR_RB
ldc r0, r6_bank
-
+#endif
+
/*
* Prefetch if possible to reduce cache miss penalty.
*
@@ -68,11 +70,14 @@ ENTRY(_stext)
!
mov.l 2f, r0
mov r0, r15 ! Set initial r15 (stack pointer)
- mov #(THREAD_SIZE >> 8), r1
+ mov #(THREAD_SIZE >> 10), r1
shll8 r1 ! r1 = THREAD_SIZE
+ shll2 r1
sub r1, r0 !
+#ifdef CONFIG_CPU_HAS_SR_RB
ldc r0, r7_bank ! ... and initial thread_info
-
+#endif
+
! Clear BSS area
mov.l 3f, r1
add #4, r1
@@ -95,7 +100,11 @@ ENTRY(_stext)
nop
.balign 4
+#if defined(CONFIG_CPU_SH2)
+1: .long 0x000000F0 ! IMASK=0xF
+#else
1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+#endif
2: .long init_thread_union+THREAD_SIZE
3: .long __bss_start
4: .long _end
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 944128ce970..67be2b6e8cd 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -12,7 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/io.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
@@ -78,15 +78,16 @@ union irq_ctx {
u32 stack[THREAD_SIZE/sizeof(u32)];
};
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- struct pt_regs *old_regs = set_irq_regs(&regs);
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct pt_regs *old_regs = set_irq_regs(regs);
int irq;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx;
@@ -111,7 +112,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
#endif
#ifdef CONFIG_CPU_HAS_INTEVT
- irq = (ctrl_inl(INTEVT) >> 5) - 16;
+ irq = evt2irq(ctrl_inl(INTEVT));
#else
irq = r4;
#endif
@@ -135,17 +136,24 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_sp = current_stack_pointer;
+ /*
+ * Copy the softirq bits in preempt_count so that the
+ * softirq checks work in the hardirq context.
+ */
+ irqctx->tinfo.preempt_count =
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
__asm__ __volatile__ (
"mov %0, r4 \n"
- "mov r15, r9 \n"
+ "mov r15, r8 \n"
"jsr @%1 \n"
/* switch to the irq stack */
" mov %2, r15 \n"
/* restore the stack (ring zero) */
- "mov r9, r15 \n"
+ "mov r8, r15 \n"
: /* no outputs */
: "r" (irq), "r" (generic_handle_irq), "r" (isp)
- /* XXX: A somewhat excessive clobber list? -PFM */
: "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "t", "pr"
);
@@ -193,7 +201,7 @@ void irq_ctx_init(int cpu)
irqctx->tinfo.task = NULL;
irqctx->tinfo.exec_domain = NULL;
irqctx->tinfo.cpu = cpu;
- irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+ irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
softirq_ctx[cpu] = irqctx;
@@ -239,13 +247,38 @@ asmlinkage void do_softirq(void)
"mov r9, r15 \n"
: /* no outputs */
: "r" (__do_softirq), "r" (isp)
- /* XXX: A somewhat excessive clobber list? -PFM */
: "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
+
+ /*
+ * Shouldn't happen; we returned above if in_interrupt():
+ */
+ WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_CPU_HAS_PINT_IRQ
+ init_IRQ_pint();
+#endif
+
+#ifdef CONFIG_CPU_HAS_INTC2_IRQ
+ init_IRQ_intc2();
+#endif
+
+#ifdef CONFIG_CPU_HAS_IPR_IRQ
+ init_IRQ_ipr();
+#endif
+
+ /* Perform the machine specific initialisation */
+ if (sh_mv.mv_init_irq)
+ sh_mv.mv_init_irq();
+
+ irq_ctx_init(smp_processor_id());
+}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index a52b13ac6b7..f3e2631be14 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -385,10 +385,11 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
#ifdef CONFIG_MMU
- return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
+ return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
/* fork almost works, enough to trick you into looking elsewhere :-( */
return -EINVAL;
@@ -398,11 +399,12 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
unsigned long parent_tidptr,
unsigned long child_tidptr,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
if (!newsp)
- newsp = regs.regs[15];
- return do_fork(clone_flags, newsp, &regs, 0,
+ newsp = regs->regs[15];
+ return do_fork(clone_flags, newsp, regs, 0,
(int __user *)parent_tidptr, (int __user *)child_tidptr);
}
@@ -418,9 +420,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
*/
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
0, NULL, NULL);
}
@@ -429,8 +432,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
*/
asmlinkage int sys_execve(char *ufilename, char **uargv,
char **uenvp, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
int error;
char *filename;
@@ -442,7 +446,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv,
error = do_execve(filename,
(char __user * __user *)uargv,
(char __user * __user *)uenvp,
- &regs);
+ regs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;
@@ -472,9 +476,7 @@ unsigned long get_wchan(struct task_struct *p)
return pc;
}
-asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+asmlinkage void break_point_trap(void)
{
/* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
@@ -492,8 +494,10 @@ asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- regs.pc -= 2;
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+ regs->pc -= 2;
force_sig(SIGTRAP, current);
}
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index 8221b37c977..c66cb3209db 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -7,11 +7,9 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
#include <linux/linkage.h>
-
-#define PAGE_SIZE 4096 /* must be same value as in <asm/page.h> */
-
+#include <asm/addrspace.h>
+#include <asm/page.h>
.globl relocate_new_kernel
relocate_new_kernel:
@@ -20,8 +18,8 @@ relocate_new_kernel:
/* r6 = start_address */
/* r7 = vbr_reg */
- mov.l 10f,r8 /* 4096 */
- mov.l 11f,r9 /* 0xa0000000 */
+ mov.l 10f,r8 /* PAGE_SIZE */
+ mov.l 11f,r9 /* P2SEG */
/* stack setting */
add r8,r5
@@ -32,7 +30,7 @@ relocate_new_kernel:
0:
mov.l @r4+,r0 /* cmd = *ind++ */
-1: /* addr = (cmd | 0xa0000000) & 0xfffffff0 */
+1: /* addr = (cmd | P2SEG) & 0xfffffff0 */
mov r0,r2
or r9,r2
mov #-16,r1
@@ -92,7 +90,7 @@ relocate_new_kernel:
10:
.long PAGE_SIZE
11:
- .long 0xa0000000
+ .long P2SEG
relocate_new_kernel_end:
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 36d86f9ac38..f8dd6b7bfab 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -332,8 +332,7 @@ void __init setup_arch(char **cmdline_p)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
- initrd_start =
- INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+ initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
printk("initrd extends beyond end of memory "
@@ -392,6 +391,7 @@ static int __init topology_init(void)
subsys_initcall(topology_init);
static const char *cpu_name[] = {
+ [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
[CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300",
[CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
[CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
@@ -404,6 +404,7 @@ static const char *cpu_name[] = {
[CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
[CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780",
[CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343",
+ [CPU_SH7785] = "SH7785",
[CPU_SH_NONE] = "Unknown"
};
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 9daad70bc30..ceee7914340 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -18,7 +18,6 @@
#include <asm/delay.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-#include <asm/checksum.h>
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
extern struct hw_interrupt_type no_irq_type;
@@ -74,8 +73,6 @@ DECLARE_EXPORT(__lshrdi3);
DECLARE_EXPORT(__movstr);
DECLARE_EXPORT(__movstrSI16);
-EXPORT_SYMBOL(strcpy);
-
#ifdef CONFIG_CPU_SH4
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
@@ -102,10 +99,6 @@ EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(synchronize_irq);
#endif
-#ifdef CONFIG_PM
-EXPORT_SYMBOL(pm_suspend);
-#endif
-
EXPORT_SYMBOL(csum_partial);
#ifdef CONFIG_IPV6
EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 5213f5bc6ce..bb1c480a59c 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -23,6 +23,7 @@
#include <linux/elf.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
+#include <linux/freezer.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
@@ -37,7 +38,7 @@
asmlinkage int
sys_sigsuspend(old_sigset_t mask,
unsigned long r5, unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
@@ -52,7 +53,7 @@ sys_sigsuspend(old_sigset_t mask,
return -ERESTARTNOHAND;
}
-asmlinkage int
+asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
@@ -87,9 +88,11 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- return do_sigaltstack(uss, uoss, regs.regs[15]);
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+ return do_sigaltstack(uss, uoss, regs->regs[15]);
}
@@ -98,7 +101,11 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
*/
#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
-#define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */
+#else
+#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */
+#endif
#define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */
struct sigframe
@@ -194,9 +201,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15];
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
sigset_t set;
int r0;
@@ -216,7 +224,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(&regs, &frame->sc, &r0))
+ if (restore_sigcontext(regs, &frame->sc, &r0))
goto badframe;
return r0;
@@ -227,9 +235,10 @@ badframe:
asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
- struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15];
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
sigset_t set;
stack_t st;
int r0;
@@ -246,14 +255,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
- do_sigaltstack(&st, NULL, regs.regs[15]);
+ do_sigaltstack(&st, NULL, regs->regs[15]);
return r0;
@@ -350,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
} else {
/* Generate return code (system call to sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
- err |= __put_user(TRAP16, &frame->retcode[1]);
+ err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
err |= __put_user(OR_R0_R0, &frame->retcode[2]);
err |= __put_user(OR_R0_R0, &frame->retcode[3]);
err |= __put_user(OR_R0_R0, &frame->retcode[4]);
@@ -430,7 +439,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
} else {
/* Generate return code (system call to rt_sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
- err |= __put_user(TRAP16, &frame->retcode[1]);
+ err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
err |= __put_user(OR_R0_R0, &frame->retcode[2]);
err |= __put_user(OR_R0_R0, &frame->retcode[3]);
err |= __put_user(OR_R0_R0, &frame->retcode[4]);
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
new file mode 100644
index 00000000000..0d5268afe80
--- /dev/null
+++ b/arch/sh/kernel/stacktrace.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+ unsigned long *sp;
+
+ if (!task)
+ task = current;
+ if (task == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)task->thread.sp;
+
+ while (!kstack_end(sp)) {
+ unsigned long addr = *sp++;
+
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
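+
+A minimal usage sketch of the interface above, assuming the generic struct stack_trace of this era; the buffer and sizes are invented for the example.
+
+	static unsigned long trace_buf[16];
+
+	static void dump_current_stack_sketch(void)
+	{
+		struct stack_trace trace = {
+			.entries	= trace_buf,
+			.max_entries	= ARRAY_SIZE(trace_buf),
+			.skip		= 1,		/* drop this frame itself */
+		};
+
+		save_stack_trace(&trace, NULL);		/* NULL means "current" here */
+		/* trace.nr_entries return addresses now sit in trace_buf[] */
+	}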
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8fde95001c3..5083b6ed4b3 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -33,14 +33,15 @@
*/
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
int fd[2];
int error;
error = do_pipe(fd);
if (!error) {
- regs.regs[1] = fd[1];
+ regs->regs[1] = fd[1];
return fd[0];
}
return error;
@@ -50,6 +51,7 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
+#ifdef CONFIG_MMU
/*
* To avoid cache aliases, we map the shared page with same color.
*/
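The colouring rule referred to above is applied by COLOUR_ALIGN() further down; a sketch of the usual idiom (an assumption modelled on similar arch code, not quoted from this file):

	/* Sketch: round addr up, then bias it so its low "colour" bits match
	 * those of the file offset being mapped, avoiding D-cache aliases. */
	#define COLOUR_ALIGN_SKETCH(addr, pgoff)			\
		((((addr) + shm_align_mask) & ~shm_align_mask) +	\
		 (((pgoff) << PAGE_SHIFT) & shm_align_mask))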
@@ -135,6 +137,7 @@ full_search:
addr = COLOUR_ALIGN(addr, pgoff);
}
}
+#endif /* CONFIG_MMU */
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 57e708d7b52..c206c9504c4 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
#include <asm/clock.h>
#include <asm/rtc.h>
#include <asm/timer.h>
@@ -50,15 +52,20 @@ unsigned long long __attribute__ ((weak)) sched_clock(void)
#ifndef CONFIG_GENERIC_TIME
void do_gettimeofday(struct timeval *tv)
{
+ unsigned long flags;
unsigned long seq;
unsigned long usec, sec;
do {
- seq = read_seqbegin(&xtime_lock);
+ /*
+ * Turn off IRQs when grabbing xtime_lock, so that
+ * the sys_timer get_offset code doesn't have to handle it.
+ */
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = get_timer_offset();
sec = xtime.tv_sec;
- usec += xtime.tv_nsec / 1000;
- } while (read_seqretry(&xtime_lock, seq));
+ usec += xtime.tv_nsec / NSEC_PER_USEC;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
while (usec >= 1000000) {
usec -= 1000000;
@@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv)
* wall time. Discover what correction gettimeofday() would have
* made, and then undo it!
*/
- nsec -= 1000 * get_timer_offset();
+ nsec -= get_timer_offset() * NSEC_PER_USEC;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -169,6 +176,108 @@ static struct sysdev_class timer_sysclass = {
.resume = timer_resume,
};
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+ struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+ unsigned long flags;
+ int ret = -ENODEV;
+
+ if (dyn_tick) {
+ spin_lock_irqsave(&dyn_tick->lock, flags);
+ ret = 0;
+ if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+ ret = dyn_tick->enable();
+
+ if (ret == 0)
+ dyn_tick->state |= DYN_TICK_ENABLED;
+ }
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
+ }
+
+ return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+ struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+ unsigned long flags;
+ int ret = -ENODEV;
+
+ if (dyn_tick) {
+ spin_lock_irqsave(&dyn_tick->lock, flags);
+ ret = 0;
+ if (dyn_tick->state & DYN_TICK_ENABLED) {
+ ret = dyn_tick->disable();
+
+ if (ret == 0)
+ dyn_tick->state &= ~DYN_TICK_ENABLED;
+ }
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
+void timer_dyn_reprogram(void)
+{
+ struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+ unsigned long next, seq, flags;
+
+ if (!dyn_tick)
+ return;
+
+ spin_lock_irqsave(&dyn_tick->lock, flags);
+ if (dyn_tick->state & DYN_TICK_ENABLED) {
+ next = next_timer_interrupt();
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ dyn_tick->reprogram(next - jiffies);
+ } while (read_seqretry(&xtime_lock, seq));
+ }
+ spin_unlock_irqrestore(&dyn_tick->lock, flags);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+ return sprintf(buf, "%i\n",
+ (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+ if (enable)
+ timer_dyn_tick_enable();
+ else
+ timer_dyn_tick_disable();
+
+ return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+ if (str)
+ strlcpy(dyntick_str, str, sizeof(dyntick_str));
+ return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
static int __init timer_init_sysfs(void)
{
int ret = sysdev_class_register(&timer_sysclass);
@@ -176,7 +285,22 @@ static int __init timer_init_sysfs(void)
return ret;
sys_timer->dev.cls = &timer_sysclass;
- return sysdev_register(&sys_timer->dev);
+ ret = sysdev_register(&sys_timer->dev);
+
+#ifdef CONFIG_NO_IDLE_HZ
+ if (ret == 0 && sys_timer->dyn_tick) {
+ ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
+
+ /*
+ * Turn on dynamic tick after calibrate_delay()
+ * has run, so that bogomips is correct
+ */
+ if (ret == 0 && dyntick_str[0] == 'e')
+ ret = timer_dyn_tick_enable();
+ }
+#endif
+
+ return ret;
}
device_initcall(timer_init_sysfs);
@@ -200,6 +324,11 @@ void __init time_init(void)
sys_timer = get_sys_timer();
printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
+#ifdef CONFIG_NO_IDLE_HZ
+ if (sys_timer->dyn_tick)
+ spin_lock_init(&sys_timer->dyn_tick->lock);
+#endif
+
#if defined(CONFIG_SH_KGDB)
/*
* Set up kgdb as requested. We do it here because the serial
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile
index 151a6a304ce..bcf244ff6a1 100644
--- a/arch/sh/kernel/timers/Makefile
+++ b/arch/sh/kernel/timers/Makefile
@@ -5,4 +5,6 @@
obj-y := timer.o
obj-$(CONFIG_SH_TMU) += timer-tmu.o
+obj-$(CONFIG_SH_MTU2) += timer-mtu2.o
+obj-$(CONFIG_SH_CMT) += timer-cmt.o
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
new file mode 100644
index 00000000000..a574b93a4e7
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -0,0 +1,196 @@
+/*
+ * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/rtc.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CMT_CMSTR 0xf84a0070
+#define CMT_CMCSR_0 0xf84a0072
+#define CMT_CMCNT_0 0xf84a0074
+#define CMT_CMCOR_0 0xf84a0076
+#define CMT_CMCSR_1 0xf84a0078
+#define CMT_CMCNT_1 0xf84a007a
+#define CMT_CMCOR_1 0xf84a007c
+
+#define STBCR3 0xf80a0000
+#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
+#define CMT_CMCSR_INIT 0x0040
+#define CMT_CMCSR_CALIB 0x0000
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define CMT_CMSTR 0xfffec000
+#define CMT_CMCSR_0 0xfffec002
+#define CMT_CMCNT_0 0xfffec004
+#define CMT_CMCOR_0 0xfffec006
+
+#define STBCR4 0xfffe040c
+#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0)
+#define CMT_CMCSR_INIT 0x0040
+#define CMT_CMCSR_CALIB 0x0000
+#else
+#error "Unknown CPU SUBTYPE"
+#endif
+
+static unsigned long cmt_timer_get_offset(void)
+{
+ int count;
+ static unsigned short count_p = 0xffff; /* for the first call after boot */
+ static unsigned long jiffies_p = 0;
+
+ /*
+ * cache volatile jiffies temporarily; we have IRQs turned off.
+ */
+ unsigned long jiffies_t;
+
+ /* timer count may underflow right here */
+ count = ctrl_inw(CMT_CMCOR_0);
+ count -= ctrl_inw(CMT_CMCNT_0);
+
+ jiffies_t = jiffies;
+
+ /*
+ * avoiding timer inconsistencies (they are rare, but they happen)...
+ * there is one kind of problem that must be avoided here:
+ * 1. the timer counter underflows
+ */
+
+ if (jiffies_t == jiffies_p) {
+ if (count > count_p) {
+ /* the nutcase */
+ if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */
+ count -= LATCH;
+ } else {
+ printk("%s (): hardware timer problem?\n",
+ __FUNCTION__);
+ }
+ }
+ } else
+ jiffies_p = jiffies_t;
+
+ count_p = count;
+
+ count = ((LATCH-1) - count) * TICK_SIZE;
+ count = (count + LATCH/2) / LATCH;
+
+ return count;
+}
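+
+The scaling at the end of the function converts the remaining hardware count into elapsed microseconds; a worked reading, with HZ = 100 assumed for illustration:
+
+	/*
+	 * count (CMCOR - CMCNT above) is how many hardware counts remain until
+	 * the next tick, so (LATCH - 1) - count is how many have already elapsed.
+	 * Scaling by TICK_SIZE (the tick length in usec, 10000 at HZ = 100) and
+	 * rounding gives the offset do_gettimeofday() adds on top of xtime:
+	 *
+	 *	offset = (((LATCH - 1) - count) * TICK_SIZE + LATCH / 2) / LATCH
+	 *
+	 * e.g. half-way through a tick this comes out near 5000 usec.
+	 */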
+
+static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
+{
+ unsigned long timer_status;
+
+ /* Clear CMF bit */
+ timer_status = ctrl_inw(CMT_CMCSR_0);
+ timer_status &= ~0x80;
+ ctrl_outw(timer_status, CMT_CMCSR_0);
+
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because as just said we have irq
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+ handle_timer_tick();
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction cmt_irq = {
+ .name = "timer",
+ .handler = cmt_timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .mask = CPU_MASK_NONE,
+};
+
+static void cmt_clk_init(struct clk *clk)
+{
+ u8 divisor = CMT_CMCSR_INIT & 0x3;
+ ctrl_inw(CMT_CMCSR_0);
+ ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0);
+ clk->parent = clk_get(NULL, "module_clk");
+ clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static void cmt_clk_recalc(struct clk *clk)
+{
+ u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3;
+ clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static struct clk_ops cmt_clk_ops = {
+ .init = cmt_clk_init,
+ .recalc = cmt_clk_recalc,
+};
+
+static struct clk cmt0_clk = {
+ .name = "cmt0_clk",
+ .ops = &cmt_clk_ops,
+};
+
+static int cmt_timer_start(void)
+{
+ ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR);
+ return 0;
+}
+
+static int cmt_timer_stop(void)
+{
+ ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR);
+ return 0;
+}
+
+static int cmt_timer_init(void)
+{
+ unsigned long interval;
+
+ cmt_clock_enable();
+
+ setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq);
+
+ cmt0_clk.parent = clk_get(NULL, "module_clk");
+
+ cmt_timer_stop();
+
+ interval = cmt0_clk.parent->rate / 8 / HZ;
+ printk(KERN_INFO "Interval = %ld\n", interval);
+
+ ctrl_outw(interval, CMT_CMCOR_0);
+
+ clk_register(&cmt0_clk);
+ clk_enable(&cmt0_clk);
+
+ cmt_timer_start();
+
+ return 0;
+}
+
+struct sys_timer_ops cmt_timer_ops = {
+ .init = cmt_timer_init,
+ .start = cmt_timer_start,
+ .stop = cmt_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+ .get_offset = cmt_timer_get_offset,
+#endif
+};
+
+struct sys_timer cmt_timer = {
+ .name = "cmt",
+ .ops = &cmt_timer_ops,
+};
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
new file mode 100644
index 00000000000..fffcd1c0987
--- /dev/null
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * Based off of arch/sh/kernel/timers/timer-tmu.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+/*
+ * We use channel 1 for our lowly system timer. Channel 2 would be the other
+ * likely candidate, but we leave it alone as it has higher divisors that
+ * would be of more use to other more interesting applications.
+ *
+ * TODO: Presently we only implement a 16-bit single-channel system timer.
+ * However, we can implement channel cascade if we go the overflow route and
+ * get away with using 2 MTU2 channels as a 32-bit timer.
+ */
+#define MTU2_TSTR 0xfffe4280
+#define MTU2_TCR_1 0xfffe4380
+#define MTU2_TMDR_1 0xfffe4381
+#define MTU2_TIOR_1 0xfffe4382
+#define MTU2_TIER_1 0xfffe4384
+#define MTU2_TSR_1 0xfffe4385
+#define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */
+#define MTU2_TGRA_1 0xfffe438a
+
+#define STBCR3 0xfffe0408
+
+#define MTU2_TSTR_CST1 (1 << 1) /* Counter Start 1 */
+
+#define MTU2_TSR_TGFA (1 << 0) /* GRA compare match */
+
+#define MTU2_TIER_TGIEA (1 << 0) /* GRA compare match interrupt enable */
+
+#define MTU2_TCR_INIT 0x22
+
+#define MTU2_TCR_CALIB 0x00
+
+static unsigned long mtu2_timer_get_offset(void)
+{
+ int count;
+ static int count_p = 0x7fff; /* for the first call after boot */
+ static unsigned long jiffies_p = 0;
+
+ /*
+ * cache volatile jiffies temporarily; we have IRQs turned off.
+ */
+ unsigned long jiffies_t;
+
+ /* timer count may underflow right here */
+ count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */
+
+ jiffies_t = jiffies;
+
+ /*
+ * avoiding timer inconsistencies (they are rare, but they happen)...
+ * there is one kind of problem that must be avoided here:
+ * 1. the timer counter underflows
+ */
+
+ if (jiffies_t == jiffies_p) {
+ if (count > count_p) {
+ if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) {
+ count -= LATCH;
+ } else {
+ printk("%s (): hardware timer problem?\n",
+ __FUNCTION__);
+ }
+ }
+ } else
+ jiffies_p = jiffies_t;
+
+ count_p = count;
+
+ count = ((LATCH-1) - count) * TICK_SIZE;
+ count = (count + LATCH/2) / LATCH;
+
+ return count;
+}
+
+static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
+{
+ unsigned long timer_status;
+
+ /* Clear TGFA bit */
+ timer_status = ctrl_inb(MTU2_TSR_1);
+ timer_status &= ~MTU2_TSR_TGFA;
+ ctrl_outb(timer_status, MTU2_TSR_1);
+
+ /* Do timer tick */
+ write_seqlock(&xtime_lock);
+ handle_timer_tick();
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mtu2_irq = {
+ .name = "timer",
+ .handler = mtu2_timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .mask = CPU_MASK_NONE,
+};
+
+static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
+
+static void mtu2_clk_init(struct clk *clk)
+{
+ u8 idx = MTU2_TCR_INIT & 0x7;
+
+ clk->rate = clk->parent->rate / divisors[idx];
+ /* Start TCNT counting */
+ ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+
+}
+
+static void mtu2_clk_recalc(struct clk *clk)
+{
+ u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7;
+ clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops mtu2_clk_ops = {
+ .init = mtu2_clk_init,
+ .recalc = mtu2_clk_recalc,
+};
+
+static struct clk mtu2_clk1 = {
+ .name = "mtu2_clk1",
+ .ops = &mtu2_clk_ops,
+};
+
+static int mtu2_timer_start(void)
+{
+ ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+ return 0;
+}
+
+static int mtu2_timer_stop(void)
+{
+ ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR);
+ return 0;
+}
+
+static int mtu2_timer_init(void)
+{
+ u8 tmp;
+ unsigned long interval;
+
+ setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
+
+ mtu2_clk1.parent = clk_get(NULL, "module_clk");
+
+ ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3);
+
+ /* Normal operation */
+ ctrl_outb(0, MTU2_TMDR_1);
+ ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1);
+ ctrl_outb(0x01, MTU2_TIOR_1);
+
+ /* Enable underflow interrupt */
+ ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1);
+
+ interval = CONFIG_SH_PCLK_FREQ / 16 / HZ;
+ printk(KERN_INFO "Interval = %ld\n", interval);
+
+ ctrl_outw(interval, MTU2_TGRA_1);
+ ctrl_outw(0, MTU2_TCNT_1);
+
+ clk_register(&mtu2_clk1);
+ clk_enable(&mtu2_clk1);
+
+ return 0;
+}
+
+struct sys_timer_ops mtu2_timer_ops = {
+ .init = mtu2_timer_init,
+ .start = mtu2_timer_start,
+ .stop = mtu2_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+ .get_offset = mtu2_timer_get_offset,
+#endif
+};
+
+struct sys_timer mtu2_timer = {
+ .name = "mtu2",
+ .ops = &mtu2_timer_ops,
+};
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 24927015dc3..e060e71d078 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -17,7 +17,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <asm/timer.h>
#include <asm/rtc.h>
@@ -31,13 +30,9 @@
#define TMU0_TCR_CALIB 0x0000
-static DEFINE_SPINLOCK(tmu0_lock);
-
static unsigned long tmu_timer_get_offset(void)
{
int count;
- unsigned long flags;
-
static int count_p = 0x7fffffff; /* for the first call after boot */
static unsigned long jiffies_p = 0;
@@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void)
*/
unsigned long jiffies_t;
- spin_lock_irqsave(&tmu0_lock, flags);
/* timer count may underflow right here */
count = ctrl_inl(TMU0_TCNT); /* read the latched count */
@@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void)
jiffies_p = jiffies_t;
count_p = count;
- spin_unlock_irqrestore(&tmu0_lock, flags);
count = ((LATCH-1) - count) * TICK_SIZE;
count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
static struct irqaction tmu_irq = {
.name = "timer",
.handler = tmu_timer_interrupt,
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.mask = CPU_MASK_NONE,
};
@@ -149,9 +142,9 @@ static int tmu_timer_init(void)
{
unsigned long interval;
- setup_irq(TIMER_IRQ, &tmu_irq);
+ setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
- tmu0_clk.parent = clk_get("module_clk");
+ tmu0_clk.parent = clk_get(NULL, "module_clk");
/* Start TMU0 */
tmu_timer_stop();
diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c
index dc1f631053a..a6bcc913d25 100644
--- a/arch/sh/kernel/timers/timer.c
+++ b/arch/sh/kernel/timers/timer.c
@@ -17,6 +17,12 @@ static struct sys_timer *sys_timers[] __initdata = {
#ifdef CONFIG_SH_TMU
&tmu_timer,
#endif
+#ifdef CONFIG_SH_MTU2
+ &mtu2_timer,
+#endif
+#ifdef CONFIG_SH_CMT
+ &cmt_timer,
+#endif
NULL,
};
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55f315..3762d9dc204 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -18,13 +18,14 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
+#include <linux/debug_locks.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
+#define CHK_REMOTE_DEBUG(regs) \
+{ \
if (kgdb_debug_hook && !user_mode(regs))\
(*kgdb_debug_hook)(regs); \
}
@@ -33,8 +34,13 @@
#endif
#ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST 4
-#define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_RESERVED_INST 4
+# define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_ADDRESS_ERROR 9
+# ifdef CONFIG_CPU_SH2A
+# define TRAP_DIVZERO_ERROR 17
+# define TRAP_DIVOVF_ERROR 18
+# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
@@ -88,7 +94,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
+ (unsigned long)task_stack_page(current));
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs,
die(str, regs, err);
}
-static int handle_unaligned_notify_count = 10;
-
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_to_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
@@ -222,7 +226,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_from_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
-
+
#ifdef __LITTLE_ENDIAN__
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -241,7 +245,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
}
#else
dst += 4-count;
-
+
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -320,7 +324,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
return -EFAULT;
/* kernel */
- die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+ die("delay-slot-insn faulting in handle_unaligned_delayslot",
+ regs, 0);
}
return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
u_int rm;
@@ -354,7 +366,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
if (user_mode(regs) && handle_unaligned_notify_count>0) {
handle_unaligned_notify_count--;
- printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
current->comm,current->pid,(u16*)regs->pc,instruction);
}
@@ -478,32 +491,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
regs->pc += 2;
return ret;
}
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
/*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ * - instruction address error:
+ * misaligned PC
+ * PC >= 0x80000000 in user mode
+ * - data address error (read and write)
+ * misaligned data access
+ * access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address error
+ * and data address errors caused by read accesses.
*/
-asmlinkage void do_address_error(struct pt_regs *regs,
+asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
- unsigned long error_code;
+ unsigned long error_code = 0;
mm_segment_t oldfs;
+ siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
u16 instruction;
int tmp;
+#endif
- asm volatile("stc r2_bank,%0": "=r" (error_code));
+ /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+ lookup_exception_vector(error_code);
+#endif
oldfs = get_fs();
if (user_mode(regs)) {
+ int si_code = BUS_ADRERR;
+
local_irq_enable();
- current->thread.error_code = error_code;
- current->thread.trap_no = (writeaccess) ? 8 : 7;
/* bad PC is not something we can fix */
- if (regs->pc & 1)
+ if (regs->pc & 1) {
+ si_code = BUS_ADRALN;
goto uspace_segv;
+ }
+#ifndef CONFIG_CPU_SH2A
set_fs(USER_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@ asmlinkage void do_address_error(struct pt_regs *regs,
if (tmp==0)
return; /* sorted */
+#endif
- uspace_segv:
- printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
- force_sig(SIGSEGV, current);
+uspace_segv:
+ printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+ "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+ regs->pr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void *) address;
+ force_sig_info(SIGBUS, &info, current);
} else {
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
+#ifndef CONFIG_CPU_SH2A
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
handle_unaligned_access(instruction, regs);
set_fs(oldfs);
+#else
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+ "access\n", current->comm);
+
+ force_sig(SIGSEGV, current);
+#endif
}
}
@@ -548,7 +602,7 @@ int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst;
- /*
+ /*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
@@ -569,27 +623,49 @@ int is_dsp_inst(struct pt_regs *regs)
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ siginfo_t info;
+
+ switch (r4) {
+ case TRAP_DIVZERO_ERROR:
+ info.si_code = FPE_INTDIV;
+ break;
+ case TRAP_DIVOVF_ERROR:
+ info.si_code = FPE_INTOVF;
+ break;
+ }
+
+ force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
/* arch/sh/kernel/cpu/sh4/fpu.c */
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, struct pt_regs regs);
+ unsigned long r6, unsigned long r7, struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
int err;
- get_user(inst, (unsigned short*)regs.pc);
+ get_user(inst, (unsigned short*)regs->pc);
- err = do_fpu_inst(inst, &regs);
+ err = do_fpu_inst(inst, regs);
if (!err) {
- regs.pc += 2;
+ regs->pc += 2;
return;
}
/* not a FPU inst. */
@@ -597,20 +673,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
- if (is_dsp_inst(&regs)) {
+ if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
- regs.sr |= SR_DSP;
+ regs->sr |= SR_DSP;
return;
}
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("reserved instruction", &regs, error_code);
+ die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
- get_user(inst, (unsigned short *)regs.pc + 1);
- if (!do_fpu_inst(inst, &regs)) {
- get_user(inst, (unsigned short *)regs.pc);
- if (!emulate_branch(inst, &regs))
+ get_user(inst, (unsigned short *)regs->pc + 1);
+ if (!do_fpu_inst(inst, regs)) {
+ get_user(inst, (unsigned short *)regs->pc);
+ if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("illegal slot instruction", &regs, error_code);
+ die_if_no_fixup("illegal slot instruction", regs, error_code);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
- asm volatile("stc r2_bank, %0" : "=r" (ex));
- die_if_kernel("exception", &regs, ex);
+
+ lookup_exception_vector(ex);
+ die_if_kernel("exception", regs, ex);
}
#if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
-
+
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@ void __init trap_init(void)
set_exception_table_evt(0x800, do_fpu_state_restore);
set_exception_table_evt(0x820, do_fpu_state_restore);
#endif
-
+
+#ifdef CONFIG_CPU_SH2
+ set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+ set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+ set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
/* Setup VBR for boot cpu */
per_cpu_trap_init();
}
@@ -784,6 +873,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
}
printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
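
One visible consequence of the do_address_error() rework above is that userspace now receives SIGBUS with si_code set to BUS_ADRALN or BUS_ADRERR and si_addr pointing at the offending address, instead of a bare SIGSEGV. A hedged userspace sketch of a handler that makes use of that information (plain POSIX, nothing SH-specific):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* Report what the kernel told us about the bus error and bail out. */
static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	fprintf(stderr, "SIGBUS at %p (%s)\n", info->si_addr,
		info->si_code == BUS_ADRALN ? "misaligned access"
					    : "address error");
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { .sa_flags = SA_SIGINFO };

	sa.sa_sigaction = bus_handler;
	sigaction(SIGBUS, &sa, NULL);

	/* ... code that may trigger an unaligned or invalid access ... */
	return 0;
}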
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 075d6cc1a2d..deb46941f31 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
goto up_fail;
}
- vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
ret = -ENOMEM;
goto up_fail;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd606464d2..4e0362f5038 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@ menu "Processor selection"
# Processor families
#
config CPU_SH2
+ select SH_WRITETHROUGH if !CPU_SH2A
bool
- select SH_WRITETHROUGH
+
+config CPU_SH2A
+ bool
+ select CPU_SH2
config CPU_SH3
bool
@@ -16,6 +20,7 @@ config CPU_SH4
bool
select CPU_HAS_INTEVT
select CPU_HAS_SR_RB
+ select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
config CPU_SH4A
bool
@@ -40,6 +45,16 @@ config CPU_SUBTYPE_SH7604
bool "Support SH7604 processor"
select CPU_SH2
+config CPU_SUBTYPE_SH7619
+ bool "Support SH7619 processor"
+ select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+ bool "Support SH7206 processor"
+ select CPU_SH2A
+
comment "SH-3 Processor Support"
config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@ comment "SH-4 Processor Support"
config CPU_SUBTYPE_SH7750
bool "Support SH7750 processor"
select CPU_SH4
+ select CPU_HAS_IPR_IRQ
help
Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
@@ -104,15 +120,18 @@ config CPU_SUBTYPE_SH7750R
bool "Support SH7750R processor"
select CPU_SH4
select CPU_SUBTYPE_SH7750
+ select CPU_HAS_IPR_IRQ
config CPU_SUBTYPE_SH7750S
bool "Support SH7750S processor"
select CPU_SH4
select CPU_SUBTYPE_SH7750
+ select CPU_HAS_IPR_IRQ
config CPU_SUBTYPE_SH7751
bool "Support SH7751 processor"
select CPU_SH4
+ select CPU_HAS_IPR_IRQ
help
Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@ config CPU_SUBTYPE_SH7751R
bool "Support SH7751R processor"
select CPU_SH4
select CPU_SUBTYPE_SH7751
+ select CPU_HAS_IPR_IRQ
config CPU_SUBTYPE_SH7760
bool "Support SH7760 processor"
@@ -157,6 +177,11 @@ config CPU_SUBTYPE_SH7780
select CPU_SH4A
select CPU_HAS_INTC2_IRQ
+config CPU_SUBTYPE_SH7785
+ bool "Support SH7785 processor"
+ select CPU_SH4A
+ select CPU_HAS_INTC2_IRQ
+
comment "SH4AL-DSP Processor Support"
config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@ config MEMORY_SIZE
config 32BIT
bool "Support 32-bit physical addressing through PMB"
- depends on CPU_SH4A && MMU
+ depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
default y
help
If you say Y here, physical addressing will be extended to
32-bits through the SH-4A PMB. If this is not set, legacy
29-bit physical addressing will be used.
+config X2TLB
+ bool "Enable extended TLB mode"
+ depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+ help
+ Selecting this option will enable the extended mode of the SH-X2
+ TLB. For legacy SH-X behaviour and interoperability, say N. For
+ all of the fun new features and a willingness to submit bug reports,
+ say Y.
+
config VSYSCALL
bool "Support vsyscall page"
depends on MMU
@@ -237,16 +271,52 @@ config VSYSCALL
(the default value) say Y.
choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+ bool "4kB"
+ help
+ This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+ bool "8kB"
+ depends on EXPERIMENTAL && X2TLB
+ help
+ This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+ bool "64kB"
+ depends on EXPERIMENTAL && CPU_SH4
+ help
+ This enables support for 64kB pages, possible on all SH-4
+ CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
prompt "HugeTLB page size"
depends on HUGETLB_PAGE && CPU_SH4 && MMU
default HUGETLB_PAGE_SIZE_64K
config HUGETLB_PAGE_SIZE_64K
- bool "64K"
+ bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+ bool "256kB"
+ depends on X2TLB
config HUGETLB_PAGE_SIZE_1MB
bool "1MB"
+config HUGETLB_PAGE_SIZE_4MB
+ bool "4MB"
+ depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+ bool "64MB"
+ depends on X2TLB
+
endchoice
source "mm/Kconfig"
@@ -274,7 +344,6 @@ config SH_DIRECT_MAPPED
config SH_WRITETHROUGH
bool "Use write-through caching"
- default y if CPU_SH2
help
Selecting this option will configure the caches in write-through
mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb24ea2..6614033f6be 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
*
* Released under the terms of the GNU GPL v2.0.
*/
+
#include <linux/init.h>
#include <linux/mm.h>
@@ -14,37 +15,43 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
{
- unsigned long ccr;
-
- /*
- * On SH-2 the way bit isn't tracked in the address field
- * if we're doing address array access .. instead, we need
- * to manually switch out the way in the CCR.
- */
- ccr = ctrl_inl(CCR);
- ccr &= ~0x00c0;
- ccr |= way << cpu_data->dcache.way_shift;
-
- /*
- * Despite the number of sets being halved, we end up losing
- * the first 2 ways to OCRAM instead of the last 2 (if we're
- * 4-way). As a result, forcibly setting the W1 bit handily
- * bumps us up 2 ways.
- */
- if (ccr & CCR_CACHE_ORA)
- ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
- ctrl_outl(ccr, CCR);
-
- return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ /* FIXME cache purge */
+ ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+ }
+}
+
+void __flush_purge_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+ }
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+ unsigned long v;
+ unsigned long begin, end;
+
+ begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+ end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+ & ~(L1_CACHE_BYTES-1);
+ for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+ ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+ }
}
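
All three new SH-2 flush routines share the same skeleton: align the start of the region down and the end up to an L1 cache line, then visit each line in between. A self-contained sketch of just that alignment walk (32-byte line size chosen purely for illustration; the writes to the cache address array are replaced by a printf):

#include <stdio.h>

#define L1_CACHE_BYTES	32	/* example line size */

static void walk_cache_lines(void *start, int size)
{
	unsigned long begin, end, v;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
	      & ~(L1_CACHE_BYTES - 1);

	for (v = begin; v < end; v += L1_CACHE_BYTES)
		printf("touch line at 0x%lx\n", v);
}

int main(void)
{
	char buf[128];

	walk_cache_lines(buf + 3, 70);	/* misaligned start, odd length */
	return 0;
}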
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22724d..ae531affccb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
*/
/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
void __init p3_cache_init(void)
{
@@ -115,7 +111,7 @@ void __init p3_cache_init(void)
panic("%s failed.", __FUNCTION__);
for (i = 0; i < cpu_data->dcache.n_aliases; i++)
- sema_init(&p3map_sem[i], 1);
+ mutex_init(&p3map_mutex[i]);
}
/*
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
*/
if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
(start < CACHE_OC_ADDRESS_ARRAY))
- exec_offset = 0x20000000;
+ exec_offset = 0x20000000;
local_irq_save(flags);
__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page)
/* Loop all the D-cache */
n = cpu_data->dcache.n_aliases;
- for (i = 0; i < n; i++, addr += PAGE_SIZE)
+ for (i = 0; i < n; i++, addr += 4096)
flush_cache_4096(addr, phys);
}
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425ae27..8a706131e52 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
* __clear_user_page, __clear_user, clear_page implementation of SuperH
*
* Copyright (C) 2001 Kaz Kojima
* Copyright (C) 2001, 2002 Niibe Yutaka
- *
+ * Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
+#include <asm/page.h>
/*
* clear_page_slow
@@ -18,11 +18,11 @@
/*
* r0 --- scratch
* r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
*/
ENTRY(clear_page_slow)
mov r4,r5
- mov.w .Llimit,r0
+ mov.l .Llimit,r0
add r0,r5
mov #0,r0
!
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
!
rts
nop
-.Llimit: .word (4096-28)
+.Llimit: .long (PAGE_SIZE-28)
ENTRY(__clear_user)
!
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
* r0 --- scratch
* r4 --- to
* r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
*/
ENTRY(__clear_user_page)
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
mov r4,r6
add r0,r6
mov #0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
!
rts
nop
-.L4096: .word 4096
+.Lpsz: .long PAGE_SIZE
#endif
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c..397c94c9731 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
* copy_page, __copy_user_page, __copy_user implementation of SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
+#include <asm/page.h>
/*
* copy_page_slow
@@ -18,7 +18,7 @@
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- not used
* r10 --- to
* r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
mov r4,r10
mov r5,r11
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: mov.l @r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- orig_to
* r10 --- to
* r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
mov r5,r11
mov r6,r9
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: ocbi @r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
rts
nop
#endif
-.L4096: .word 4096
+.Lpsz: .long PAGE_SIZE
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99a..716ebf568af 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
- unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
unsigned long page;
+ int si_code;
+ siginfo_t info;
+
+ trace_hardirqs_on();
+ local_irq_enable();
#ifdef CONFIG_SH_KGDB
if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
tsk = current;
mm = tsk->mm;
+ si_code = SEGV_MAPERR;
+
+ if (unlikely(address >= TASK_SIZE)) {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = get_TTB() + offset;
+ pgd_k = swapper_pg_dir + offset;
+
+ /* This will never happen with the folded page table. */
+ if (!pgd_present(*pgd)) {
+ if (!pgd_present(*pgd_k))
+ goto bad_area_nosemaphore;
+ set_pgd(pgd, *pgd_k);
+ return;
+ }
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_area_nosemaphore;
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+ goto bad_area_nosemaphore;
+ set_pmd(pmd, *pmd_k);
+
+ return;
+ }
/*
* If we're in an interrupt or have no user
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* we can handle it..
*/
good_area:
+ si_code = SEGV_ACCERR;
if (writeaccess) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
@@ -104,10 +151,13 @@ survive:
bad_area:
up_read(&mm->mmap_sem);
+bad_area_nosemaphore:
if (user_mode(regs)) {
- tsk->thread.address = address;
- tsk->thread.error_code = writeaccess;
- force_sig(SIGSEGV, tsk);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void *) address;
+ force_sig_info(SIGSEGV, &info, tsk);
return;
}
@@ -127,11 +177,9 @@ no_context:
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
- asm volatile("mov.l %1, %0"
- : "=r" (page)
- : "m" (__m(MMU_TTB)));
+ page = (unsigned long)get_TTB();
if (page) {
- page = ((unsigned long *) page)[address >> 22];
+ page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
@@ -166,98 +214,13 @@ do_sigbus:
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- tsk->thread.address = address;
- tsk->thread.error_code = writeaccess;
- tsk->thread.trap_no = 14;
- force_sig(SIGBUS, tsk);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
- unsigned long writeaccess,
- unsigned long address)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
- struct mm_struct *mm = current->mm;
- spinlock_t *ptl;
- int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
-
- /*
- * We don't take page faults for P1, P2, and parts of P4, these
- * are always mapped, whether it be due to legacy behaviour in
- * 29-bit mode, or due to PMB configuration in 32-bit mode.
- */
- if (address >= P3SEG && address < P3_ADDR_MAX) {
- pgd = pgd_offset_k(address);
- mm = NULL;
- } else {
- if (unlikely(address >= TASK_SIZE || !mm))
- return 1;
-
- pgd = pgd_offset(mm, address);
- }
-
- pud = pud_offset(pgd, address);
- if (pud_none_or_clear_bad(pud))
- return 1;
- pmd = pmd_offset(pud, address);
- if (pmd_none_or_clear_bad(pmd))
- return 1;
-
- if (mm)
- pte = pte_offset_map_lock(mm, pmd, address, &ptl);
- else
- pte = pte_offset_kernel(pmd, address);
-
- entry = *pte;
- if (unlikely(pte_none(entry) || pte_not_present(entry)))
- goto unlock;
- if (unlikely(writeaccess && !pte_write(entry)))
- goto unlock;
-
- if (writeaccess)
- entry = pte_mkdirty(entry);
- entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
- /*
- * ITLB is not affected by "ldtlb" instruction.
- * So, we need to flush the entry by ourselves.
- */
- __flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
- set_pte(pte, entry);
- update_mmu_cache(NULL, address, entry);
- ret = 0;
-unlock:
- if (mm)
- pte_unmap_unlock(pte, ptl);
- return ret;
-}
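
The new vmalloc-fault branch in do_page_fault() copies the missing upper-level entry from swapper_pg_dir into the current page table, and the trick rests on both tables being indexed by the same pgd_index(address). A toy sketch of that index calculation with assumed layout constants (two-level, 4 kB pages; not the values of any particular SH part):

#include <stdio.h>

/* Assumed layout: 4 kB pages, each pgd entry mapping 4 MB. */
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024

static unsigned long pgd_index(unsigned long address)
{
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	unsigned long addr = 0xc0801234UL;	/* some vmalloc-ish address */

	/* The fault handler adds this offset to both the task's pgd and
	 * swapper_pg_dir, so the same slot is compared and copied. */
	printf("pgd slot for 0x%08lx = %lu\n", addr, pgd_index(addr));
	return 0;
}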
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 329059d6b54..cf2c2ee35a3 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -63,6 +63,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte;
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce978..59f4cc18235 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
pmd_t *pmd;
pte_t *pte;
- pgd = swapper_pg_dir + pgd_index(addr);
+ pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
pgd_ERROR(*pgd);
return;
}
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud)) {
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
- if (pmd != pmd_offset(pud, 0)) {
- pud_ERROR(*pud);
- return;
- }
+ pud = pud_alloc(NULL, pgd, addr);
+ if (unlikely(!pud)) {
+ pud_ERROR(*pud);
+ return;
}
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd)) {
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
- if (pte != pte_offset_kernel(pmd, 0)) {
- pmd_ERROR(*pmd);
- return;
- }
+ pmd = pmd_alloc(NULL, pud, addr);
+ if (unlikely(!pmd)) {
+ pmd_ERROR(*pmd);
+ return;
}
pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@ extern char __init_begin, __init_end;
/*
* paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
*/
void __init paging_init(void)
{
@@ -180,14 +169,11 @@ void __init paging_init(void)
*/
{
unsigned long max_dma, low, start_pfn;
- pgd_t *pg_dir;
- int i;
-
- /* We don't need kernel mapping as hardware support that. */
- pg_dir = swapper_pg_dir;
- for (i = 0; i < PTRS_PER_PGD; i++)
- pgd_val(pg_dir[i]) = 0;
+ /* We don't need to map the kernel through the TLB, as
+ * it is permanently mapped using P1. So clear the
+ * entire pgd. */
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
/* Turn on the MMU */
enable_mmu();
@@ -206,6 +192,10 @@ void __init paging_init(void)
}
}
+ /* Set an initial value for the MMU.TTB so we don't have to
+ * check for a null value. */
+ set_TTB(swapper_pg_dir);
+
#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
* If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
void __init mem_init(void)
{
- extern unsigned long empty_zero_page[1024];
int codesize, reservedpages, datasize, initsize;
int tmp;
extern unsigned long memory_start;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc23..11d54c14982 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
{
unsigned long end;
unsigned long pfn;
- pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED |
- _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+ pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
address &= ~PMD_MASK;
end = address + size;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348c..bb23679369d 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
static void clear_page_dma(void *to)
{
- extern unsigned long empty_zero_page[1024];
-
/*
* We get invoked quite early on, if the DMAC hasn't been initialized
* yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a31..3f98d2a4f93 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/init.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
#define CACHE_ALIAS (cpu_data->dcache.alias_mask)
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
clear_page(to);
else {
- pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
- _PAGE_RW | _PAGE_CACHABLE |
- _PAGE_DIRTY | _PAGE_ACCESSED |
- _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
unsigned long phys_addr = PHYSADDR(to);
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
pte_t entry;
unsigned long flags;
- entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
- down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+ entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+ mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
local_irq_save(flags);
__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
update_mmu_cache(NULL, p3_addr, entry);
__clear_user_page((void *)p3_addr, to);
pte_clear(&init_mm, p3_addr, pte);
- up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+ mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
}
}
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from);
else {
- pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
- _PAGE_RW | _PAGE_CACHABLE |
- _PAGE_DIRTY | _PAGE_ACCESSED |
- _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
unsigned long phys_addr = PHYSADDR(to);
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address,
pte_t entry;
unsigned long flags;
- entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
- down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+ entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+ mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
set_pte(pte, entry);
local_irq_save(flags);
__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
update_mmu_cache(NULL, p3_addr, entry);
__copy_user_page((void *)p3_addr, from, to);
pte_clear(&init_mm, p3_addr, pte);
- up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+ mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
}
}
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
}
return pte;
}
-
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 92e745341e4..b60ad83a763 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -30,7 +30,7 @@
#define NR_PMB_ENTRIES 16
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
struct pmb_entry *pmbe = pmb;
@@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
spin_unlock_irq(&pmb_list_lock);
}
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
spin_lock_irq(&pmb_list_lock);
pmb_list_del(pmb);
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index ac57638977e..0571755e9a8 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -30,3 +30,5 @@ R7780MP SH_R7780MP
TITAN SH_TITAN
SHMIN SH_SHMIN
7710VOIPGW SH_7710VOIPGW
+7206SE SH_7206_SOLUTION_ENGINE
+7619SE SH_7619_SOLUTION_ENGINE
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index ffb310e33ce..b9e7d54d7b8 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -243,9 +243,7 @@ void __init setup_arch(char **cmdline_p)
if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
- initrd_start =
- (long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
-
+ initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
printk("initrd extends beyond end of memory "
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
index 4b2df7247b5..7aa4b4f7bc5 100644
--- a/arch/sh64/kernel/sh_ksyms.c
+++ b/arch/sh64/kernel/sh_ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(strstr);
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index 9e2ffc45c0e..1666d3efb52 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -22,7 +22,7 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
index 0e8a742abf8..4b2676380de 100644
--- a/arch/sh64/lib/c-checksum.c
+++ b/arch/sh64/lib/c-checksum.c
@@ -118,24 +118,24 @@ static unsigned long do_csum(const unsigned char *buff, int len)
/* computes the checksum of a memory block at buff, length len,
and adds in "sum" (32-bit) */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned long long result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
+ result += (__force u32)sum;
/* 32+c bits -> 32 bits */
result = (result & 0xffffffff) + (result >> 32);
pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n",
buff, len, sum, result);
- return result;
+ return (__force __wsum)result;
}
/* Copy while checksumming, otherwise like csum_partial. */
-unsigned int
-csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
sum = csum_partial(src, len, sum);
memcpy(dst, src, len);
@@ -145,9 +145,9 @@ csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, unsigne
/* Copy from userspace and compute checksum. If we catch an exception
then zero the rest of the buffer. */
-unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, int len,
- unsigned int sum, int *err_ptr)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *err_ptr)
{
int missing;
@@ -166,9 +166,9 @@ csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, int le
}
/* Copy to userspace and compute checksum. */
-unsigned int
+__wsum
csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
- unsigned int sum, int *err_ptr)
+ __wsum sum, int *err_ptr)
{
sum = csum_partial(src, len, sum);
@@ -182,28 +182,24 @@ csum_partial_copy_to_user(const unsigned char *src, unsigned char *dst, int len,
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
pr_debug("ip_fast_csum %p,%d\n", iph, ihl);
- return ~do_csum(iph, ihl * 4);
+ return (__force __sum16)~do_csum(iph, ihl * 4);
}
-unsigned int csum_tcpudp_nofold(unsigned long saddr,
- unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
- unsigned short proto, unsigned int sum)
+ unsigned short proto, __wsum sum)
{
unsigned long long result;
pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead));
pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead));
- result = ((unsigned long long) saddr +
- (unsigned long long) daddr +
- (unsigned long long) sum +
- ((unsigned long long) ntohs(len) << 16) +
- ((unsigned long long) proto << 8));
+ result = (__force u64) saddr + (__force u64) daddr +
+ (__force u64) sum + ((len + proto) << 8);
/* Fold down to 32-bits so we don't lose in the typedef-less
network stack. */
@@ -215,16 +211,5 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n",
__FUNCTION__, saddr, daddr, len, proto, sum, result);
- return result;
-}
-
-// Post SIM:
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
-{
- // unsigned dummy;
- pr_debug("csum_partial_copy_nocheck src %p dst %p len %d\n", src, dst,
- len);
-
- return csum_partial_copy(src, dst, len, sum);
+ return (__wsum)result;
}
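
Both csum_partial() and csum_tcpudp_nofold() above rely on the same ones-complement reduction: fold the 64-bit accumulator back into 32 bits (and eventually 16) so that no carries are lost. A standalone sketch of the full reduction chain, run on an arbitrary accumulator value:

#include <stdio.h>
#include <stdint.h>

/* Fold a wide ones-complement accumulator down to the final 16-bit
 * checksum, re-adding the carry at each narrowing step. */
static uint16_t fold_csum(uint64_t result)
{
	result = (result & 0xffffffff) + (result >> 32);	/* 64 -> 32  */
	result = (result & 0xffffffff) + (result >> 32);	/* add carry */
	result = (result & 0xffff) + (result >> 16);		/* 32 -> 16  */
	result = (result & 0xffff) + (result >> 16);		/* add carry */
	return (uint16_t)~result;
}

int main(void)
{
	printf("0x%04x\n", fold_csum(0x1a2b3c4d5e6fULL));
	return 0;
}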
diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
index 1326f45f31e..4310fc87444 100644
--- a/arch/sh64/lib/dbg.c
+++ b/arch/sh64/lib/dbg.c
@@ -383,7 +383,7 @@ void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
/* ======================================================================= */
/*
-** Depending on <base> scan the MMU, Data or Instrction side
+** Depending on <base> scan the MMU, Data or Instruction side
** looking for a valid mapping matching Eaddr & asid.
** Return -1 if not found or the TLB id entry otherwise.
** Note: it works only for 4k pages!
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index 8e2f6c28b73..4f72ab33bb2 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -154,7 +154,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || !mm)
+ if (in_atomic() || !mm)
goto no_context;
/* TLB misses upon some cache flushes get done under cli() */
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index 187cf01750b..4b455f61114 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -53,6 +53,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte;
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2f96610a83e..92a7c8a636d 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -212,8 +212,8 @@ config SPARC_LED
tristate "Sun4m LED driver"
help
This driver toggles the front-panel LED on sun4m systems
- in a user-specifyable manner. It's state can be probed
- by reading /proc/led and it's blinking mode can be changed
+ in a user-specifiable manner. Its state can be probed
+ by reading /proc/led and its blinking mode can be changed
via writes to /proc/led
source "fs/Kconfig.binfmt"
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 5cc5ff7f882..b73e6b9067e 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@ SECTIONS
. = 0x10000 + SIZEOF_HEADERS;
.text 0xf0004000 :
{
+ _text = .;
*(.text)
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 4d8ed9c6518..01fc6c25429 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- inc_preempt_count();
+ pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < FIXADDR_START) { // FIXME
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
return;
}
@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
#endif
#endif
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
/* We may be fed a pagetable here by ptep_to_xxx and others. */
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index a98f3ae175a..9ad84ff10a1 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -141,7 +141,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
value->tv_sec = jiffies / HZ;
}
-#define elf_addr_t u32
#undef start_thread
#define start_thread start_thread32
#define init_elf_binfmt init_elf32_binfmt
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index e02f01b644a..dfc41cd4bb5 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -646,13 +646,4 @@ int pci_domain_nr(struct pci_bus *pbus)
}
EXPORT_SYMBOL(pci_domain_nr);
-int pcibios_prep_mwi(struct pci_dev *dev)
-{
- /* We set correct PCI_CACHE_LINE_SIZE register values for every
- * device probed on this platform. So there is nothing to check
- * and this always succeeds.
- */
- return 0;
-}
-
#endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index bd9de8c2a2a..4a6063f33e7 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -13,6 +13,7 @@ SECTIONS
. = 0x4000;
.text 0x0000000000404000 :
{
+ _text = .;
*(.text)
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 53b9b1f528e..33fd0b265e7 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -235,6 +235,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte;
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 09cb7fccc03..a8e8802eed4 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int bigkernel = 0;
-kmem_cache_t *pgtable_cache __read_mostly;
+struct kmem_cache *pgtable_cache __read_mostly;
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
clear_page(addr);
}
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index beaa02810f0..236d02f41a0 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
}
}
-static kmem_cache_t *tsb_caches[8] __read_mostly;
+static struct kmem_cache *tsb_caches[8] __read_mostly;
static const char *tsb_cache_names[8] = {
"tsb_8KB",
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3cc505..7d4190e5565 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
return -1;
}
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq)
{
struct list_head *ele, *next;
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index 2f880cb167a..0cad3546cb8 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -120,7 +120,7 @@ static int winch_thread(void *arg)
/* These are synchronization calls between various UML threads on the
* host - since they are not different kernel threads, we cannot use
* kernel semaphores. We don't use SysV semaphores because they are
- * persistant. */
+ * persistent. */
count = os_read_file(pipe_fd, &c, sizeof(c));
if(count != sizeof(c))
printk("winch_thread : failed to read synchronization byte, "
diff --git a/arch/um/drivers/daemon_kern.c b/arch/um/drivers/daemon_kern.c
index 824386974f8..9c2e7a758f2 100644
--- a/arch/um/drivers/daemon_kern.c
+++ b/arch/um/drivers/daemon_kern.c
@@ -98,4 +98,4 @@ static int register_daemon(void)
return 0;
}
-__initcall(register_daemon);
+late_initcall(register_daemon);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 426633e5d6e..aa3090d05a8 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -31,9 +31,9 @@ static irqreturn_t line_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static void line_timer_cb(void *arg)
+static void line_timer_cb(struct work_struct *work)
{
- struct line *line = arg;
+ struct line *line = container_of(work, struct line, task.work);
if(!line->throttled)
chan_interrupt(&line->chan_list, &line->task, line->tty,
@@ -443,7 +443,7 @@ int line_open(struct line *lines, struct tty_struct *tty)
* is registered.
*/
enable_chan(line);
- INIT_WORK(&line->task, line_timer_cb, line);
+ INIT_DELAYED_WORK(&line->task, line_timer_cb);
if(!line->sigio){
chan_enable_winch(&line->chan_list, tty);
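
The line.c changes above track the workqueue API of that era: the callback now receives the struct work_struct pointer itself and recovers its enclosing object with container_of(). A userspace sketch of that recovery, using a stand-in struct line rather than the real UML one:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };

struct line {
	int throttled;
	struct delayed_work task;	/* embedded, as in the UML driver */
};

/* The callback only sees the inner work_struct and walks back out. */
static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	printf("throttled=%d\n", line->throttled);
}

int main(void)
{
	struct line l = { .throttled = 0 };

	line_timer_cb(&l.task.work);
	return 0;
}

Passing the work_struct instead of a void * data pointer is what removed the extra argument from INIT_WORK()/INIT_DELAYED_WORK(), which is why the init call in line_open() loses its third parameter.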
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index c090fbd464e..52ccb7b53cd 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -127,4 +127,4 @@ static int register_mcast(void)
return 0;
}
-__initcall(register_mcast);
+late_initcall(register_mcast);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b172160fe0..96f0189327a 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
static LIST_HEAD(mc_requests);
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
{
struct mconsole_entry *req;
unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
}
}
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
{
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8bd943..286bc0b3207 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
* same device, since it tests for (dev->flags & IFF_UP). So
* there's no harm in delaying the device shutdown. */
schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
goto out;
}
reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/pcap_kern.c b/arch/um/drivers/pcap_kern.c
index 6e1ef855828..e67362acf0e 100644
--- a/arch/um/drivers/pcap_kern.c
+++ b/arch/um/drivers/pcap_kern.c
@@ -109,4 +109,4 @@ static int register_pcap(void)
return 0;
}
-__initcall(register_pcap);
+late_initcall(register_pcap);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f3733f73..6dfe632f1c1 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
DECLARE_MUTEX(ports_sem);
struct list_head ports = LIST_HEAD_INIT(ports);
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
{
struct port_list *port;
struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
local_irq_restore(flags);
}
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
static irqreturn_t port_interrupt(int irq, void *data)
{
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 788da5439a2..25634bd1f58 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -95,4 +95,4 @@ static int register_slip(void)
return 0;
}
-__initcall(register_slip);
+late_initcall(register_slip);
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index ae322e1c8a8..b3ed8fb874a 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -119,4 +119,4 @@ static int register_slirp(void)
return 0;
}
-__initcall(register_slirp);
+late_initcall(register_slirp);
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index 572d286ed2c..9003a343e14 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -27,7 +27,7 @@ struct chan {
void *data;
};
-extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
+extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq);
extern int parse_chan_pair(char *str, struct line *line, int device,
const struct chan_opts *opts);
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 7be24811bb3..214ee76c40d 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -51,7 +51,7 @@ struct line {
char *tail;
int sigio;
- struct work_struct task;
+ struct delayed_work task;
const struct line_driver *driver;
int have_irq;
};
diff --git a/arch/um/include/sysdep-i386/checksum.h b/arch/um/include/sysdep-i386/checksum.h
index 052bb061a97..0cb4645cbeb 100644
--- a/arch/um/include/sysdep-i386/checksum.h
+++ b/arch/um/include/sysdep-i386/checksum.h
@@ -20,8 +20,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len,
- unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -32,8 +31,8 @@ unsigned int csum_partial(const unsigned char * buff, int len,
*/
static __inline__
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
- int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
@@ -48,36 +47,25 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
*/
static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
- unsigned char *dst,
- int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
{
- if(copy_from_user(dst, src, len)){
+ if (copy_from_user(dst, src, len)) {
*err_ptr = -EFAULT;
- return(-1);
+ return (__force __wsum)-1;
}
return csum_partial(dst, len, sum);
}
/*
- * These are the old (and unsafe) way of doing checksums, a warning message
- * will be printed if they are used and an exception occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy_from_user
-
-/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
-static inline unsigned short ip_fast_csum(unsigned char * iph,
- unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
@@ -105,29 +93,29 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
- return sum;
+ return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
"adcl $0xffff, %0 ;\n"
: "=r" (sum)
- : "r" (sum << 16), "0" (sum & 0xffff0000)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000)
);
- return (~sum) >> 16;
+ return (__force __sum16)(~(__force u32)sum >> 16);
}
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
- unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
__asm__(
"addl %1, %0 ;\n"
@@ -135,7 +123,7 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
"adcl %3, %0 ;\n"
"adcl $0, %0 ;\n"
: "=r" (sum)
- : "g" (daddr), "g"(saddr), "g"((ntohs(len)<<16)+proto*256), "0"(sum));
+ : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
return sum;
}
@@ -143,11 +131,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -157,17 +144,16 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
* in icmp.c
*/
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
- struct in6_addr *daddr,
- __u32 len,
- unsigned short proto,
- unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
{
__asm__(
"addl 0(%1), %0 ;\n"
@@ -192,14 +178,14 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
- unsigned char __user *dst,
- int len, int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum, int *err_ptr)
{
- if (access_ok(VERIFY_WRITE, dst, len)){
- if(copy_to_user(dst, src, len)){
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT;
- return(-1);
+ return (__force __wsum)-1;
}
return csum_partial(src, len, sum);
@@ -208,7 +194,7 @@ static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
if (len)
*err_ptr = -EFAULT;
- return -1; /* invalid checksum */
+ return (__force __wsum)-1; /* invalid checksum */
}
#endif
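For orientation, a portable sketch of what the annotated csum_fold() above computes (the kernel uses the inline asm, not this code): fold a 32-bit one's-complement accumulator into the final inverted 16-bit checksum.

    #include <stdint.h>

    static inline uint16_t csum_fold_sketch(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* add the high half into the low half */
            sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry that step may produce */
            return (uint16_t)~sum;                  /* the checksum is the one's complement */
    }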
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index 6670cc992ec..52b398bcafc 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -75,7 +75,7 @@ union uml_pt_regs {
#endif
#ifdef UML_CONFIG_MODE_SKAS
struct skas_regs {
- unsigned long regs[HOST_FRAME_SIZE];
+ unsigned long regs[MAX_REG_NR];
unsigned long fp[HOST_FP_SIZE];
unsigned long xfp[HOST_XFP_SIZE];
struct faultinfo faultinfo;
diff --git a/arch/um/include/sysdep-i386/stub.h b/arch/um/include/sysdep-i386/stub.h
index b492b12b4a1..4fffae75ba5 100644
--- a/arch/um/include/sysdep-i386/stub.h
+++ b/arch/um/include/sysdep-i386/stub.h
@@ -9,6 +9,7 @@
#include <sys/mman.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
+#include <asm/page.h>
#include "stub-data.h"
#include "kern_constants.h"
#include "uml-config.h"
diff --git a/arch/um/include/sysdep-x86_64/checksum.h b/arch/um/include/sysdep-x86_64/checksum.h
index ea97005af69..a5be9031ea8 100644
--- a/arch/um/include/sysdep-x86_64/checksum.h
+++ b/arch/um/include/sysdep-x86_64/checksum.h
@@ -9,8 +9,7 @@
#include "linux/in6.h"
#include "asm/uaccess.h"
-extern unsigned csum_partial(const unsigned char *buff, unsigned len,
- unsigned sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* Note: when you get a NULL pointer exception here this means someone
@@ -21,21 +20,21 @@ extern unsigned csum_partial(const unsigned char *buff, unsigned len,
*/
static __inline__
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
- int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
memcpy(dst, src, len);
return(csum_partial(dst, len, sum));
}
static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src,
- unsigned char *dst, int len, int sum,
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len, __wsum sum,
int *err_ptr)
{
- if(copy_from_user(dst, src, len)){
+ if (copy_from_user(dst, src, len)) {
*err_ptr = -EFAULT;
- return(-1);
+ return (__force __wsum)-1;
}
return csum_partial(dst, len, sum);
}
@@ -48,15 +47,16 @@ unsigned int csum_partial_copy_from_user(const unsigned char *src,
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
{
__asm__(
" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
- : "r" (sum << 16), "0" (sum & 0xffff0000)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000)
);
- return (~sum) >> 16;
+ return (__force __sum16)(~(__force u32)sum >> 16);
}
/**
@@ -70,28 +70,27 @@ static inline unsigned int csum_fold(unsigned int sum)
* Returns the pseudo header checksum the input data. Result is
* 32bit unfolded.
*/
-static inline unsigned long
-csum_tcpudp_nofold(unsigned saddr, unsigned daddr, unsigned short len,
- unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
{
asm(" addl %1, %0\n"
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum)
- : "g" (daddr), "g" (saddr), "g" ((ntohs(len)<<16)+proto*256), "0" (sum));
- return sum;
+ : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
+ return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -101,7 +100,7 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
* iph: ipv4 header
* ihl: length of header / 4
*/
-static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
@@ -128,7 +127,7 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
- return(sum);
+ return (__force __sum16)sum;
}
static inline unsigned add32_with_carry(unsigned a, unsigned b)
@@ -140,6 +139,6 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
return a;
}
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
#endif
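As a reference for the semantics of ip_fast_csum() above, here is a hedged, portable sketch rather than the unrolled asm: sum the IPv4 header as 16-bit words read in memory order, fold, and invert. When generating a checksum the header's checksum field is assumed to be zero; when verifying, a header with a correct checksum folds to zero.

    #include <stdint.h>
    #include <string.h>

    static uint16_t ip_fast_csum_sketch(const void *iph, unsigned int ihl)
    {
            const unsigned char *p = iph;
            uint32_t sum = 0;
            unsigned int i;

            for (i = 0; i < ihl * 4; i += 2) {              /* ihl counts 32-bit words */
                    uint16_t word;
                    memcpy(&word, p + i, sizeof(word));     /* 16-bit word in memory order */
                    sum += word;
            }
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }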
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h
index 617bb9efc93..66cb400c2c9 100644
--- a/arch/um/include/sysdep-x86_64/ptrace.h
+++ b/arch/um/include/sysdep-x86_64/ptrace.h
@@ -108,7 +108,7 @@ union uml_pt_regs {
* file size, while i386 uses FRAME_SIZE. Therefore, we need
* to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE.
*/
- unsigned long regs[UM_FRAME_SIZE];
+ unsigned long regs[MAX_REG_NR];
unsigned long fp[HOST_FP_SIZE];
struct faultinfo faultinfo;
long syscall;
diff --git a/arch/um/os-Linux/drivers/ethertap_kern.c b/arch/um/os-Linux/drivers/ethertap_kern.c
index 16385e2ada8..70541821775 100644
--- a/arch/um/os-Linux/drivers/ethertap_kern.c
+++ b/arch/um/os-Linux/drivers/ethertap_kern.c
@@ -105,4 +105,4 @@ static int register_ethertap(void)
return 0;
}
-__initcall(register_ethertap);
+late_initcall(register_ethertap);
diff --git a/arch/um/os-Linux/drivers/tuntap_kern.c b/arch/um/os-Linux/drivers/tuntap_kern.c
index 0edbac63c52..76570a2c25c 100644
--- a/arch/um/os-Linux/drivers/tuntap_kern.c
+++ b/arch/um/os-Linux/drivers/tuntap_kern.c
@@ -90,4 +90,4 @@ static int register_tuntap(void)
return 0;
}
-__initcall(register_tuntap);
+late_initcall(register_tuntap);
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index e299ee5a753..49057d8bc66 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,7 +3,6 @@
* Licensed under the GPL
*/
-#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 5f3cc668582..01212c88fcc 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -4,9 +4,9 @@
*/
#include <stdio.h>
+#include <stddef.h>
#include <errno.h>
#include <unistd.h>
-#include <linux/stddef.h>
#include "ptrace_user.h"
/* Grr, asm/user.h includes asm/ptrace.h, so has to follow ptrace_user.h */
#include <asm/user.h>
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 6f4ef2b7fa4..447306b20ae 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -2,7 +2,7 @@
#include <signal.h>
#include <asm/ptrace.h>
#include <asm/user.h>
-#include <linux/stddef.h>
+#include <stddef.h>
#include <sys/poll.h>
#define DEFINE(sym, val) \
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 67bc48e57c6..93575fdc874 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -24,7 +24,7 @@ EXPORT_SYMBOL (kernel_thread);
EXPORT_SYMBOL (__bug);
/* Networking helper routines. */
-EXPORT_SYMBOL (csum_partial_copy);
+EXPORT_SYMBOL (csum_partial_copy_nocheck);
EXPORT_SYMBOL (csum_partial_copy_from_user);
EXPORT_SYMBOL (ip_compute_csum);
EXPORT_SYMBOL (ip_fast_csum);
diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S
index 88d087f527c..3a5fd07fe06 100644
--- a/arch/v850/kernel/vmlinux.lds.S
+++ b/arch/v850/kernel/vmlinux.lds.S
@@ -90,6 +90,7 @@
/* Kernel text segment, and some constant data areas. */
#define TEXT_CONTENTS \
+ _text = .; \
__stext = . ; \
*(.text) \
SCHED_TEXT \
diff --git a/arch/v850/lib/checksum.c b/arch/v850/lib/checksum.c
index fa587263307..042158dfe17 100644
--- a/arch/v850/lib/checksum.c
+++ b/arch/v850/lib/checksum.c
@@ -88,32 +88,32 @@ out:
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
- return ~do_csum(iph,ihl*4);
+ return (__force __sum16)~do_csum(iph,ihl*4);
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
- return ~do_csum(buff,len);
+ return (__force __sum16)~do_csum(buff,len);
}
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
- result += sum;
- if(sum > result)
+ result += (__force u32)sum;
+ if ((__force u32)sum > result)
result += 1;
- return result;
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -121,8 +121,8 @@ EXPORT_SYMBOL(csum_partial);
/*
* copy while checksumming, otherwise like csum_partial
*/
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
- int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
/*
* It's 2:30 am and I don't feel like doing it real ...
@@ -138,9 +138,9 @@ unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-unsigned int csum_partial_copy_from_user (const unsigned char *src,
- unsigned char *dst,
- int len, unsigned int sum,
+__wsum csum_partial_copy_from_user (const void *src,
+ void *dst,
+ int len, __wsum sum,
int *err_ptr)
{
int missing;
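The csum_partial() hunk above keeps the end-around-carry step when the caller's running sum is added in: a 32-bit overflow is detected from the wrap-around and folded back, which preserves the one's-complement sum. A small illustrative helper (the name is not from the patch):

    #include <stdint.h>

    static uint32_t csum_add_sketch(uint32_t result, uint32_t sum)
    {
            result += sum;
            if (result < sum)       /* wrapped past 2^32: fold the carry back in */
                    result += 1;
            return result;
    }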
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 010d2265f1c..bfbb9bcae12 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -122,7 +122,7 @@ endchoice
choice
prompt "Processor family"
- default MK8
+ default GENERIC_CPU
config MK8
bool "AMD-Opteron/Athlon64"
@@ -130,16 +130,31 @@ config MK8
Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
config MPSC
- bool "Intel EM64T"
+ bool "Intel P4 / older Netburst based Xeon"
help
- Optimize for Intel Pentium 4 and Xeon CPUs with Intel
- Extended Memory 64 Technology(EM64T). For details see
+ Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs
+ with Intel Extended Memory 64 Technology(EM64T). For details see
<http://www.intel.com/technology/64bitextensions/>.
+ Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
+ Netburst core and shouldn't use this option. You can distinguish them
+ using the cpu family field in /proc/cpuinfo. Family 15 is an older
+ Xeon, Family 6 a newer one (this rule only applies to systems that
+ support EM64T).
+
+config MCORE2
+ bool "Intel Core2 / newer Xeon"
+ help
+ Optimize for Intel Core2 and newer Xeons (51xx)
+ You can distinguish the newer Xeons from the older ones using
+ the cpu family field in /proc/cpuinfo. 15 is an older Xeon
+ (use CONFIG_MPSC then), 6 is a newer one. This rule only
+ applies to CPUs that support EM64T.
config GENERIC_CPU
bool "Generic-x86-64"
help
Generic x86-64 CPU.
+ Runs equally well on all x86-64 CPUs.
endchoice
@@ -149,12 +164,12 @@ endchoice
config X86_L1_CACHE_BYTES
int
default "128" if GENERIC_CPU || MPSC
- default "64" if MK8
+ default "64" if MK8 || MCORE2
config X86_L1_CACHE_SHIFT
int
default "7" if GENERIC_CPU || MPSC
- default "6" if MK8
+ default "6" if MK8 || MCORE2
config X86_INTERNODE_CACHE_BYTES
int
@@ -344,11 +359,6 @@ config ARCH_DISCONTIGMEM_ENABLE
depends on NUMA
default y
-
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
- depends on NUMA
-
config ARCH_DISCONTIGMEM_DEFAULT
def_bool y
depends on NUMA
@@ -455,6 +465,17 @@ config CALGARY_IOMMU
Normally the kernel will make the right choice by itself.
If unsure, say Y.
+config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+ bool "Should Calgary be enabled by default?"
+ default y
+ depends on CALGARY_IOMMU
+ help
+ Should Calgary be enabled by default? If you choose 'y', Calgary
+ will be used (if it exists). If you choose 'n', Calgary will not be
+ used even if it exists. If you choose 'n' and would like to use
+ Calgary anyway, pass 'iommu=calgary' on the kernel command line.
+ If unsure, say Y.
+
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
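The MPSC and MCORE2 help texts above tell the user to check the "cpu family" field in /proc/cpuinfo. A hypothetical user-space helper, purely illustrative and not part of the patch, that reads that field:

    #include <stdio.h>

    /* Returns the CPU family from /proc/cpuinfo, or -1 on error.
     * Family 15 is an older Netburst Xeon (CONFIG_MPSC); family 6 with
     * EM64T support is a Core2-based one (CONFIG_MCORE2). */
    static int cpu_family(void)
    {
            FILE *f = fopen("/proc/cpuinfo", "r");
            char line[256];
            int family = -1;

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f)) {
                    if (sscanf(line, "cpu family : %d", &family) == 1)
                            break;
            }
            fclose(f);
            return family;
    }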
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 6e38d4daeed..b471b8550d0 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -30,6 +30,10 @@ cflags-y :=
cflags-kernel-y :=
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+# gcc doesn't support -march=core2 yet as of gcc 4.3, but I hope it
+# will eventually. Use -mtune=generic as fallback
+cflags-$(CONFIG_MCORE2) += \
+ $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
cflags-y += -m64
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 0f5d44e86be..96f226cfb33 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:52 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec 6 23:50:47 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -47,13 +47,14 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -87,9 +88,7 @@ CONFIG_STOP_MACHINE=y
# Block layer
#
CONFIG_BLOCK=y
-CONFIG_LBD=y
# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
#
# IO Schedulers
@@ -111,10 +110,11 @@ CONFIG_X86_PC=y
# CONFIG_X86_VSMP is not set
# CONFIG_MK8 is not set
# CONFIG_MPSC is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_L1_CACHE_BYTES=128
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_X86_INTERNODE_CACHE_BYTES=128
+CONFIG_MCORE2=y
+# CONFIG_GENERIC_CPU is not set
+CONFIG_X86_L1_CACHE_BYTES=64
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTERNODE_CACHE_BYTES=64
CONFIG_X86_TSC=y
CONFIG_X86_GOOD_APIC=y
# CONFIG_MICROCODE is not set
@@ -322,6 +322,7 @@ CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -624,6 +625,7 @@ CONFIG_SATA_INTEL_COMBINED=y
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_JMICRON is not set
# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
@@ -795,6 +797,7 @@ CONFIG_BNX2=y
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
#
# Token Ring devices
@@ -927,10 +930,6 @@ CONFIG_RTC=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
@@ -1135,6 +1134,7 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_BANDWIDTH is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
# CONFIG_USB_OTG is not set
#
@@ -1212,6 +1212,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
# CONFIG_USB_USBNET is not set
CONFIG_USB_MON=y
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 82ef182de6a..543ef4f405e 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -305,8 +305,6 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
-#define elf_addr_t __u32
-
static void elf32_init(struct pt_regs *);
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
@@ -351,7 +349,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
bprm->loader += stack_base;
bprm->exec += stack_base;
- mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index 0e0a266d976..ff499ef2a1b 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -584,6 +584,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->rdx = (unsigned long) &frame->info;
regs->rcx = (unsigned long) &frame->uc;
+ /* Make -mregparm=3 work */
+ regs->rax = sig;
+ regs->rdx = (unsigned long) &frame->info;
+ regs->rcx = (unsigned long) &frame->uc;
+
asm volatile("movl %0,%%ds" :: "r" (__USER32_DS));
asm volatile("movl %0,%%es" :: "r" (__USER32_DS));
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 3a01329473a..3e5ed20cba4 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -49,7 +49,7 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
struct mm_struct *mm = current->mm;
int ret;
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!vma)
return -ENOMEM;
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 4d9d5ed942b..124b2d27b4a 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -25,6 +25,7 @@
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/module.h>
+#include <linux/ioport.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -45,6 +46,12 @@ int apic_calibrate_pmtmr __initdata;
int disable_apic_timer __initdata;
+static struct resource *ioapic_resources;
+static struct resource lapic_resource = {
+ .name = "Local APIC",
+ .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
/*
* cpu_mask that denotes the CPUs that needs timer interrupt coming in as
* IPIs in place of local APIC timers
@@ -133,7 +140,6 @@ void clear_local_APIC(void)
apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4)
apic_write(APIC_LVTPC, APIC_LVT_MASKED);
- v = GET_APIC_VERSION(apic_read(APIC_LVR));
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
@@ -452,23 +458,30 @@ static struct {
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
unsigned long flags;
+ int maxlvt;
if (!apic_pm_state.active)
return 0;
+ maxlvt = get_maxlvt();
+
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
- apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+ if (maxlvt >= 4)
+ apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
- apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_INTEL
+ if (maxlvt >= 5)
+ apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
@@ -479,10 +492,13 @@ static int lapic_resume(struct sys_device *dev)
{
unsigned int l, h;
unsigned long flags;
+ int maxlvt;
if (!apic_pm_state.active)
return 0;
+ maxlvt = get_maxlvt();
+
local_irq_save(flags);
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
@@ -496,8 +512,12 @@ static int lapic_resume(struct sys_device *dev)
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
- apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
- apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_INTEL
+ if (maxlvt >= 5)
+ apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+ if (maxlvt >= 4)
+ apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
@@ -585,6 +605,64 @@ static int __init detect_init_APIC (void)
return 0;
}
+#ifdef CONFIG_X86_IO_APIC
+static struct resource * __init ioapic_setup_resources(void)
+{
+#define IOAPIC_RESOURCE_NAME_SIZE 11
+ unsigned long n;
+ struct resource *res;
+ char *mem;
+ int i;
+
+ if (nr_ioapics <= 0)
+ return NULL;
+
+ n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+ n *= nr_ioapics;
+
+ mem = alloc_bootmem(n);
+ res = (void *)mem;
+
+ if (mem != NULL) {
+ memset(mem, 0, n);
+ mem += sizeof(struct resource) * nr_ioapics;
+
+ for (i = 0; i < nr_ioapics; i++) {
+ res[i].name = mem;
+ res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ sprintf(mem, "IOAPIC %u", i);
+ mem += IOAPIC_RESOURCE_NAME_SIZE;
+ }
+ }
+
+ ioapic_resources = res;
+
+ return res;
+}
+
+static int __init ioapic_insert_resources(void)
+{
+ int i;
+ struct resource *r = ioapic_resources;
+
+ if (!r) {
+ printk("IO APIC resources could be not be allocated.\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_ioapics; i++) {
+ insert_resource(&iomem_resource, r);
+ r++;
+ }
+
+ return 0;
+}
+
+/* Insert the IO APIC resources after PCI initialization has occurred to handle
+ * IO APICs that are mapped in on a BAR in PCI space. */
+late_initcall(ioapic_insert_resources);
+#endif
+
void __init init_apic_mappings(void)
{
unsigned long apic_phys;
@@ -604,6 +682,11 @@ void __init init_apic_mappings(void)
apic_mapped = 1;
apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
+ /* Put local APIC into the resource map. */
+ lapic_resource.start = apic_phys;
+ lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+ insert_resource(&iomem_resource, &lapic_resource);
+
/*
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
@@ -613,7 +696,9 @@ void __init init_apic_mappings(void)
{
unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
int i;
+ struct resource *ioapic_res;
+ ioapic_res = ioapic_setup_resources();
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mpc_apicaddr;
@@ -625,6 +710,12 @@ void __init init_apic_mappings(void)
apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
__fix_to_virt(idx), ioapic_phys);
idx++;
+
+ if (ioapic_res != NULL) {
+ ioapic_res->start = ioapic_phys;
+ ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+ ioapic_res++;
+ }
}
}
}
@@ -644,10 +735,9 @@ void __init init_apic_mappings(void)
static void __setup_APIC_LVTT(unsigned int clocks)
{
- unsigned int lvtt_value, tmp_value, ver;
+ unsigned int lvtt_value, tmp_value;
int cpu = smp_processor_id();
- ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
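The apic.c hunks above publish the local APIC and IO-APIC MMIO windows in the iomem resource tree, deferring the IO-APIC entries to a late_initcall so that BAR-mapped IO-APICs are handled after PCI initialization. A minimal sketch of that resource-registration pattern, with an illustrative name (not from the patch):

    #include <linux/ioport.h>

    static struct resource example_mmio_resource = {
            .name  = "Example MMIO window",
            .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
    };

    static void example_register_mmio(unsigned long phys, unsigned long size)
    {
            example_mmio_resource.start = phys;
            example_mmio_resource.end   = phys + size - 1;
            /* makes the region visible in /proc/iomem */
            insert_resource(&iomem_resource, &example_mmio_resource);
    }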
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 3525f884af8..95a7a2c1313 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -28,71 +28,6 @@
/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
- void *data, size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) +3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
- struct elf_prstatus prstatus;
- u32 *buf;
-
- if ((cpu < 0) || (cpu >= NR_CPUS))
- return;
-
- /* Using ELF notes here is opportunistic.
- * I need a well defined structure format
- * for the data I pass, and I need tags
- * on the data to indicate what information I have
- * squirrelled away. ELF notes happen to provide
- * all of that, no need to invent something new.
- */
-
- buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-
- if (!buf)
- return;
-
- memset(&prstatus, 0, sizeof(prstatus));
- prstatus.pr_pid = current->pid;
- elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
- sizeof(prstatus));
- final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
- int cpu;
-
- cpu = smp_processor_id();
- crash_save_this_cpu(regs, cpu);
-}
-
#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;
@@ -117,7 +52,7 @@ static int crash_nmi_callback(struct notifier_block *self,
return NOTIFY_STOP;
local_irq_disable();
- crash_save_this_cpu(regs, cpu);
+ crash_save_cpu(regs, cpu);
disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
@@ -196,5 +131,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
disable_IO_APIC();
- crash_save_self(regs);
+ crash_save_cpu(regs, smp_processor_id());
}
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 68273bff58c..829698f6d04 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -69,11 +69,18 @@ static void nvidia_bugs(void)
static void ati_bugs(void)
{
- if (timer_over_8254 == 1) {
- timer_over_8254 = 0;
- printk(KERN_INFO
- "ATI board detected. Disabling timer routing over 8254.\n");
- }
+}
+
+static void intel_bugs(void)
+{
+ u16 device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+
+#ifdef CONFIG_SMP
+ if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+ device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+ quirk_intel_irqbalance();
+#endif
}
struct chipset {
@@ -85,6 +92,7 @@ static struct chipset early_qrk[] = {
{ PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
{ PCI_VENDOR_ID_VIA, via_bugs },
{ PCI_VENDOR_ID_ATI, ati_bugs },
+ { PCI_VENDOR_ID_INTEL, intel_bugs},
{}
};
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7d401b00d82..601d332c4b7 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -230,7 +230,6 @@ ENTRY(system_call)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
- CFI_REMEMBER_STATE
jnz tracesys
cmpq $__NR_syscall_max,%rax
ja badsys
@@ -241,7 +240,6 @@ ENTRY(system_call)
* Syscall return path ending with SYSRET (fast path)
* Has incomplete stack frame and undefined top of stack.
*/
- .globl ret_from_sys_call
ret_from_sys_call:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: flagmask */
@@ -251,8 +249,8 @@ sysret_check:
TRACE_IRQS_OFF
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
- CFI_REMEMBER_STATE
jnz sysret_careful
+ CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
*/
@@ -265,10 +263,10 @@ sysret_check:
swapgs
sysretq
+ CFI_RESTORE_STATE
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
- CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc sysret_signal
TRACE_IRQS_ON
@@ -306,7 +304,6 @@ badsys:
/* Do syscall tracing */
tracesys:
- CFI_RESTORE_STATE
SAVE_REST
movq $-ENOSYS,RAX(%rsp)
FIXUP_TOP_OF_STACK %rdi
@@ -322,32 +319,13 @@ tracesys:
call *sys_call_table(,%rax,8)
1: movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
- jmp int_ret_from_sys_call
- CFI_ENDPROC
-END(system_call)
/*
* Syscall return path ending with IRET.
* Has correct top of stack, but partial stack frame.
- */
-ENTRY(int_ret_from_sys_call)
- CFI_STARTPROC simple
- CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,SS+8-ARGOFFSET
- /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
- CFI_REL_OFFSET rsp,RSP-ARGOFFSET
- /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
- /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
- CFI_REL_OFFSET rip,RIP-ARGOFFSET
- CFI_REL_OFFSET rdx,RDX-ARGOFFSET
- CFI_REL_OFFSET rcx,RCX-ARGOFFSET
- CFI_REL_OFFSET rax,RAX-ARGOFFSET
- CFI_REL_OFFSET rdi,RDI-ARGOFFSET
- CFI_REL_OFFSET rsi,RSI-ARGOFFSET
- CFI_REL_OFFSET r8,R8-ARGOFFSET
- CFI_REL_OFFSET r9,R9-ARGOFFSET
- CFI_REL_OFFSET r10,R10-ARGOFFSET
- CFI_REL_OFFSET r11,R11-ARGOFFSET
+ */
+ .globl int_ret_from_sys_call
+int_ret_from_sys_call:
cli
TRACE_IRQS_OFF
testl $3,CS-ARGOFFSET(%rsp)
@@ -394,8 +372,6 @@ int_very_careful:
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
- cli
- TRACE_IRQS_OFF
jmp int_restore_rest
int_signal:
@@ -411,7 +387,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
-END(int_ret_from_sys_call)
+END(system_call)
/*
* Certain special system calls that need to save a complete full stack frame.
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index 8e78a75d186..b007433f96b 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -33,7 +33,7 @@ extern struct genapic apic_flat;
extern struct genapic apic_physflat;
struct genapic *genapic = &apic_flat;
-
+struct genapic *genapic_force;
/*
* Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
@@ -46,6 +46,13 @@ void __init clustered_apic_check(void)
u8 cluster_cnt[NUM_APIC_CLUSTERS];
int max_apic = 0;
+ /* genapic selection can be forced because of certain quirks.
+ */
+ if (genapic_force) {
+ genapic = genapic_force;
+ goto print;
+ }
+
#if defined(CONFIG_ACPI)
/*
* Some x86_64 machines use physical APIC mode regardless of how many
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 9561eb3c5b5..cc230b93cd1 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -57,10 +57,12 @@ void __init x86_64_start_kernel(char * real_mode_data)
{
int i;
- for (i = 0; i < 256; i++)
+ /* clear bss before set_intr_gate with early_idt_handler */
+ clear_bss();
+
+ for (i = 0; i < IDT_ENTRIES; i++)
set_intr_gate(i, early_idt_handler);
asm volatile("lidt %0" :: "m" (idt_descr));
- clear_bss();
early_printk("Kernel alive\n");
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index 3aa1e9bb781..1d58c13bc6b 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -82,11 +82,8 @@ int save_i387(struct _fpstate __user *buf)
struct task_struct *tsk = current;
int err = 0;
- {
- extern void bad_user_i387_struct(void);
- if (sizeof(struct user_i387_struct) != sizeof(tsk->thread.i387.fxsave))
- bad_user_i387_struct();
- }
+ BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
+ sizeof(tsk->thread.i387.fxsave));
if ((unsigned long)buf % 16)
printk("save_i387: bad fpstate %p\n",buf);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index c4ef801b765..d73c79e821f 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -76,7 +76,8 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-void (*interrupt[NR_IRQS])(void) = {
+/* for the irq vectors */
+static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
IRQLIST_16(0x2), IRQLIST_16(0x3),
IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index c80081a6ba4..2a1dcd5f69c 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -55,10 +55,6 @@ int sis_apic_bug; /* not actually supported, dummy for compile */
static int no_timer_check;
-static int disable_timer_pin_1 __initdata;
-
-int timer_over_8254 __initdata = 1;
-
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -178,14 +174,20 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
* the interrupt, and we need to make sure the entry is fully populated
* before that happens.
*/
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
- unsigned long flags;
union entry_union eu;
eu.entry = e;
- spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __ioapic_write_entry(apic, pin, e);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -348,29 +350,6 @@ static int __init disable_ioapic_setup(char *str)
}
early_param("noapic", disable_ioapic_setup);
-/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init disable_timer_pin_setup(char *arg)
-{
- disable_timer_pin_1 = 1;
- return 1;
-}
-__setup("disable_timer_pin_1", disable_timer_pin_setup);
-
-static int __init setup_disable_8254_timer(char *s)
-{
- timer_over_8254 = -1;
- return 1;
-}
-static int __init setup_enable_8254_timer(char *s)
-{
- timer_over_8254 = 2;
- return 1;
-}
-
-__setup("disable_8254_timer", setup_disable_8254_timer);
-__setup("enable_8254_timer", setup_enable_8254_timer);
-
-
/*
* Find the IRQ entry number of a certain pin.
*/
@@ -750,6 +729,22 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
return vector;
}
+static void __clear_irq_vector(int irq)
+{
+ cpumask_t mask;
+ int cpu, vector;
+
+ BUG_ON(!irq_vector[irq]);
+
+ vector = irq_vector[irq];
+ cpus_and(mask, irq_domain[irq], cpu_online_map);
+ for_each_cpu_mask(cpu, mask)
+ per_cpu(vector_irq, cpu)[vector] = -1;
+
+ irq_vector[irq] = 0;
+ irq_domain[irq] = CPU_MASK_NONE;
+}
+
void __setup_vector_irq(int cpu)
{
/* Initialize vector_irq on a new cpu */
@@ -794,27 +789,65 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
handle_edge_irq, "edge");
}
}
-
-static void __init setup_IO_APIC_irqs(void)
+static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
{
struct IO_APIC_route_entry entry;
- int apic, pin, idx, irq, first_notcon = 1, vector;
+ int vector;
unsigned long flags;
- apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
- for (apic = 0; apic < nr_ioapics; apic++) {
- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ /*
+ * add it to the IO-APIC irq-routing table:
+ */
+ memset(&entry,0,sizeof(entry));
- /*
- * add it to the IO-APIC irq-routing table:
- */
- memset(&entry,0,sizeof(entry));
+ entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = INT_DEST_MODE;
+ entry.mask = 0; /* enable IRQ */
+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+
+ entry.trigger = irq_trigger(idx);
+ entry.polarity = irq_polarity(idx);
- entry.delivery_mode = INT_DELIVERY_MODE;
- entry.dest_mode = INT_DEST_MODE;
- entry.mask = 0; /* enable IRQ */
+ if (irq_trigger(idx)) {
+ entry.trigger = 1;
+ entry.mask = 1;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+ }
+
+ if (!apic && !IO_APIC_IRQ(irq))
+ return;
+
+ if (IO_APIC_IRQ(irq)) {
+ cpumask_t mask;
+ vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+ if (vector < 0)
+ return;
+
+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+ entry.vector = vector;
+
+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+ if (!apic && (irq < 16))
+ disable_8259A_irq(irq);
+ }
+
+ ioapic_write_entry(apic, pin, entry);
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ set_native_irq_info(irq, TARGET_CPUS);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+}
+
+static void __init setup_IO_APIC_irqs(void)
+{
+ int apic, pin, idx, irq, first_notcon = 1;
+
+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
idx = find_irq_entry(apic,pin,mp_INT);
if (idx == -1) {
@@ -826,39 +859,11 @@ static void __init setup_IO_APIC_irqs(void)
continue;
}
- entry.trigger = irq_trigger(idx);
- entry.polarity = irq_polarity(idx);
-
- if (irq_trigger(idx)) {
- entry.trigger = 1;
- entry.mask = 1;
- entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
- }
-
irq = pin_2_irq(idx, apic, pin);
add_pin_to_irq(irq, apic, pin);
- if (!apic && !IO_APIC_IRQ(irq))
- continue;
-
- if (IO_APIC_IRQ(irq)) {
- cpumask_t mask;
- vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
- if (vector < 0)
- continue;
-
- entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
- entry.vector = vector;
-
- ioapic_register_intr(irq, vector, IOAPIC_AUTO);
- if (!apic && (irq < 16))
- disable_8259A_irq(irq);
- }
- ioapic_write_entry(apic, pin, entry);
+ setup_IO_APIC_irq(apic, pin, idx, irq);
- spin_lock_irqsave(&ioapic_lock, flags);
- set_native_irq_info(irq, TARGET_CPUS);
- spin_unlock_irqrestore(&ioapic_lock, flags);
}
}
@@ -1563,10 +1568,33 @@ static inline void unlock_ExtINT_logic(void)
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
- *
- * FIXME: really need to revamp this for modern platforms only.
*/
-static inline void check_timer(void)
+
+static int try_apic_pin(int apic, int pin, char *msg)
+{
+ apic_printk(APIC_VERBOSE, KERN_INFO
+ "..TIMER: trying IO-APIC=%d PIN=%d %s",
+ apic, pin, msg);
+
+ /*
+ * Ok, does IRQ0 through the IOAPIC work?
+ */
+ if (!no_timer_check && timer_irq_works()) {
+ nmi_watchdog_default();
+ if (nmi_watchdog == NMI_IO_APIC) {
+ disable_8259A_irq(0);
+ setup_nmi();
+ enable_8259A_irq(0);
+ }
+ return 1;
+ }
+ clear_IO_APIC_pin(apic, pin);
+ apic_printk(APIC_QUIET, KERN_ERR " .. failed\n");
+ return 0;
+}
+
+/* The function from hell */
+static void check_timer(void)
{
int apic1, pin1, apic2, pin2;
int vector;
@@ -1587,61 +1615,43 @@ static inline void check_timer(void)
*/
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
- if (timer_over_8254 > 0)
- enable_8259A_irq(0);
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
- apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
- vector, apic1, pin1, apic2, pin2);
+ /* Do this first, otherwise we get double interrupts on ATI boards */
+ if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled"))
+ return;
- if (pin1 != -1) {
- /*
- * Ok, does IRQ0 through the IOAPIC work?
- */
- unmask_IO_APIC_irq(0);
- if (!no_timer_check && timer_irq_works()) {
- nmi_watchdog_default();
- if (nmi_watchdog == NMI_IO_APIC) {
- disable_8259A_irq(0);
- setup_nmi();
- enable_8259A_irq(0);
- }
- if (disable_timer_pin_1 > 0)
- clear_IO_APIC_pin(0, pin1);
- return;
- }
- clear_IO_APIC_pin(apic1, pin1);
- apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
- "connected to IO-APIC\n");
- }
+ /* Now try again with IRQ0 8259A enabled.
+ Assumes timer is on IO-APIC 0 ?!? */
+ enable_8259A_irq(0);
+ unmask_IO_APIC_irq(0);
+ if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled"))
+ return;
+ disable_8259A_irq(0);
+
+ /* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides
+ on Nvidia boards */
+ if (!(apic1 == 0 && pin1 == 0) &&
+ try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled"))
+ return;
+ if (!(apic1 == 0 && pin1 == 2) &&
+ try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled"))
+ return;
- apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
- "through the 8259A ... ");
+ /* Then try pure 8259A routing on the 8259 as reported by the BIOS */
+ enable_8259A_irq(0);
if (pin2 != -1) {
- apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
- apic2, pin2);
- /*
- * legacy devices should be connected to IO APIC #0
- */
setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
- if (timer_irq_works()) {
- apic_printk(APIC_VERBOSE," works.\n");
- nmi_watchdog_default();
- if (nmi_watchdog == NMI_IO_APIC) {
- setup_nmi();
- }
+ if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS"))
return;
- }
- /*
- * Cleanup, just in case ...
- */
- clear_IO_APIC_pin(apic2, pin2);
}
- apic_printk(APIC_VERBOSE," failed.\n");
+
+ /* Tried all possibilities to go through the IO-APIC. Now come the
+ really cheesy fallbacks. */
if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
@@ -1837,7 +1847,7 @@ void destroy_irq(unsigned int irq)
dynamic_irq_cleanup(irq);
spin_lock_irqsave(&vector_lock, flags);
- irq_vector[irq] = 0;
+ __clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
}
@@ -2139,7 +2149,15 @@ void __init setup_ioapic_dest(void)
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
- set_ioapic_affinity_irq(irq, TARGET_CPUS);
+
+ /* setup_IO_APIC_irqs() could fail to get a vector for some device
+ * when there are too many devices, because at that time only the
+ * boot CPU is online.
+ */
+ if(!irq_vector[irq])
+ setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
+ else
+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
}
}
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index e46c55856d4..0c06af6c13b 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -120,7 +120,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
if (likely(irq < NR_IRQS))
generic_handle_irq(irq);
- else
+ else if (printk_ratelimit())
printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
__func__, smp_processor_id(), vector);
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index ac241567e68..209c8c0bec7 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -224,7 +224,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn);
+ free_insn_slot(p->ainsn.insn, 0);
mutex_unlock(&kprobe_mutex);
}
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d8..ac085038af2 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
*/
static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
static void mcheck_check_cpu(void *info)
{
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
do_machine_check(NULL, 0);
}
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
{
on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
schedule_delayed_work(&mcheck_work, check_interval * HZ);
@@ -641,7 +641,6 @@ static __cpuinit int mce_create_device(unsigned int cpu)
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
static void mce_remove_device(unsigned int cpu)
{
int i;
@@ -652,6 +651,7 @@ static void mce_remove_device(unsigned int cpu)
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
sysdev_unregister(&per_cpu(device_mce,cpu));
+ memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -674,7 +674,6 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
static struct notifier_block mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
-#endif
static __init int mce_init_device(void)
{
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 883fe747f64..fa09debad4b 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -551,7 +551,6 @@ out:
return err;
}
-#ifdef CONFIG_HOTPLUG_CPU
/*
* let's be hotplug friendly.
* in case of multiple core processors, the first core always takes ownership
@@ -594,12 +593,14 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
sprintf(name, "threshold_bank%i", bank);
+#ifdef CONFIG_SMP
/* sibling symlink */
if (shared_bank[bank] && b->blocks->cpu != cpu) {
sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
}
+#endif
/* remove all sibling symlinks before unregistering */
for_each_cpu_mask(i, b->cpus) {
@@ -656,7 +657,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
static struct notifier_block threshold_cpu_notifier = {
.notifier_call = threshold_cpu_callback,
};
-#endif /* CONFIG_HOTPLUG_CPU */
static __init int threshold_init_device(void)
{
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index b147ab19fbd..08072568847 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -35,8 +35,6 @@
int smp_found_config;
unsigned int __initdata maxcpus = NR_CPUS;
-int acpi_found_madt;
-
/*
* Various Linux-internal data structures created from the
* MP-table.
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 7af9cb3e2d9..27e95e7922c 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -12,14 +12,15 @@
* Mikael Pettersson : PM converted to driver model. Disable/enable API.
*/
+#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
-#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
+#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/nmi.h>
@@ -41,6 +42,8 @@ int panic_on_unrecovered_nmi;
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
*/
@@ -782,6 +785,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
int sum;
int touched = 0;
+ int cpu = smp_processor_id();
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
u64 dummy;
int rc=0;
@@ -799,6 +803,16 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
touched = 1;
}
+ if (cpu_isset(cpu, backtrace_mask)) {
+ static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+
+ spin_lock(&lock);
+ printk("NMI backtrace for cpu %d\n", cpu);
+ dump_stack();
+ spin_unlock(&lock);
+ cpu_clear(cpu, backtrace_mask);
+ }
+
#ifdef CONFIG_X86_MCE
/* Could check oops_in_progress here too, but it's safer
not too */
@@ -931,6 +945,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
#endif
+void __trigger_all_cpu_backtrace(void)
+{
+ int i;
+
+ backtrace_mask = cpu_online_map;
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpus_empty(backtrace_mask))
+ break;
+ mdelay(1);
+ }
+}
+
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
diff --git a/arch/x86_64/kernel/pci-calgary.c b/arch/x86_64/kernel/pci-calgary.c
index 37a770859e7..3215675ab12 100644
--- a/arch/x86_64/kernel/pci-calgary.c
+++ b/arch/x86_64/kernel/pci-calgary.c
@@ -41,6 +41,13 @@
#include <asm/pci-direct.h>
#include <asm/system.h>
#include <asm/dma.h>
+#include <asm/rio.h>
+
+#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
+int use_calgary __read_mostly = 1;
+#else
+int use_calgary __read_mostly = 0;
+#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */
#define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
#define PCI_VENDOR_DEVICE_ID_CALGARY \
@@ -115,14 +122,35 @@ static const unsigned long phb_offsets[] = {
0xB000 /* PHB3 */
};
+/* PHB debug registers */
+
+static const unsigned long phb_debug_offsets[] = {
+ 0x4000 /* PHB 0 DEBUG */,
+ 0x5000 /* PHB 1 DEBUG */,
+ 0x6000 /* PHB 2 DEBUG */,
+ 0x7000 /* PHB 3 DEBUG */
+};
+
+/*
+ * STUFF register for each debug PHB,
+ * byte 1 = start bus number, byte 2 = end bus number
+ */
+
+#define PHB_DEBUG_STUFF_OFFSET 0x0020
+
unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
static int translate_empty_slots __read_mostly = 0;
static int calgary_detected __read_mostly = 0;
+static struct rio_table_hdr *rio_table_hdr __initdata;
+static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
+static struct rio_detail *rio_devs[MAX_NUMNODES * 4] __initdata;
+
struct calgary_bus_info {
void *tce_space;
unsigned char translation_disabled;
signed char phbid;
+ void __iomem *bbar;
};
static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
@@ -475,6 +503,11 @@ static struct dma_mapping_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
};
+static inline void __iomem * busno_to_bbar(unsigned char num)
+{
+ return bus_info[num].bbar;
+}
+
static inline int busno_to_phbid(unsigned char num)
{
return bus_info[num].phbid;
@@ -620,14 +653,9 @@ static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
static void __init calgary_reserve_regions(struct pci_dev *dev)
{
unsigned int npages;
- void __iomem *bbar;
- unsigned char busnum;
u64 start;
struct iommu_table *tbl = dev->sysdata;
- bbar = tbl->bbar;
- busnum = dev->bus->number;
-
/* reserve bad_dma_address in case it's a legal address */
iommu_range_reserve(tbl, bad_dma_address, 1);
@@ -740,7 +768,7 @@ static void __init calgary_increase_split_completion_timeout(void __iomem *bbar,
{
u64 val64;
void __iomem *target;
- unsigned long phb_shift = -1;
+ unsigned int phb_shift = ~0; /* silence gcc */
u64 mask;
switch (busno_to_phbid(busnum)) {
@@ -828,33 +856,6 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
del_timer_sync(&tbl->watchdog_timer);
}
-static inline unsigned int __init locate_register_space(struct pci_dev *dev)
-{
- int rionodeid;
- u32 address;
-
- /*
- * Each Calgary has four busses. The first four busses (first Calgary)
- * have RIO node ID 2, then the next four (second Calgary) have RIO
- * node ID 3, the next four (third Calgary) have node ID 2 again, etc.
- * We use a gross hack - relying on the dev->bus->number ordering,
- * modulo 14 - to decide which Calgary a given bus is on. Busses 0, 1,
- * 2 and 4 are on the first Calgary (id 2), 6, 8, a and c are on the
- * second (id 3), and then it repeats modulo 14.
- */
- rionodeid = (dev->bus->number % 14 > 4) ? 3 : 2;
- /*
- * register space address calculation as follows:
- * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
- * ChassisBase is always zero for x366/x260/x460
- * RioNodeId is 2 for first Calgary, 3 for second Calgary
- */
- address = START_ADDRESS -
- (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 14)) +
- (0x100000) * (rionodeid - CHASSIS_BASE);
- return address;
-}
-
static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
{
pci_dev_get(dev);
@@ -864,23 +865,15 @@ static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
static int __init calgary_init_one(struct pci_dev *dev)
{
- u32 address;
void __iomem *bbar;
int ret;
BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
- address = locate_register_space(dev);
- /* map entire 1MB of Calgary config space */
- bbar = ioremap_nocache(address, 1024 * 1024);
- if (!bbar) {
- ret = -ENODATA;
- goto done;
- }
-
+ bbar = busno_to_bbar(dev->bus->number);
ret = calgary_setup_tar(dev, bbar);
if (ret)
- goto iounmap;
+ goto done;
pci_dev_get(dev);
dev->bus->self = dev;
@@ -888,17 +881,66 @@ static int __init calgary_init_one(struct pci_dev *dev)
return 0;
-iounmap:
- iounmap(bbar);
done:
return ret;
}
+static int __init calgary_locate_bbars(void)
+{
+ int ret;
+ int rioidx, phb, bus;
+ void __iomem *bbar;
+ void __iomem *target;
+ unsigned long offset;
+ u8 start_bus, end_bus;
+ u32 val;
+
+ ret = -ENODATA;
+ for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
+ struct rio_detail *rio = rio_devs[rioidx];
+
+ if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
+ continue;
+
+ /* map entire 1MB of Calgary config space */
+ bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
+ if (!bbar)
+ goto error;
+
+ for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
+ offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
+ target = calgary_reg(bbar, offset);
+
+ val = be32_to_cpu(readl(target));
+ start_bus = (u8)((val & 0x00FF0000) >> 16);
+ end_bus = (u8)((val & 0x0000FF00) >> 8);
+ for (bus = start_bus; bus <= end_bus; bus++) {
+ bus_info[bus].bbar = bbar;
+ bus_info[bus].phbid = phb;
+ }
+ }
+ }
+
+ return 0;
+
+error:
+ /* scan bus_info and iounmap any bbars we previously ioremap'd */
+ for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
+ if (bus_info[bus].bbar)
+ iounmap(bus_info[bus].bbar);
+
+ return ret;
+}
+
static int __init calgary_init(void)
{
- int ret = -ENODEV;
+ int ret;
struct pci_dev *dev = NULL;
+ ret = calgary_locate_bbars();
+ if (ret)
+ return ret;
+
do {
dev = pci_get_device(PCI_VENDOR_ID_IBM,
PCI_DEVICE_ID_IBM_CALGARY,
@@ -921,7 +963,7 @@ static int __init calgary_init(void)
error:
do {
- dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
+ dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
PCI_DEVICE_ID_IBM_CALGARY,
dev);
if (!dev)
@@ -962,13 +1004,56 @@ static inline int __init determine_tce_table_size(u64 ram)
return ret;
}
+static int __init build_detail_arrays(void)
+{
+ unsigned long ptr;
+ int i, scal_detail_size, rio_detail_size;
+
+ if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
+ printk(KERN_WARNING
+ "Calgary: MAX_NUMNODES too low! Defined as %d, "
+ "but system has %d nodes.\n",
+ MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+ return -ENODEV;
+ }
+
+ switch (rio_table_hdr->version){
+ case 2:
+ scal_detail_size = 11;
+ rio_detail_size = 13;
+ break;
+ case 3:
+ scal_detail_size = 12;
+ rio_detail_size = 15;
+ break;
+ default:
+ printk(KERN_WARNING
+ "Calgary: Invalid Rio Grande Table Version: %d\n",
+ rio_table_hdr->version);
+ return -EPROTO;
+ }
+
+ ptr = ((unsigned long)rio_table_hdr) + 3;
+ for (i = 0; i < rio_table_hdr->num_scal_dev;
+ i++, ptr += scal_detail_size)
+ scal_devs[i] = (struct scal_detail *)ptr;
+
+ for (i = 0; i < rio_table_hdr->num_rio_dev;
+ i++, ptr += rio_detail_size)
+ rio_devs[i] = (struct rio_detail *)ptr;
+
+ return 0;
+}
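
build_detail_arrays() indexes two runs of packed, variable-size records by advancing a raw byte offset, since the record size depends on the table version and the entries are not naturally aligned. A user-space sketch of the same pointer walk; the 13-byte record size is only an example, not a claim about the real table layout:

#include <stdio.h>

/* Record a pointer to each of `count` consecutive records of `rec_size`
 * bytes starting at `base` (mirrors the scal_devs/rio_devs loops above). */
static void index_records(unsigned char *base, int count, int rec_size,
			  unsigned char **out)
{
	unsigned long ptr = (unsigned long)base;
	int i;

	for (i = 0; i < count; i++, ptr += rec_size)
		out[i] = (unsigned char *)ptr;
}

int main(void)
{
	unsigned char table[64] = { 0 };
	unsigned char *recs[4];
	int i;

	index_records(table, 4, 13, recs);	/* 13 = hypothetical record size */
	for (i = 0; i < 4; i++)
		printf("record %d at offset %ld\n", i, (long)(recs[i] - table));
	return 0;
}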
+
void __init detect_calgary(void)
{
u32 val;
int bus;
void *tbl;
int calgary_found = 0;
- int phb = -1;
+ unsigned long ptr;
+ int offset;
+ int ret;
/*
* if the user specified iommu=off or iommu=soft or we found
@@ -977,25 +1062,47 @@ void __init detect_calgary(void)
if (swiotlb || no_iommu || iommu_detected)
return;
+ if (!use_calgary)
+ return;
+
if (!early_pci_allowed())
return;
+ ptr = (unsigned long)phys_to_virt(get_bios_ebda());
+
+ rio_table_hdr = NULL;
+ offset = 0x180;
+ while (offset) {
+ /* The block id is stored in the 2nd word */
+ if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
+ /* set the pointer past the offset & block id */
+ rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
+ break;
+ }
+ /* The next offset is stored in the 1st word. 0 means no more */
+ offset = *((unsigned short *)(ptr + offset));
+ }
+ if (!rio_table_hdr) {
+ printk(KERN_ERR "Calgary: Unable to locate "
+ "Rio Grande Table in EBDA - bailing!\n");
+ return;
+ }
+
+ ret = build_detail_arrays();
+ if (ret) {
+ printk(KERN_ERR "Calgary: build_detail_arrays ret %d\n", ret);
+ return;
+ }
+
specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
int dev;
struct calgary_bus_info *info = &bus_info[bus];
- info->phbid = -1;
if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
continue;
- /*
- * There are 4 PHBs per Calgary chip. Set phb to which phb (0-3)
- * it is connected to releative to the clagary chip.
- */
- phb = (phb + 1) % PHBS_PER_CALGARY;
-
if (info->translation_disabled)
continue;
@@ -1010,7 +1117,6 @@ void __init detect_calgary(void)
if (!tbl)
goto cleanup;
info->tce_space = tbl;
- info->phbid = phb;
calgary_found = 1;
break;
}
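
The EBDA scan added to detect_calgary() above follows a chain of blocks: the first 16-bit word of each block is the offset of the next block (0 ends the chain) and the second word is the block id, with id 0x4752 marking the Rio Grande table. A standalone sketch of that chain walk over an ordinary buffer; the offsets and ids below are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

/* Follow the offset chain starting at `first`; return the offset of the
 * block whose id matches `want`, or 0 if the chain ends without a hit. */
static unsigned find_block(const unsigned char *ebda, unsigned first,
			   uint16_t want)
{
	unsigned offset = first;

	while (offset) {
		/* word 0: offset of the next block, word 1: block id */
		if (*(const uint16_t *)(ebda + offset + 2) == want)
			return offset;
		offset = *(const uint16_t *)(ebda + offset);
	}
	return 0;
}

int main(void)
{
	unsigned char ebda[0x400] = { 0 };

	/* Fabricated chain: block at 0x180 -> block at 0x200 (id 0x4752). */
	*(uint16_t *)(ebda + 0x180) = 0x200;
	*(uint16_t *)(ebda + 0x182) = 0x1234;
	*(uint16_t *)(ebda + 0x200) = 0;
	*(uint16_t *)(ebda + 0x202) = 0x4752;

	printf("found at 0x%x\n", find_block(ebda, 0x180, 0x4752));
	return 0;
}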
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index f8d857453f8..683b7a5c1ab 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -296,6 +296,11 @@ __init int iommu_setup(char *p)
gart_parse_options(p);
#endif
+#ifdef CONFIG_CALGARY_IOMMU
+ if (!strncmp(p, "calgary", 7))
+ use_calgary = 1;
+#endif /* CONFIG_CALGARY_IOMMU */
+
p += strcspn(p, ",");
if (*p == ',')
++p;
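
iommu_setup() treats its argument as a comma-separated list and only matches the head of each token, which is why a plain strncmp() is sufficient for the new "calgary" option. A sketch of the same comma-splitting loop outside the kernel; the option names are just examples:

#include <stdio.h>
#include <string.h>

/* Scan a comma-separated option string the way iommu_setup() does:
 * compare the start of each token, then skip to the next comma. */
static int parse_iommu_options(const char *arg)
{
	const char *p = arg;
	int use_calgary = 0;

	while (*p) {
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return use_calgary;
}

int main(void)
{
	printf("%d\n", parse_iommu_options("noagp,calgary"));	/* prints 1 */
	printf("%d\n", parse_iommu_options("soft"));		/* prints 0 */
	return 0;
}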
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 16261a8a330..fc1960f1f24 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -601,10 +601,9 @@ void __init gart_iommu_init(void)
(!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
!iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
- printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
if (end_pfn > MAX_DMA32_PFN) {
printk(KERN_ERR "WARNING more than 4GB of memory "
- "but IOMMU not available.\n"
+ "but GART IOMMU not available.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n");
}
return;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7451a4c43c1..a418ee4c8c6 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -108,17 +108,15 @@ void exit_idle(void)
*/
static void default_idle(void)
{
- local_irq_enable();
-
current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit();
- while (!need_resched()) {
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
- }
+ local_irq_disable();
+ if (!need_resched()) {
+ /* Enables interrupts one instruction before HLT.
+ x86 special cases this so there is no race. */
+ safe_halt();
+ } else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
}
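
The rewritten default_idle() relies on the x86 guarantee that sti takes effect only after the next instruction, so the "sti; hlt" pair inside safe_halt() cannot lose a wakeup between the check and the halt. A stub-based sketch of the control flow, runnable as a plain program; need_resched(), safe_halt() and the irq helpers here are stand-ins, not the kernel implementations:

#include <stdio.h>

static int  need_resched(void)      { return 0; }
static void local_irq_disable(void) { printf("cli\n"); }
static void local_irq_enable(void)  { printf("sti\n"); }
static void safe_halt(void)         { printf("sti; hlt\n"); }

/* Same shape as the new default_idle(): interrupts stay disabled while
 * need_resched() is tested and are re-enabled only by safe_halt() itself,
 * one instruction before hlt, so a wakeup cannot slip into the window. */
static void idle_once(void)
{
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}

int main(void)
{
	idle_once();
	return 0;
}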
@@ -130,15 +128,7 @@ static void default_idle(void)
static void poll_idle (void)
{
local_irq_enable();
-
- asm volatile(
- "2:"
- "testl %0,%1;"
- "rep; nop;"
- "je 2b;"
- : :
- "i" (_TIF_NEED_RESCHED),
- "m" (current_thread_info()->flags));
+ cpu_relax();
}
void cpu_idle_wait(void)
@@ -219,6 +209,12 @@ void cpu_idle (void)
idle = default_idle;
if (cpu_is_offline(smp_processor_id()))
play_dead();
+ /*
+ * Idle routines should keep interrupts disabled
+ * from here on, until they go to idle.
+ * Otherwise, idle callbacks can misfire.
+ */
+ local_irq_disable();
enter_idle();
idle();
/* In many cases the interrupt that ended idle
@@ -256,9 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
- local_irq_enable();
- while (!need_resched())
- mwait_idle_with_hints(0,0);
+ if (!need_resched()) {
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())
+ __sti_mwait(0, 0);
+ else
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ }
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index fc944b5e8f4..af425a8049f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -471,8 +471,7 @@ void __init setup_arch(char **cmdline_p)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
- initrd_start =
- INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+ initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start+INITRD_SIZE;
}
else {
@@ -732,11 +731,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/* Fix cpuid4 emulation for more */
num_cache_leaves = 3;
- /* When there is only one core no need to synchronize RDTSC */
- if (num_possible_cpus() == 1)
- set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
- else
- clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ /* RDTSC can be speculated around */
+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -835,6 +831,15 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
}
+ if (cpu_has_ds) {
+ unsigned int l1, l2;
+ rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+ if (!(l1 & (1<<11)))
+ set_bit(X86_FEATURE_BTS, c->x86_capability);
+ if (!(l1 & (1<<12)))
+ set_bit(X86_FEATURE_PEBS, c->x86_capability);
+ }
+
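
The new test keys off MSR_IA32_MISC_ENABLE, where bit 11 reads as "BTS unavailable" and bit 12 as "PEBS unavailable", so the features are advertised only when those bits are clear. A tiny illustration of the bit test on a fabricated register value (no rdmsr here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical low word of MISC_ENABLE with bit 11 set:
	 * BTS unavailable, PEBS available. */
	uint32_t misc_enable_lo = 0x00000800;

	printf("BTS  %savailable\n", (misc_enable_lo & (1u << 11)) ? "not " : "");
	printf("PEBS %savailable\n", (misc_enable_lo & (1u << 12)) ? "not " : "");
	return 0;
}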
n = c->extended_cpuid_level;
if (n >= 0x80000008) {
unsigned eax = cpuid_eax(0x80000008);
@@ -854,7 +859,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
if (c->x86 == 6)
set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
- set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ if (c->x86 == 15)
+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+ else
+ clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 9f74c883568..af1ec4d23cf 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -379,12 +379,17 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
put_cpu();
return 0;
}
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait);
spin_unlock_bh(&call_lock);
put_cpu();
return 0;
}
+EXPORT_SYMBOL(smp_call_function_single);
/*
* this function sends a 'generic call function' IPI to all other CPUs
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af5..daf19332f0d 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -60,6 +60,7 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/numa.h>
+#include <asm/genapic.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -753,14 +754,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
}
struct create_idle {
+ struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
{
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
@@ -775,10 +778,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
int timeout;
unsigned long start_rip;
struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
- DECLARE_WORK(work, do_fork_idle, &c_idle);
/* allocate memory for gdts of secondary cpus. Hotplug is considered */
if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +828,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
* thread.
*/
if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
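
The smpboot change follows the newer workqueue convention: the work item is embedded in its context struct and the handler recovers the context with container_of(), instead of being handed a void * argument. A user-space sketch of that pattern with a stubbed-down work_struct (not the kernel's definition):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { void (*func)(struct work_struct *); };

struct create_idle {
	struct work_struct work;	/* embedded work item */
	int cpu;			/* per-request context */
};

/* The handler gets the work item and climbs back to the enclosing
 * create_idle, exactly as do_fork_idle() now does. */
static void do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle = container_of(work, struct create_idle, work);

	printf("forking idle thread for cpu %d\n", c_idle->cpu);
}

int main(void)
{
	struct create_idle c_idle = {
		.work = { .func = do_fork_idle },
		.cpu  = 3,
	};

	c_idle.work.func(&c_idle.work);	/* what schedule_work() would invoke */
	return 0;
}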
@@ -1167,6 +1170,13 @@ int __cpuinit __cpu_up(unsigned int cpu)
while (!cpu_isset(cpu, cpu_online_map))
cpu_relax();
+
+ if (num_online_cpus() > 8 && genapic == &apic_flat) {
+ printk(KERN_WARNING
+ "flat APIC routing can't be used with > 8 cpus\n");
+ BUG();
+ }
+
err = 0;
return err;
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cf..9f05bc9b2da 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
{
unsigned int cpu;
for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
static int __init cpufreq_tsc(void)
{
- INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+ INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER))
cpufreq_init = 1;
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 0d65b22f229..a1641ffdffc 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -30,9 +30,9 @@
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
@@ -108,7 +108,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
preempt_enable_no_resched();
}
-static int kstack_depth_to_print = 12;
+int kstack_depth_to_print = 12;
#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
#else
@@ -225,16 +225,25 @@ static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
{
struct ops_and_data *oad = (struct ops_and_data *)context;
int n = 0;
+ unsigned long sp = UNW_SP(info);
+ if (arch_unw_user_mode(info))
+ return -1;
while (unwind(info) == 0 && UNW_PC(info)) {
n++;
oad->ops->address(oad->data, UNW_PC(info));
if (arch_unw_user_mode(info))
break;
+ if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+ && sp > UNW_SP(info))
+ break;
+ sp = UNW_SP(info);
}
return n;
}
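
The extra test in dump_trace_unwind() stops the loop when the unwinder's stack pointer moves down while staying on the same page, i.e. it has stopped making progress toward the stack base and is probably stuck. The predicate in isolation, with example values:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True when the new SP is lower than the previous one but still within
 * the same page: the unwinder is looping rather than walking outward. */
static int unwinder_stuck(unsigned long prev_sp, unsigned long new_sp)
{
	return (prev_sp & ~(PAGE_SIZE - 1)) == (new_sp & ~(PAGE_SIZE - 1)) &&
	       prev_sp > new_sp;
}

int main(void)
{
	printf("%d\n", unwinder_stuck(0x8010, 0x8000));	/* same page, went down: 1 */
	printf("%d\n", unwinder_stuck(0x8010, 0x9000));	/* different page: 0 */
	return 0;
}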
+#define MSG(txt) ops->warning(data, txt)
+
/*
* x86-64 can have upto three kernel stacks:
* process stack
@@ -248,11 +257,12 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
return p > t && p < t + THREAD_SIZE - 3;
}
-void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+ unsigned long *stack,
struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = smp_processor_id();
- unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+ const unsigned cpu = get_cpu();
+ unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
unsigned used = 0;
struct thread_info *tinfo;
@@ -268,28 +278,30 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
if (unwind_init_frame_info(&info, tsk, regs) == 0)
unw_ret = dump_trace_unwind(&info, &oad);
} else if (tsk == current)
- unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+ unw_ret = unwind_init_running(&info, dump_trace_unwind,
+ &oad);
else {
if (unwind_init_blocked(&info, tsk) == 0)
unw_ret = dump_trace_unwind(&info, &oad);
}
if (unw_ret > 0) {
if (call_trace == 1 && !arch_unw_user_mode(&info)) {
- ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+ ops->warning_symbol(data,
+ "DWARF2 unwinder stuck at %s",
UNW_PC(&info));
if ((long)UNW_SP(&info) < 0) {
- ops->warning(data, "Leftover inexact backtrace:\n");
+ MSG("Leftover inexact backtrace:");
stack = (unsigned long *)UNW_SP(&info);
if (!stack)
- return;
+ goto out;
} else
- ops->warning(data, "Full inexact backtrace again:\n");
+ MSG("Full inexact backtrace again:");
} else if (call_trace >= 1)
- return;
+ goto out;
else
- ops->warning(data, "Full inexact backtrace again:\n");
+ MSG("Full inexact backtrace again:");
} else
- ops->warning(data, "Inexact backtrace:\n");
+ MSG("Inexact backtrace:");
}
if (!stack) {
unsigned long dummy;
@@ -297,12 +309,6 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
if (tsk && tsk != current)
stack = (unsigned long *)tsk->thread.rsp;
}
- /*
- * Align the stack pointer on word boundary, later loops
- * rely on that (and corruption / debug info bugs can cause
- * unaligned values here):
- */
- stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
/*
* Print function call entries within a stack. 'cond' is the
@@ -312,9 +318,9 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
#define HANDLE_STACK(cond) \
do while (cond) { \
unsigned long addr = *stack++; \
- if (oops_in_progress ? \
- __kernel_text_address(addr) : \
- kernel_text_address(addr)) { \
+ /* Use unlocked access here because except for NMIs \
+			   we should already be protected against module unloads */ \
+ if (__kernel_text_address(addr)) { \
/* \
* If the address is either in the text segment of the \
* kernel, or in the region which contains vmalloc'ed \
@@ -380,6 +386,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
tinfo = current_thread_info();
HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
+out:
+ put_cpu();
}
EXPORT_SYMBOL(dump_trace);
@@ -786,8 +794,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
reason);
- printk(KERN_EMERG "You probably have a hardware problem with your "
- "RAM chips\n");
+ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
if (panic_on_unrecovered_nmi)
panic("NMI: Not continuing");
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index d9534e750d4..6a1f8f491e5 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -51,15 +51,6 @@ SECTIONS
RODATA
-#ifdef CONFIG_STACK_UNWIND
- . = ALIGN(8);
- .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
- __start_unwind = .;
- *(.eh_frame)
- __end_unwind = .;
- }
-#endif
-
. = ALIGN(PAGE_SIZE); /* Align data segment to page size boundary */
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 92546c1526f..4a673f5397a 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -42,6 +42,7 @@
#include <asm/topology.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __syscall_clobber "r11","rcx","memory"
int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
@@ -274,7 +275,6 @@ static void __cpuinit cpu_vsyscall_init(void *arg)
vsyscall_set_cpu(raw_smp_processor_id());
}
-#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
@@ -283,13 +283,13 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
return NOTIFY_DONE;
}
-#endif
static void __init map_vsyscall(void)
{
extern char __vsyscall_0;
unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
+ /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
}
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index c493735218d..bc503f50690 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -9,8 +9,6 @@
#include <linux/module.h>
#include <asm/checksum.h>
-#define __force_inline inline __attribute__((always_inline))
-
static inline unsigned short from32to16(unsigned a)
{
unsigned short b = a >> 16;
@@ -33,7 +31,7 @@ static inline unsigned short from32to16(unsigned a)
* Unrolling to an 128 bytes inner loop.
* Using interleaving with more registers to break the carry chains.
*/
-static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
+static unsigned do_csum(const unsigned char *buff, unsigned len)
{
unsigned odd, count;
unsigned long result = 0;
@@ -132,9 +130,10 @@ static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
*
* it's best to have buff aligned on a 64-bit boundary
*/
-unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
- return add32_with_carry(do_csum(buff, len), sum);
+ return (__force __wsum)add32_with_carry(do_csum(buff, len),
+ (__force u32)sum);
}
EXPORT_SYMBOL(csum_partial);
@@ -143,7 +142,7 @@ EXPORT_SYMBOL(csum_partial);
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff,len,0));
}
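
csum_partial() returns a 32-bit unfolded one's-complement sum (__wsum), which ip_compute_csum() folds to 16 bits and complements. A portable sketch of that fold and of a trivial word-at-a-time partial sum — this is the generic algorithm, not the unrolled 64-bit assembly in do_csum(), and byte order is ignored here:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator to 16 bits, as
 * from32to16()/csum_fold() do: add the halves, then fold the carry. */
static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum += sum >> 16;
	return (uint16_t)sum;
}

/* Naive 16-bit-at-a-time partial checksum over an even-length buffer. */
static uint32_t csum16(const uint16_t *p, int words, uint32_t sum)
{
	while (words--)
		sum += *p++;
	return sum;
}

int main(void)
{
	uint16_t data[4] = { 0x4500, 0x0054, 0xabcd, 0x0001 };
	uint16_t csum = (uint16_t)~fold32(csum16(data, 4, 0));

	printf("checksum: 0x%04x\n", csum);
	return 0;
}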
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index b1320ec5842..fd42a4a095f 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -18,9 +18,9 @@
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
*/
-unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
- int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum isum, int *errp)
{
might_sleep();
*errp = 0;
@@ -34,17 +34,19 @@ csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
if (unlikely((unsigned long)src & 6)) {
while (((unsigned long)src & 6) && len >= 2) {
__u16 val16;
- *errp = __get_user(val16, (__u16 __user *)src);
+ *errp = __get_user(val16, (const __u16 __user *)src);
if (*errp)
return isum;
*(__u16 *)dst = val16;
- isum = add32_with_carry(isum, val16);
+ isum = (__force __wsum)add32_with_carry(
+ (__force unsigned)isum, val16);
src += 2;
dst += 2;
len -= 2;
}
}
- isum = csum_partial_copy_generic((__force void *)src,dst,len,isum,errp,NULL);
+ isum = csum_partial_copy_generic((__force const void *)src,
+ dst, len, isum, errp, NULL);
if (likely(*errp == 0))
return isum;
}
@@ -66,9 +68,9 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
* Returns an 32bit unfolded checksum of the buffer.
* src and dst are best aligned to 64bits.
*/
-unsigned int
-csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
- int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_to_user(const void *src, void __user *dst,
+ int len, __wsum isum, int *errp)
{
might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -79,7 +81,8 @@ csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
if (unlikely((unsigned long)dst & 6)) {
while (((unsigned long)dst & 6) && len >= 2) {
__u16 val16 = *(__u16 *)src;
- isum = add32_with_carry(isum, val16);
+ isum = (__force __wsum)add32_with_carry(
+ (__force unsigned)isum, val16);
*errp = __put_user(val16, (__u16 __user *)dst);
if (*errp)
return isum;
@@ -104,19 +107,21 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
*
* Returns an 32bit unfolded checksum of the buffer.
*/
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
-unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
- __u32 len, unsigned short proto, unsigned int sum)
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto, __wsum sum)
{
__u64 rest, sum64;
- rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
+ rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
+ (__force __u64)sum;
asm(" addq (%[saddr]),%[sum]\n"
" adcq 8(%[saddr]),%[sum]\n"
" adcq (%[daddr]),%[sum]\n"
@@ -124,7 +129,7 @@ unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
" adcq $0,%[sum]\n"
: [sum] "=r" (sum64)
: "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
- return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
+ return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 50be90975d0..2dbebd30834 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -40,13 +40,13 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
- __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
+ __delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)
{
- __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+ __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
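
Both halves of the delay fix push rounding upward: __const_udelay() adds one loop to the shifted product, and the microsecond-to-xloops constant is rounded up from 0x10c6 to 0x10c7 so udelay() can never come out short. The arithmetic behind the constant, as a small check:

#include <stdio.h>

int main(void)
{
	/* usecs are converted to delay loops in 32.32 fixed point:
	 * xloops = usecs * (2^32 / 1000000).  Truncating the constant to
	 * 0x10c6 (4294) made every delay slightly short; 0x10c7 (4295)
	 * overshoots by a few ppm instead. */
	double exact = 4294967296.0 / 1000000.0;	/* ~4294.967296 */
	unsigned trunc = 0x10c6, round_up = 0x10c7;

	printf("exact:  %.6f\n", exact);
	printf("0x10c6: %u (short by ~%.0f ppm)\n", trunc,
	       (exact - trunc) / exact * 1e6);
	printf("0x10c7: %u (long by ~%.0f ppm)\n", round_up,
	       (round_up - exact) / exact * 1e6);
	return 0;
}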
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 3751b4788e2..a65fc6f1dca 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,9 +23,9 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
@@ -96,7 +96,7 @@ void bust_spinlocks(int yes)
static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
unsigned long error_code)
{
- unsigned char __user *instr;
+ unsigned char *instr;
int scan_more = 1;
int prefetch = 0;
unsigned char *max_instr;
@@ -116,7 +116,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
unsigned char instr_hi;
unsigned char instr_lo;
- if (__get_user(opcode, (char __user *)instr))
+ if (probe_kernel_address(instr, opcode))
break;
instr_hi = opcode & 0xf0;
@@ -154,7 +154,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
scan_more = 0;
- if (__get_user(opcode, (char __user *)instr))
+ if (probe_kernel_address(instr, opcode))
break;
prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
@@ -170,7 +170,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
static int bad_address(void *p)
{
unsigned long dummy;
- return __get_user(dummy, (unsigned long __user *)p);
+ return probe_kernel_address((unsigned long *)p, dummy);
}
void dump_pagetable(unsigned long address)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4c0c00ef3ca..2968b90ef8a 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -730,14 +730,15 @@ static __init int x8664_sysctl_init(void)
__initcall(x8664_sysctl_init);
#endif
-/* A pseudo VMAs to allow ptrace access for the vsyscall page. This only
+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
covers the 64bit vsyscall page now. 32bit has a real VMA now and does
not need special handling anymore. */
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_START,
- .vm_end = VSYSCALL_END,
- .vm_page_prot = PAGE_READONLY
+ .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+ .vm_page_prot = PAGE_READONLY_EXEC,
+ .vm_flags = VM_READ | VM_EXEC
};
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 3e231d762aa..ccb91dd996a 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -61,34 +61,40 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
return base;
}
-
-static void flush_kernel_map(void *address)
+static void cache_flush_page(void *adr)
{
- if (0 && address && cpu_has_clflush) {
- /* is this worth it? */
- int i;
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- asm volatile("clflush (%0)" :: "r" (address + i));
- } else
- asm volatile("wbinvd":::"memory");
- if (address)
- __flush_tlb_one(address);
- else
- __flush_tlb_all();
+ int i;
+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ asm volatile("clflush (%0)" :: "r" (adr + i));
}
+static void flush_kernel_map(void *arg)
+{
+ struct list_head *l = (struct list_head *)arg;
+ struct page *pg;
+
+ /* When clflush is available always use it because it is
+ much cheaper than WBINVD */
+ if (!cpu_has_clflush)
+ asm volatile("wbinvd" ::: "memory");
+ list_for_each_entry(pg, l, lru) {
+ void *adr = page_address(pg);
+ if (cpu_has_clflush)
+ cache_flush_page(adr);
+ __flush_tlb_one(adr);
+ }
+}
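
cache_flush_page() walks the page in cache-line-sized steps and issues clflush for each line, which the new flush_kernel_map() prefers over a whole-cache wbinvd whenever the CPU has clflush. A user-space, x86-only sketch of the same per-line loop; the 64-byte line size is an assumption here, whereas the kernel takes it from boot_cpu_data.x86_clflush_size:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define CLFLUSH_SIZE	64	/* assumed cache-line size */

/* Flush one page from the caches a line at a time, as cache_flush_page()
 * does, instead of dumping every cache level with WBINVD. */
static void flush_page_by_lines(void *adr)
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
		asm volatile("clflush (%0)" :: "r" ((char *)adr + i));
}

int main(void)
{
	void *page;

	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
		return 1;
	flush_page_by_lines(page);
	puts("flushed one page, line by line");
	free(page);
	return 0;
}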
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
{
- on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+ on_each_cpu(flush_kernel_map, l, 1, 1);
}
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
static inline void save_page(struct page *fpage)
{
- fpage->lru.next = (struct list_head *)deferred_pages;
- deferred_pages = fpage;
+ list_add(&fpage->lru, &deferred_pages);
}
/*
@@ -207,18 +213,18 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
void global_flush_tlb(void)
{
- struct page *dpage;
+ struct page *pg, *next;
+ struct list_head l;
down_read(&init_mm.mmap_sem);
- dpage = xchg(&deferred_pages, NULL);
+ list_replace_init(&deferred_pages, &l);
up_read(&init_mm.mmap_sem);
- flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
- while (dpage) {
- struct page *tmp = dpage;
- dpage = (struct page *)dpage->lru.next;
- ClearPagePrivate(tmp);
- __free_page(tmp);
+ flush_map(&l);
+
+ list_for_each_entry_safe(pg, next, &l, lru) {
+ ClearPagePrivate(pg);
+ __free_page(pg);
}
}