-rw-r--r--  Documentation/00-INDEX | 2
-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/DocBook/filesystems.tmpl | 20
-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 84
-rw-r--r--  Documentation/DocBook/networking.tmpl | 106
-rw-r--r--  Documentation/RCU/NMI-RCU.txt | 2
-rw-r--r--  Documentation/SubmitChecklist | 16
-rw-r--r--  Documentation/cpuidle/core.txt | 23
-rw-r--r--  Documentation/cpuidle/driver.txt | 31
-rw-r--r--  Documentation/cpuidle/governor.txt | 29
-rw-r--r--  Documentation/cpuidle/sysfs.txt | 79
-rw-r--r--  Documentation/kprobes.txt | 11
-rw-r--r--  Documentation/sched-rt-group.txt | 59
-rw-r--r--  Documentation/sysctl/kernel.txt | 2
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 4
-rw-r--r--  arch/alpha/kernel/time.c | 15
-rw-r--r--  arch/blackfin/kernel/time.c | 8
-rw-r--r--  arch/blackfin/kernel/traps.c | 12
-rw-r--r--  arch/cris/arch-v10/lib/memset.c | 397
-rw-r--r--  arch/cris/arch-v32/lib/memset.c | 398
-rw-r--r--  arch/frv/kernel/time.c | 6
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/ia64/Kconfig | 7
-rw-r--r--  arch/m68knommu/kernel/time.c | 12
-rw-r--r--  arch/m68knommu/platform/5206/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/5206e/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/520x/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/523x/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/5249/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/5272/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/527x/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/528x/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/5307/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/532x/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/5407/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/coldfire/Makefile | 4
-rw-r--r--  arch/m68knommu/platform/coldfire/entry.S | 9
-rw-r--r--  arch/m68knommu/platform/coldfire/timers.c | 17
-rw-r--r--  arch/mips/kernel/sysirix.c | 12
-rw-r--r--  arch/parisc/hpux/sys_hpux.c | 4
-rw-r--r--  arch/powerpc/Kconfig | 4
-rw-r--r--  arch/powerpc/boot/Makefile | 2
-rw-r--r--  arch/powerpc/boot/ps3-hvcall.S | 2
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/vdso.c | 12
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c | 15
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/52xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 4
-rw-r--r--  arch/powerpc/platforms/cell/ras.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 18
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c | 31
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c | 2
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 4
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c | 2
-rw-r--r--  arch/sh/Kconfig | 19
-rw-r--r--  arch/sh/Kconfig.cpu | 4
-rw-r--r--  arch/sh/Kconfig.debug | 3
-rw-r--r--  arch/sh/Makefile | 1
-rw-r--r--  arch/sh/boards/renesas/migor/Makefile | 1
-rw-r--r--  arch/sh/boards/renesas/migor/setup.c | 61
-rw-r--r--  arch/sh/boards/renesas/r7780rp/setup.c | 47
-rw-r--r--  arch/sh/boards/renesas/rts7751r2d/setup.c | 45
-rw-r--r--  arch/sh/boards/renesas/sdk7780/Kconfig | 7
-rw-r--r--  arch/sh/cchips/hd6446x/hd64465/setup.c | 47
-rw-r--r--  arch/sh/configs/migor_defconfig | 824
-rw-r--r--  arch/sh/configs/rts7751r2d1_defconfig | 340
-rw-r--r--  arch/sh/configs/rts7751r2dplus_defconfig | 340
-rw-r--r--  arch/sh/configs/se7705_defconfig | 1
-rw-r--r--  arch/sh/drivers/dma/dma-api.c | 2
-rw-r--r--  arch/sh/drivers/pci/fixups-lboxre2.c | 4
-rw-r--r--  arch/sh/drivers/pci/fixups-rts7751r2d.c | 4
-rw-r--r--  arch/sh/drivers/pci/ops-dreamcast.c | 44
-rw-r--r--  arch/sh/drivers/pci/ops-rts7751r2d.c | 3
-rw-r--r--  arch/sh/drivers/pci/pci-sh4.h | 4
-rw-r--r--  arch/sh/drivers/pci/pci-sh7751.c | 16
-rw-r--r--  arch/sh/drivers/pci/pci-sh7780.c | 2
-rw-r--r--  arch/sh/kernel/Makefile_32 | 1
-rw-r--r--  arch/sh/kernel/Makefile_64 | 1
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile | 1
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 27
-rw-r--r--  arch/sh/kernel/cpu/irq/maskreg.c | 93
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 177
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 61
-rw-r--r--  arch/sh/kernel/io.c | 8
-rw-r--r--  arch/sh/kernel/io_generic.c | 24
-rw-r--r--  arch/sh/kernel/io_trapped.c | 276
-rw-r--r--  arch/sh/kernel/irq.c | 3
-rw-r--r--  arch/sh/kernel/process_64.c | 9
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 4
-rw-r--r--  arch/sh/kernel/setup.c | 2
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 4
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 4
-rw-r--r--  arch/sh/kernel/time_32.c | 19
-rw-r--r--  arch/sh/kernel/time_64.c | 31
-rw-r--r--  arch/sh/kernel/timers/timer-cmt.c | 9
-rw-r--r--  arch/sh/kernel/timers/timer-mtu2.c | 3
-rw-r--r--  arch/sh/kernel/traps_32.c | 164
-rw-r--r--  arch/sh/kernel/traps_64.c | 4
-rw-r--r--  arch/sh/kernel/vmlinux_64.lds.S | 2
-rw-r--r--  arch/sh/mm/cache-sh5.c | 1019
-rw-r--r--  arch/sh/mm/consistent.c | 32
-rw-r--r--  arch/sh/mm/fault_32.c | 11
-rw-r--r--  arch/sh/mm/init.c | 2
-rw-r--r--  arch/sh/tools/mach-types | 2
-rw-r--r--  arch/sparc/kernel/pcic.c | 2
-rw-r--r--  arch/sparc/kernel/time.c | 7
-rw-r--r--  arch/sparc64/solaris/fs.c | 12
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 6
-rw-r--r--  arch/x86/Kconfig | 4
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 2
-rw-r--r--  arch/x86/kernel/efi.c | 14
-rw-r--r--  arch/x86/kernel/efi_64.c | 32
-rw-r--r--  arch/x86/kernel/i8253.c | 2
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 9
-rw-r--r--  arch/x86/kernel/quirks.c | 9
-rw-r--r--  arch/x86/kernel/reboot.c | 46
-rw-r--r--  arch/x86/kernel/test_rodata.c | 2
-rw-r--r--  arch/x86/kernel/traps_64.c | 4
-rw-r--r--  arch/x86/mm/fault.c | 2
-rw-r--r--  arch/x86/mm/init_32.c | 1
-rw-r--r--  arch/x86/mm/init_64.c | 1
-rw-r--r--  arch/x86/mm/ioremap.c | 4
-rw-r--r--  arch/x86/mm/pageattr-test.c | 7
-rw-r--r--  arch/x86/mm/pageattr.c | 21
-rw-r--r--  arch/x86/vdso/Makefile | 22
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  drivers/acpi/blacklist.c | 64
-rw-r--r--  drivers/acpi/event.c | 2
-rw-r--r--  drivers/acpi/hardware/hwsleep.c | 1
-rw-r--r--  drivers/acpi/osl.c | 22
-rw-r--r--  drivers/acpi/processor_idle.c | 19
-rw-r--r--  drivers/acpi/wmi.c | 6
-rw-r--r--  drivers/ata/libata-core.c | 48
-rw-r--r--  drivers/ata/pata_amd.c | 2
-rw-r--r--  drivers/ata/pata_legacy.c | 2
-rw-r--r--  drivers/ata/pata_ninja32.c | 9
-rw-r--r--  drivers/ata/pata_via.c | 6
-rw-r--r--  drivers/ata/sata_mv.c | 50
-rw-r--r--  drivers/block/swim3.c | 4
-rw-r--r--  drivers/char/drm/i830_dma.c | 2
-rw-r--r--  drivers/char/hvc_rtas.c | 2
-rw-r--r--  drivers/char/pcmcia/Kconfig | 2
-rw-r--r--  drivers/cpuidle/cpuidle.c | 3
-rw-r--r--  drivers/cpuidle/sysfs.c | 14
-rw-r--r--  drivers/ide/Kconfig | 26
-rw-r--r--  drivers/ide/arm/bast-ide.c | 12
-rw-r--r--  drivers/ide/arm/palm_bk3710.c | 74
-rw-r--r--  drivers/ide/ide-cd.c | 2
-rw-r--r--  drivers/ide/ide-disk.c | 18
-rw-r--r--  drivers/ide/ide-dma.c | 14
-rw-r--r--  drivers/ide/ide-io.c | 19
-rw-r--r--  drivers/ide/ide-iops.c | 10
-rw-r--r--  drivers/ide/ide-lib.c | 9
-rw-r--r--  drivers/ide/ide-probe.c | 4
-rw-r--r--  drivers/ide/ide-tape.c | 34
-rw-r--r--  drivers/ide/ide.c | 4
-rw-r--r--  drivers/ide/legacy/gayle.c | 2
-rw-r--r--  drivers/ide/pci/cs5520.c | 5
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 22
-rw-r--r--  drivers/infiniband/core/cm.c | 26
-rw-r--r--  drivers/infiniband/core/cma.c | 10
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 17
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r--  drivers/macintosh/mediabay.c | 2
-rw-r--r--  drivers/md/bitmap.c | 8
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-table.c | 4
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/memstick/host/tifm_ms.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 54
-rw-r--r--  drivers/message/fusion/mptbase.h | 1
-rw-r--r--  drivers/misc/thinkpad_acpi.c | 4
-rw-r--r--  drivers/mtd/mtdsuper.c | 14
-rw-r--r--  drivers/net/mlx4/alloc.c | 1
-rw-r--r--  drivers/net/mlx4/mr.c | 21
-rw-r--r--  drivers/oprofile/buffer_sync.c | 21
-rw-r--r--  drivers/parport/parport_pc.c | 4
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 2
-rw-r--r--  drivers/pnp/pnpbios/core.c | 2
-rw-r--r--  drivers/ps3/ps3-lpm.c | 22
-rw-r--r--  drivers/ps3/ps3-sys-manager.c | 44
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 70
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 47
-rw-r--r--  drivers/scsi/aacraid/rx.c | 5
-rw-r--r--  drivers/scsi/aacraid/sa.c | 5
-rw-r--r--  drivers/scsi/advansys.c | 13
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sas.h | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 16
-rw-r--r--  drivers/scsi/arm/fas216.h | 3
-rw-r--r--  drivers/scsi/gdth.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 66
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 384
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 328
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 154
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 55
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 105
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 70
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 20
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h | 1
-rw-r--r--  drivers/scsi/ses.c | 23
-rw-r--r--  drivers/scsi/sym53c416.c | 16
-rw-r--r--  drivers/serial/sh-sci.c | 2
-rw-r--r--  drivers/serial/sh-sci.h | 9
-rw-r--r--  drivers/sh/maple/maple.c | 981
-rw-r--r--  drivers/usb/gadget/file_storage.c | 8
-rw-r--r--  fs/afs/mntpt.c | 23
-rw-r--r--  fs/autofs4/root.c | 5
-rw-r--r--  fs/binfmt_flat.c | 8
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 25
-rw-r--r--  fs/coda/pioctl.c | 6
-rw-r--r--  fs/compat.c | 8
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/configfs/symlink.c | 8
-rw-r--r--  fs/dcache.c | 103
-rw-r--r--  fs/dcookies.c | 34
-rw-r--r--  fs/dquot.c | 9
-rw-r--r--  fs/ecryptfs/dentry.c | 12
-rw-r--r--  fs/ecryptfs/inode.c | 24
-rw-r--r--  fs/ecryptfs/main.c | 6
-rw-r--r--  fs/exec.c | 8
-rw-r--r--  fs/ext3/super.c | 8
-rw-r--r--  fs/ext4/super.c | 8
-rw-r--r--  fs/gfs2/ops_fstype.c | 7
-rw-r--r--  fs/inotify_user.c | 12
-rw-r--r--  fs/lockd/host.c | 10
-rw-r--r--  fs/lockd/svclock.c | 28
-rw-r--r--  fs/namei.c | 311
-rw-r--r--  fs/namespace.c | 268
-rw-r--r--  fs/nfs/namespace.c | 29
-rw-r--r--  fs/nfs/nfs4proc.c | 8
-rw-r--r--  fs/nfsctl.c | 4
-rw-r--r--  fs/nfsd/export.c | 122
-rw-r--r--  fs/nfsd/nfs3proc.c | 2
-rw-r--r--  fs/nfsd/nfs3xdr.c | 4
-rw-r--r--  fs/nfsd/nfs4recover.c | 34
-rw-r--r--  fs/nfsd/nfs4state.c | 4
-rw-r--r--  fs/nfsd/nfs4xdr.c | 12
-rw-r--r--  fs/nfsd/nfsfh.c | 26
-rw-r--r--  fs/nfsd/nfsproc.c | 6
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nfsd/vfs.c | 13
-rw-r--r--  fs/open.c | 61
-rw-r--r--  fs/pipe.c | 10
-rw-r--r--  fs/proc/base.c | 61
-rw-r--r--  fs/proc/internal.h | 2
-rw-r--r--  fs/proc/nommu.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 8
-rw-r--r--  fs/proc/task_nommu.c | 6
-rw-r--r--  fs/reiserfs/super.c | 14
-rw-r--r--  fs/seq_file.c | 6
-rw-r--r--  fs/smbfs/inode.c | 2
-rw-r--r--  fs/stat.c | 19
-rw-r--r--  fs/udf/balloc.c | 2
-rw-r--r--  fs/udf/dir.c | 14
-rw-r--r--  fs/utimes.c | 4
-rw-r--r--  fs/xattr.c | 32
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 8
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 6
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 6
-rw-r--r--  fs/xfs/xfs_alloc.c | 16
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 16
-rw-r--r--  fs/xfs/xfs_arch.h | 15
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 46
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 16
-rw-r--r--  fs/xfs/xfs_da_btree.c | 14
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 8
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 4
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 16
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 18
-rw-r--r--  fs/xfs/xfs_fsops.c | 4
-rw-r--r--  fs/xfs/xfs_ialloc.c | 12
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 16
-rw-r--r--  fs/xfs/xfs_log.c | 6
-rw-r--r--  fs/xfs/xfs_trans.c | 24
-rw-r--r--  include/acpi/acpiosxf.h | 2
-rw-r--r--  include/acpi/processor.h | 9
-rw-r--r--  include/asm-generic/topology.h | 10
-rw-r--r--  include/asm-ia64/param.h | 10
-rw-r--r--  include/asm-m68knommu/cacheflush.h | 14
-rw-r--r--  include/asm-m68knommu/system.h | 2
-rw-r--r--  include/asm-mn10300/highmem.h | 4
-rw-r--r--  include/asm-mn10300/linkage.h | 2
-rw-r--r--  include/asm-powerpc/systbl.h | 4
-rw-r--r--  include/asm-powerpc/unistd.h | 6
-rw-r--r--  include/asm-ppc/page.h | 2
-rw-r--r--  include/asm-sh/bugs.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/freq.h | 4
-rw-r--r--  include/asm-sh/cpu-sh5/cacheflush.h | 6
-rw-r--r--  include/asm-sh/cpu-sh5/mmu_context.h | 6
-rw-r--r--  include/asm-sh/hp6xx.h | 28
-rw-r--r--  include/asm-sh/io.h | 22
-rw-r--r--  include/asm-sh/io_trapped.h | 58
-rw-r--r--  include/asm-sh/ioctls.h | 4
-rw-r--r--  include/asm-sh/irq.h | 4
-rw-r--r--  include/asm-sh/mmu_context_64.h | 3
-rw-r--r--  include/asm-sh/page.h | 7
-rw-r--r--  include/asm-sh/pgtable_64.h | 13
-rw-r--r--  include/asm-sh/processor.h | 2
-rw-r--r--  include/asm-sh/r7780rp.h | 3
-rw-r--r--  include/asm-sh/rts7751r2d.h | 3
-rw-r--r--  include/asm-sh/system.h | 5
-rw-r--r--  include/asm-sh/system_32.h | 3
-rw-r--r--  include/asm-sh/termbits.h | 5
-rw-r--r--  include/asm-sh/termios.h | 6
-rw-r--r--  include/asm-sh/tlb.h | 1
-rw-r--r--  include/asm-sh/uaccess.h | 29
-rw-r--r--  include/asm-sh/uaccess_32.h | 24
-rw-r--r--  include/asm-sh/uaccess_64.h | 19
-rw-r--r--  include/asm-sh/unistd_32.h | 6
-rw-r--r--  include/asm-sh/unistd_64.h | 6
-rw-r--r--  include/asm-x86/cacheflush.h | 7
-rw-r--r--  include/asm-x86/kdebug.h | 1
-rw-r--r--  include/asm-x86/sigcontext.h | 66
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/aio.h | 20
-rw-r--r--  include/linux/audit.h | 5
-rw-r--r--  include/linux/buffer_head.h | 6
-rw-r--r--  include/linux/cgroup_subsys.h | 2
-rw-r--r--  include/linux/configfs.h | 1
-rw-r--r--  include/linux/cpuidle.h | 2
-rw-r--r--  include/linux/cpuset.h | 3
-rw-r--r--  include/linux/dcache.h | 5
-rw-r--r--  include/linux/dcookies.h | 15
-rw-r--r--  include/linux/etherdevice.h | 3
-rw-r--r--  include/linux/file.h | 16
-rw-r--r--  include/linux/fs.h | 6
-rw-r--r--  include/linux/fs_struct.h | 10
-rw-r--r--  include/linux/gfp.h | 15
-rw-r--r--  include/linux/hugetlb.h | 2
-rw-r--r--  include/linux/ide.h | 13
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/irq.h | 1
-rw-r--r--  include/linux/ktime.h | 2
-rw-r--r--  include/linux/linkage.h | 5
-rw-r--r--  include/linux/maple.h | 100
-rw-r--r--  include/linux/marker.h | 68
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/module.h | 5
-rw-r--r--  include/linux/moduleparam.h | 12
-rw-r--r--  include/linux/mutex-debug.h | 2
-rw-r--r--  include/linux/namei.h | 17
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/nfsd/export.h | 8
-rw-r--r--  include/linux/pagemap.h | 10
-rw-r--r--  include/linux/path.h | 15
-rw-r--r--  include/linux/pid.h | 21
-rw-r--r--  include/linux/proc_fs.h | 2
-rw-r--r--  include/linux/rwsem-spinlock.h | 16
-rw-r--r--  include/linux/sched.h | 32
-rw-r--r--  include/linux/seq_file.h | 5
-rw-r--r--  include/linux/serial_core.h | 2
-rw-r--r--  include/linux/slub_def.h | 15
-rw-r--r--  include/linux/sunrpc/svc.h | 13
-rw-r--r--  include/linux/swap.h | 8
-rw-r--r--  include/linux/wait.h | 34
-rw-r--r--  include/linux/workqueue.h | 13
-rw-r--r--  include/scsi/scsi_host.h | 8
-rw-r--r--  init/Kconfig | 23
-rw-r--r--  init/Makefile | 1
-rw-r--r--  init/do_mounts.c | 6
-rw-r--r--  kernel/audit.c | 12
-rw-r--r--  kernel/audit_tree.c | 28
-rw-r--r--  kernel/auditfilter.c | 15
-rw-r--r--  kernel/auditsc.c | 28
-rw-r--r--  kernel/exit.c | 12
-rw-r--r--  kernel/fork.c | 18
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/futex_compat.c | 2
-rw-r--r--  kernel/hrtimer.c | 48
-rw-r--r--  kernel/kmod.c | 5
-rw-r--r--  kernel/marker.c | 677
-rw-r--r--  kernel/module.c | 7
-rw-r--r--  kernel/posix-timers.c | 8
-rw-r--r--  kernel/rcupdate.c | 5
-rw-r--r--  kernel/rtmutex.c | 5
-rw-r--r--  kernel/sched.c | 494
-rw-r--r--  kernel/sched_rt.c | 102
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/sysctl.c | 36
-rw-r--r--  kernel/timeconst.pl | 2
-rw-r--r--  kernel/user.c | 50
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/hugetlb.c | 6
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/mempolicy.c | 63
-rw-r--r--  mm/slab.c | 3
-rw-r--r--  mm/slub.c | 94
-rw-r--r--  mm/swapfile.c | 2
-rw-r--r--  net/bluetooth/rfcomm/core.c | 4
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/sunrpc/clnt.c | 10
-rw-r--r--  net/sunrpc/rpc_pipe.c | 10
-rw-r--r--  net/sunrpc/xprt.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3
-rw-r--r--  net/unix/af_unix.c | 26
-rw-r--r--  samples/markers/probe-example.c | 25
-rw-r--r--  scripts/Kbuild.include | 3
-rw-r--r--  scripts/Makefile.modpost | 11
-rwxr-xr-x  scripts/kernel-doc | 1
-rw-r--r--  scripts/mod/modpost.c | 164
-rw-r--r--  scripts/mod/modpost.h | 3
-rw-r--r--  security/selinux/avc.c | 15
-rw-r--r--  security/selinux/hooks.c | 53
-rw-r--r--  security/selinux/include/av_perm_to_string.h | 3
-rw-r--r--  security/selinux/include/av_permissions.h | 3
-rw-r--r--  security/selinux/include/avc.h | 6
-rw-r--r--  security/selinux/include/class_to_string.h | 1
-rw-r--r--  security/selinux/include/flask.h | 1
-rw-r--r--  security/smack/smack_lsm.c | 11
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 4
-rw-r--r--  sound/core/seq/seq_device.c | 3
-rw-r--r--  sound/core/sound.c | 4
-rw-r--r--  sound/core/timer.c | 2
-rw-r--r--  sound/ppc/daca.c | 5
-rw-r--r--  sound/ppc/tumbler.c | 5
443 files changed, 8731 insertions, 5005 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 8d556707bb6..30b327a116e 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -109,6 +109,8 @@ cpu-hotplug.txt
- document describing CPU hotplug support in the Linux kernel.
cpu-load.txt
- document describing how CPU load statistics are collected.
+cpuidle/
+ - info on CPU_IDLE, CPU idle state management subsystem.
cpusets.txt
- documents the cpusets feature; assign CPUs and Mem to a set of tasks.
cputopology.txt
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 6a0ad4715e9..300e1707893 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -8,7 +8,7 @@
DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
- procfs-guide.xml writing_usb_driver.xml \
+ procfs-guide.xml writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5eaef87e8f1..5e87ad58c0b 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -398,4 +398,24 @@ an example.
</chapter>
+ <chapter id="splice">
+ <title>splice API</title>
+ <para>
+ splice is a method for moving blocks of data around inside the
+ kernel, without continually transferring them between the kernel
+ and user space.
+ </para>
+!Ffs/splice.c
+ </chapter>
+
+ <chapter id="pipes">
+ <title>pipes API</title>
+ <para>
+ Pipe interfaces are all for in-kernel (builtin image) use.
+ They are not exported for use by modules.
+ </para>
+!Iinclude/linux/pipe_fs_i.h
+!Ffs/pipe.c
+ </chapter>
+
</book>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 059aaf20951..f31601e8bd8 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -204,65 +204,6 @@ X!Ilib/string.c
</sect1>
</chapter>
- <chapter id="netcore">
- <title>Linux Networking</title>
- <sect1><title>Networking Base Types</title>
-!Iinclude/linux/net.h
- </sect1>
- <sect1><title>Socket Buffer Functions</title>
-!Iinclude/linux/skbuff.h
-!Iinclude/net/sock.h
-!Enet/socket.c
-!Enet/core/skbuff.c
-!Enet/core/sock.c
-!Enet/core/datagram.c
-!Enet/core/stream.c
- </sect1>
- <sect1><title>Socket Filter</title>
-!Enet/core/filter.c
- </sect1>
- <sect1><title>Generic Network Statistics</title>
-!Iinclude/linux/gen_stats.h
-!Enet/core/gen_stats.c
-!Enet/core/gen_estimator.c
- </sect1>
- <sect1><title>SUN RPC subsystem</title>
-<!-- The !D functionality is not perfect, garbage has to be protected by comments
-!Dnet/sunrpc/sunrpc_syms.c
--->
-!Enet/sunrpc/xdr.c
-!Enet/sunrpc/svcsock.c
-!Enet/sunrpc/sched.c
- </sect1>
- </chapter>
-
- <chapter id="netdev">
- <title>Network device support</title>
- <sect1><title>Driver Support</title>
-!Enet/core/dev.c
-!Enet/ethernet/eth.c
-!Enet/sched/sch_generic.c
-!Iinclude/linux/etherdevice.h
-!Iinclude/linux/netdevice.h
- </sect1>
- <sect1><title>PHY Support</title>
-!Edrivers/net/phy/phy.c
-!Idrivers/net/phy/phy.c
-!Edrivers/net/phy/phy_device.c
-!Idrivers/net/phy/phy_device.c
-!Edrivers/net/phy/mdio_bus.c
-!Idrivers/net/phy/mdio_bus.c
- </sect1>
-<!-- FIXME: Removed for now since no structured comments in source
- <sect1><title>Wireless</title>
-X!Enet/core/wireless.c
- </sect1>
--->
- <sect1><title>Synchronous PPP</title>
-!Edrivers/net/wan/syncppp.c
- </sect1>
- </chapter>
-
<chapter id="modload">
<title>Module Support</title>
<sect1><title>Module Loading</title>
@@ -508,11 +449,6 @@ X!Isound/sound_firmware.c
!Edrivers/serial/8250.c
</chapter>
- <chapter id="z85230">
- <title>Z85230 Support Library</title>
-!Edrivers/net/wan/z85230.c
- </chapter>
-
<chapter id="fbdev">
<title>Frame Buffer Library</title>
@@ -712,24 +648,4 @@ X!Idrivers/video/console/fonts.c
!Edrivers/i2c/i2c-core.c
</chapter>
- <chapter id="splice">
- <title>splice API</title>
- <para>
- splice is a method for moving blocks of data around inside the
- kernel, without continually transferring them between the kernel
- and user space.
- </para>
-!Ffs/splice.c
- </chapter>
-
- <chapter id="pipes">
- <title>pipes API</title>
- <para>
- Pipe interfaces are all for in-kernel (builtin image) use.
- They are not exported for use by modules.
- </para>
-!Iinclude/linux/pipe_fs_i.h
-!Ffs/pipe.c
- </chapter>
-
</book>
diff --git a/Documentation/DocBook/networking.tmpl b/Documentation/DocBook/networking.tmpl
new file mode 100644
index 00000000000..f24f9e85e4a
--- /dev/null
+++ b/Documentation/DocBook/networking.tmpl
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="LinuxNetworking">
+ <bookinfo>
+ <title>Linux Networking and Network Devices APIs</title>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later
+ version.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="netcore">
+ <title>Linux Networking</title>
+ <sect1><title>Networking Base Types</title>
+!Iinclude/linux/net.h
+ </sect1>
+ <sect1><title>Socket Buffer Functions</title>
+!Iinclude/linux/skbuff.h
+!Iinclude/net/sock.h
+!Enet/socket.c
+!Enet/core/skbuff.c
+!Enet/core/sock.c
+!Enet/core/datagram.c
+!Enet/core/stream.c
+ </sect1>
+ <sect1><title>Socket Filter</title>
+!Enet/core/filter.c
+ </sect1>
+ <sect1><title>Generic Network Statistics</title>
+!Iinclude/linux/gen_stats.h
+!Enet/core/gen_stats.c
+!Enet/core/gen_estimator.c
+ </sect1>
+ <sect1><title>SUN RPC subsystem</title>
+<!-- The !D functionality is not perfect, garbage has to be protected by comments
+!Dnet/sunrpc/sunrpc_syms.c
+-->
+!Enet/sunrpc/xdr.c
+!Enet/sunrpc/svc_xprt.c
+!Enet/sunrpc/xprt.c
+!Enet/sunrpc/sched.c
+!Enet/sunrpc/socklib.c
+!Enet/sunrpc/stats.c
+!Enet/sunrpc/rpc_pipe.c
+!Enet/sunrpc/rpcb_clnt.c
+!Enet/sunrpc/clnt.c
+ </sect1>
+ </chapter>
+
+ <chapter id="netdev">
+ <title>Network device support</title>
+ <sect1><title>Driver Support</title>
+!Enet/core/dev.c
+!Enet/ethernet/eth.c
+!Enet/sched/sch_generic.c
+!Iinclude/linux/etherdevice.h
+!Iinclude/linux/netdevice.h
+ </sect1>
+ <sect1><title>PHY Support</title>
+!Edrivers/net/phy/phy.c
+!Idrivers/net/phy/phy.c
+!Edrivers/net/phy/phy_device.c
+!Idrivers/net/phy/phy_device.c
+!Edrivers/net/phy/mdio_bus.c
+!Idrivers/net/phy/mdio_bus.c
+ </sect1>
+<!-- FIXME: Removed for now since no structured comments in source
+ <sect1><title>Wireless</title>
+X!Enet/core/wireless.c
+ </sect1>
+-->
+ <sect1><title>Synchronous PPP</title>
+!Edrivers/net/wan/syncppp.c
+ </sect1>
+ </chapter>
+
+</book>
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index d0634a5c344..c64158ecde4 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -25,7 +25,7 @@ the NMI handler to take the default machine-specific action.
This nmi_callback variable is a global function pointer to the current
NMI handler.
- fastcall void do_nmi(struct pt_regs * regs, long error_code)
+ void do_nmi(struct pt_regs * regs, long error_code)
{
int cpu;
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 34e06d2f194..da10e071424 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -20,7 +20,11 @@ kernel patches.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
-5: Matches kernel coding style(!)
+5: Check your patch for general style as detailed in
+ Documentation/CodingStyle. Check for trivial violations with the
+ patch style checker prior to submission (scripts/checkpatch.pl).
+ You should be able to justify all violations that remain in
+ your patch.
6: Any new or modified CONFIG options don't muck up the config menu.
@@ -79,13 +83,3 @@ kernel patches.
23: Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
-
-24: Avoid whitespace damage such as indenting with spaces or whitespace
- at the end of lines. You can test this by feeding the patch to
- "git apply --check --whitespace=error-all"
-
-25: Check your patch for general style as detailed in
- Documentation/CodingStyle. Check for trivial violations with the
- patch style checker prior to submission (scripts/checkpatch.pl).
- You should be able to justify all violations that remain in
- your patch.
diff --git a/Documentation/cpuidle/core.txt b/Documentation/cpuidle/core.txt
new file mode 100644
index 00000000000..63ecc5dc9d8
--- /dev/null
+++ b/Documentation/cpuidle/core.txt
@@ -0,0 +1,23 @@
+
+ Supporting multiple CPU idle levels in kernel
+
+ cpuidle
+
+General Information:
+
+Various CPUs today support multiple idle levels that are differentiated
+by varying exit latencies and power consumption during idle.
+cpuidle is a generic in-kernel infrastructure that separates
+idle policy (governor) from idle mechanism (driver) and provides a
+standardized infrastructure to support independent development of
+governors and drivers.
+
+cpuidle resides under drivers/cpuidle.
+
+Boot options:
+"cpuidle_sysfs_switch"
+enables the current_governor interface in /sys/devices/system/cpu/cpuidle/,
+which can be used to switch governors at run time. This boot option
+is meant for developer testing only. In normal usage, the kernel picks the
+best governor based on governor ratings.
+SEE ALSO: sysfs.txt in this directory.
diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt
new file mode 100644
index 00000000000..7a9e09ece93
--- /dev/null
+++ b/Documentation/cpuidle/driver.txt
@@ -0,0 +1,31 @@
+
+
+ Supporting multiple CPU idle levels in kernel
+
+ cpuidle drivers
+
+
+
+
+A cpuidle driver hooks into the cpuidle infrastructure and handles the
+architecture/platform dependent part of CPU idle states. The driver
+provides the platform idle state detection capability and the
+mechanisms for actually entering and exiting CPU idle states.
+
+The driver initializes a cpuidle_device structure for each CPU device
+and registers it with cpuidle using cpuidle_register_device.
+
+It can also handle dynamic changes in the available states (such as
+battery <-> AC transitions) by using cpuidle_pause_and_lock,
+cpuidle_disable_device, cpuidle_enable_device and
+cpuidle_resume_and_unlock.
+
+Interfaces:
+extern int cpuidle_register_driver(struct cpuidle_driver *drv);
+extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
+extern int cpuidle_register_device(struct cpuidle_device *dev);
+extern void cpuidle_unregister_device(struct cpuidle_device *dev);
+
+extern void cpuidle_pause_and_lock(void);
+extern void cpuidle_resume_and_unlock(void);
+extern int cpuidle_enable_device(struct cpuidle_device *dev);
+extern void cpuidle_disable_device(struct cpuidle_device *dev);
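
A minimal sketch of how a platform driver might use these interfaces,
assuming a hypothetical platform_idle_wait() hook that actually idles the
CPU; the state values below are illustrative, not taken from any real
platform:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/cpuidle.h>

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
};

static DEFINE_PER_CPU(struct cpuidle_device, example_idle_dev);

/* Hypothetical platform hook that actually idles the CPU. */
extern void platform_idle_wait(void);

/* Enter the state and return the time spent in it, in microseconds. */
static int example_enter_c1(struct cpuidle_device *dev,
			    struct cpuidle_state *state)
{
	ktime_t t1 = ktime_get();

	platform_idle_wait();

	return (int) ktime_to_us(ktime_sub(ktime_get(), t1));
}

static int __init example_idle_init(void)
{
	struct cpuidle_device *dev = &per_cpu(example_idle_dev, 0);
	struct cpuidle_state *st = &dev->states[0];
	int ret;

	ret = cpuidle_register_driver(&example_idle_driver);
	if (ret)
		return ret;

	/* Describe one shallow idle state on CPU 0. */
	strcpy(st->name, "C1");
	st->exit_latency     = 1;	/* us */
	st->target_residency = 10;	/* us */
	st->flags            = CPUIDLE_FLAG_TIME_VALID;
	st->enter            = example_enter_c1;

	dev->cpu = 0;
	dev->state_count = 1;

	ret = cpuidle_register_device(dev);
	if (ret)
		cpuidle_unregister_driver(&example_idle_driver);
	return ret;
}
module_init(example_idle_init);
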
diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt
new file mode 100644
index 00000000000..12c6bd50c9f
--- /dev/null
+++ b/Documentation/cpuidle/governor.txt
@@ -0,0 +1,29 @@
+
+
+
+ Supporting multiple CPU idle levels in kernel
+
+ cpuidle governors
+
+
+
+
+A cpuidle governor is the policy routine that decides which idle state to
+enter at any given time. The cpuidle core uses the following callbacks into
+the governor:
+
+* enable() to enable the governor for a particular device
+* disable() to disable the governor for a particular device
+* select() to select an idle state to enter
+* reflect() called after returning from the idle state, which can be used
+  by the governor for some record keeping.
+
+More than one governor can be registered at the same time, and users can
+switch between governors through the sysfs interface (when enabled).
+Multiple governors are supported so that developers can easily experiment
+with different policies. By default, cpuidle selects the most suitable
+governor for your kernel configuration and platform based on the governor
+ratings.
+
+Interfaces:
+extern int cpuidle_register_governor(struct cpuidle_governor *gov);
+extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
+struct cpuidle_governor
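
A minimal sketch of what registering a (deliberately trivial) governor
might look like; the selection logic here is only a placeholder, not a
useful policy:

#include <linux/module.h>
#include <linux/cpuidle.h>

/* Placeholder policy: always pick the shallowest state. */
static int trivial_select(struct cpuidle_device *dev)
{
	return 0;
}

static struct cpuidle_governor trivial_governor = {
	.name   = "trivial",
	.rating = 1,		/* low rating: never the default choice */
	.select = trivial_select,
	.owner  = THIS_MODULE,
};

static int __init trivial_governor_init(void)
{
	return cpuidle_register_governor(&trivial_governor);
}
module_init(trivial_governor_init);
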
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
new file mode 100644
index 00000000000..50d7b164275
--- /dev/null
+++ b/Documentation/cpuidle/sysfs.txt
@@ -0,0 +1,79 @@
+
+
+ Supporting multiple CPU idle levels in kernel
+
+ cpuidle sysfs
+
+System global cpuidle related information and tunables are under
+/sys/devices/system/cpu/cpuidle
+
+The current interfaces in this directory have self-explanatory names:
+* current_driver
+* current_governor_ro
+
+With the cpuidle_sysfs_switch boot option (meant for developer testing),
+the following objects are visible instead:
+* current_driver
+* available_governors
+* current_governor
+In this case users can switch the governor at run time by writing
+to current_governor.
+
+
+Per-logical-CPU cpuidle information is under
+/sys/devices/system/cpu/cpuX/cpuidle
+for each online CPU X.
+
+--------------------------------------------------------------------------------
+# ls -lR /sys/devices/system/cpu/cpu0/cpuidle/
+/sys/devices/system/cpu/cpu0/cpuidle/:
+total 0
+drwxr-xr-x 2 root root 0 Feb 8 10:42 state0
+drwxr-xr-x 2 root root 0 Feb 8 10:42 state1
+drwxr-xr-x 2 root root 0 Feb 8 10:42 state2
+drwxr-xr-x 2 root root 0 Feb 8 10:42 state3
+
+/sys/devices/system/cpu/cpu0/cpuidle/state0:
+total 0
+-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
+-r--r--r-- 1 root root 4096 Feb 8 10:42 name
+-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 time
+-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
+
+/sys/devices/system/cpu/cpu0/cpuidle/state1:
+total 0
+-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
+-r--r--r-- 1 root root 4096 Feb 8 10:42 name
+-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 time
+-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
+
+/sys/devices/system/cpu/cpu0/cpuidle/state2:
+total 0
+-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
+-r--r--r-- 1 root root 4096 Feb 8 10:42 name
+-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 time
+-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
+
+/sys/devices/system/cpu/cpu0/cpuidle/state3:
+total 0
+-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
+-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
+-r--r--r-- 1 root root 4096 Feb 8 10:42 name
+-r--r--r-- 1 root root 4096 Feb 8 10:42 power
+-r--r--r-- 1 root root 4096 Feb 8 10:42 time
+-r--r--r-- 1 root root 4096 Feb 8 10:42 usage
+--------------------------------------------------------------------------------
+
+
+* desc : Small description about the idle state (string)
+* latency : Latency to exit out of this idle state (in microseconds)
+* name : Name of the idle state (string)
+* power : Power consumed while in this idle state (in milliwatts)
+* time : Total time spent in this idle state (in microseconds)
+* usage : Number of times this state was entered (count)
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 30c101761d0..83f515c2905 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -92,9 +92,8 @@ handler has run. Up to MAX_STACK_SIZE bytes are copied -- e.g.,
64 bytes on i386.
Note that the probed function's args may be passed on the stack
-or in registers (e.g., for x86_64 or for an i386 fastcall function).
-The jprobe will work in either case, so long as the handler's
-prototype matches that of the probed function.
+or in registers. The jprobe will work in either case, so long as the
+handler's prototype matches that of the probed function.
1.3 Return Probes
@@ -270,9 +269,9 @@ Kprobes runs the handler whose address is jp->entry.
The handler should have the same arg list and return type as the probed
function; and just before it returns, it must call jprobe_return().
(The handler never actually returns, since jprobe_return() returns
-control to Kprobes.) If the probed function is declared asmlinkage,
-fastcall, or anything else that affects how args are passed, the
-handler's declaration must match.
+control to Kprobes.) If the probed function is declared asmlinkage
+or anything else that affects how args are passed, the handler's
+declaration must match.
register_jprobe() returns 0 on success, or a negative errno otherwise.
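
For reference, a handler matching the probed function's prototype could be
set up along these lines (a sketch that assumes do_fork()'s prototype from
kernel/fork.c):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* The handler mirrors do_fork()'s arg list and return type. */
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork called: clone_flags=0x%lx\n", clone_flags);

	jprobe_return();	/* hands control back to Kprobes */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	.entry		= JPROBE_ENTRY(jdo_fork),
	.kp.symbol_name	= "do_fork",
};

static int __init jprobe_example_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit jprobe_example_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jprobe_example_init);
module_exit(jprobe_example_exit);
MODULE_LICENSE("GPL");
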
diff --git a/Documentation/sched-rt-group.txt b/Documentation/sched-rt-group.txt
new file mode 100644
index 00000000000..1c6332f4543
--- /dev/null
+++ b/Documentation/sched-rt-group.txt
@@ -0,0 +1,59 @@
+
+
+Real-Time group scheduling.
+
+The problem space:
+
+In order to schedule multiple groups of realtime tasks, each group must
+be assigned a fixed portion of the available CPU time. Without a minimum
+guarantee a realtime group can obviously fall short. A fuzzy upper limit
+is of no use since it cannot be relied upon, which leaves us with just
+the single fixed portion.
+
+CPU time is divided by means of specifying how much time can be spent
+running in a given period. Say a fixed-frame-rate realtime renderer must
+deliver 25 frames a second, which yields a period of 0.04s. Now say
+it will also have to play some music and respond to input, leaving it
+with around 80% for the graphics. We can then give this group a runtime
+of 0.8 * 0.04s = 0.032s.
+
+This way the graphics group will have a 0.04s period with a 0.032s runtime
+limit.
+
+Now if the audio thread needs to refill the DMA buffer every 0.005s, but
+needs only about 3% CPU time to do so, it can do with a runtime of
+0.03 * 0.005s = 0.00015s.
+
+
+The Interface:
+
+system wide:
+
+/proc/sys/kernel/sched_rt_period_us
+/proc/sys/kernel/sched_rt_runtime_us
+
+CONFIG_FAIR_USER_SCHED
+
+/sys/kernel/uids/<uid>/cpu_rt_runtime_us
+
+or
+
+CONFIG_FAIR_CGROUP_SCHED
+
+/cgroup/<cgroup>/cpu.rt_runtime_us
+
+[ time is specified in us because the interface is s32; this gives an
+ operating range of ~35 minutes down to 1us ]
+
+The period takes values in [ 1, INT_MAX ], runtime in [ -1, INT_MAX - 1 ].
+
+A runtime of -1 specifies runtime == period, i.e. no limit.
+
+New groups get the period from /proc/sys/kernel/sched_rt_period_us and
+a runtime of 0.
+
+Settings are constrained to:
+
+ \Sum_{i} runtime_{i} / global_period <= global_runtime / global_period
+
+in order to keep the configuration schedulable.
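
As a worked instance of that constraint (the numbers are illustrative
only): with a global period of 1000000 us and a global runtime of
950000 us, two groups with runtimes of 600000 us and 300000 us are
accepted, since (600000 + 300000) / 1000000 <= 950000 / 1000000.
Adding a third group with a runtime of 200000 us would push the sum
past the global runtime and is rejected.
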
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index dc8801d4e94..276a7e63782 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -29,7 +29,7 @@ show up in /proc/sys/kernel:
- java-interpreter [ binfmt_java, obsolete ]
- kstack_depth_to_print [ X86 only ]
- l2cr [ PPC only ]
-- modprobe ==> Documentation/kmod.txt
+- modprobe ==> Documentation/debugging-modules.txt
- msgmax
- msgmnb
- msgmni
diff --git a/MAINTAINERS b/MAINTAINERS
index c40f0ae9655..1d2edb491b3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1255,8 +1255,8 @@ W: http://linux-net.osdl.org/index.php/DCCP
S: Maintained
DECnet NETWORK LAYER
-P: Patrick Caulfield
-M: patrick@tykepenguin.com
+P: Christine Caulfield
+M: christine.caulfield@googlemail.com
W: http://linux-decnet.sourceforge.net
L: linux-decnet-user@lists.sourceforge.net
S: Maintained
@@ -1318,8 +1318,8 @@ L: linux-kernel@vger.kernel.org
S: Maintained
DISTRIBUTED LOCK MANAGER
-P: Patrick Caulfield
-M: pcaulfie@redhat.com
+P: Christine Caulfield
+M: ccaulfie@redhat.com
P: David Teigland
M: teigland@redhat.com
L: cluster-devel@redhat.com
@@ -1616,6 +1616,7 @@ S: Maintained
FILESYSTEMS (VFS and infrastructure)
P: Alexander Viro
M: viro@zeniv.linux.org.uk
+L: linux-fsdevel@vger.kernel.org
S: Maintained
FIREWIRE SUBSYSTEM (drivers/firewire, <linux/firewire*.h>)
@@ -3561,6 +3562,8 @@ P: Christoph Lameter
M: clameter@sgi.com
P: Pekka Enberg
M: penberg@cs.helsinki.fi
+P: Matt Mackall
+M: mpm@selenic.com
L: linux-mm@kvack.org
S: Maintained
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 973c5c3705e..8c71daf94a5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -259,8 +259,8 @@ osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bu
retval = user_path_walk(path, &nd);
if (!retval) {
- retval = do_osf_statfs(nd.dentry, buffer, bufsiz);
- path_release(&nd);
+ retval = do_osf_statfs(nd.path.dentry, buffer, bufsiz);
+ path_put(&nd.path);
}
return retval;
}
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 1dd50d07693..75480cab089 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -119,13 +119,8 @@ irqreturn_t timer_interrupt(int irq, void *dev)
state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
nticks = delta >> FIX_SHIFT;
- while (nticks > 0) {
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- nticks--;
- }
+ if (nticks)
+ do_timer(nticks);
/*
* If we have an externally synchronized Linux clock, then update
@@ -141,6 +136,12 @@ irqreturn_t timer_interrupt(int irq, void *dev)
}
write_sequnlock(&xtime_lock);
+
+#ifndef CONFIG_SMP
+ while (nticks--)
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+
return IRQ_HANDLED;
}
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 5bd64e341df..9bdc8f99183 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -137,9 +137,6 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
profile_tick(CPU_PROFILING);
/*
@@ -161,6 +158,11 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
last_rtc_update = xtime.tv_sec - 600;
}
write_sequnlock(&xtime_lock);
+
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+
return IRQ_HANDLED;
}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 58717cb1970..56a67ab698c 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -126,15 +126,13 @@ static void decode_address(char *buf, unsigned long address)
struct vm_area_struct *vma = vml->vma;
if (address >= vma->vm_start && address < vma->vm_end) {
+ char _tmpbuf[256];
char *name = p->comm;
struct file *file = vma->vm_file;
- if (file) {
- char _tmpbuf[256];
- name = d_path(file->f_dentry,
- file->f_vfsmnt,
- _tmpbuf,
- sizeof(_tmpbuf));
- }
+
+ if (file)
+ name = d_path(&file->f_path, _tmpbuf,
+ sizeof(_tmpbuf));
/* FLAT does not have its text aligned to the start of
* the map while FDPIC ELF does ...
diff --git a/arch/cris/arch-v10/lib/memset.c b/arch/cris/arch-v10/lib/memset.c
index 42c1101043a..c94ea9b3ec2 100644
--- a/arch/cris/arch-v10/lib/memset.c
+++ b/arch/cris/arch-v10/lib/memset.c
@@ -1,252 +1,259 @@
-/*#************************************************************************#*/
-/*#-------------------------------------------------------------------------*/
-/*# */
-/*# FUNCTION NAME: memset() */
-/*# */
-/*# PARAMETERS: void* dst; Destination address. */
-/*# int c; Value of byte to write. */
-/*# int len; Number of bytes to write. */
-/*# */
-/*# RETURNS: dst. */
-/*# */
-/*# DESCRIPTION: Sets the memory dst of length len bytes to c, as standard. */
-/*# Framework taken from memcpy. This routine is */
-/*# very sensitive to compiler changes in register allocation. */
-/*# Should really be rewritten to avoid this problem. */
-/*# */
-/*#-------------------------------------------------------------------------*/
-/*# */
-/*# HISTORY */
-/*# */
-/*# DATE NAME CHANGES */
-/*# ---- ---- ------- */
-/*# 990713 HP Tired of watching this function (or */
-/*# really, the nonoptimized generic */
-/*# implementation) take up 90% of simulator */
-/*# output. Measurements needed. */
-/*# */
-/*#-------------------------------------------------------------------------*/
-
-#include <linux/types.h>
-
-/* No, there's no macro saying 12*4, since it is "hard" to get it into
- the asm in a good way. Thus better to expose the problem everywhere.
- */
-
-/* Assuming 1 cycle per dword written or read (ok, not really true), and
- one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
- so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */
-
-#define ZERO_BLOCK_SIZE (1*12*4)
-
-void *memset(void *pdst,
- int c,
- size_t plen)
+/* A memset for CRIS.
+ Copyright (C) 1999-2005 Axis Communications.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Neither the name of Axis Communications nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
+ COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE. */
+
+/* FIXME: This file should really only be used for reference, as the
+ result is somewhat depending on gcc generating what we expect rather
+ than what we describe. An assembly file should be used instead. */
+
+/* Note the multiple occurrence of the expression "12*4", including the
+ asm. It is hard to get it into the asm in a good way. Thus better to
+ expose the problem everywhere: no macro. */
+
+/* Assuming one cycle per dword written or read (ok, not really true; the
+ world is not ideal), and one cycle per instruction, then 43+3*(n/48-1)
+ <= 24+24*(n/48-1) so n >= 45.7; n >= 0.9; we win on the first full
+ 48-byte block to set. */
+
+#define MEMSET_BY_BLOCK_THRESHOLD (1 * 48)
+
+/* No name ambiguities in this file. */
+__asm__ (".syntax no_register_prefix");
+
+void *memset(void *pdst, int c, unsigned int plen)
{
- /* Ok. Now we want the parameters put in special registers.
- Make sure the compiler is able to make something useful of this. */
+ /* Now we want the parameters in special registers. Make sure the
+ compiler does something usable with this. */
register char *return_dst __asm__ ("r10") = pdst;
register int n __asm__ ("r12") = plen;
register int lc __asm__ ("r11") = c;
- /* Most apps use memset sanely. Only those memsetting about 3..4
- bytes or less get penalized compared to the generic implementation
- - and that's not really sane use. */
+ /* Most apps use memset sanely. Memsetting about 3..4 bytes or less get
+ penalized here compared to the generic implementation. */
- /* Ugh. This is fragile at best. Check with newer GCC releases, if
- they compile cascaded "x |= x << 8" sanely! */
- __asm__("movu.b %0,$r13\n\t"
- "lslq 8,$r13\n\t"
- "move.b %0,$r13\n\t"
- "move.d $r13,%0\n\t"
- "lslq 16,$r13\n\t"
- "or.d $r13,%0"
- : "=r" (lc) : "0" (lc) : "r13");
+ /* This is fragile performancewise at best. Check with newer GCC
+ releases, if they compile cascaded "x |= x << 8" to sane code. */
+ __asm__("movu.b %0,r13 \n\
+ lslq 8,r13 \n\
+ move.b %0,r13 \n\
+ move.d r13,%0 \n\
+ lslq 16,r13 \n\
+ or.d r13,%0"
+ : "=r" (lc) /* Inputs. */
+ : "0" (lc) /* Outputs. */
+ : "r13"); /* Trash. */
{
register char *dst __asm__ ("r13") = pdst;
- /* This is NONPORTABLE, but since this whole routine is */
- /* grossly nonportable that doesn't matter. */
+ if (((unsigned long) pdst & 3) != 0
+ /* Oops! n = 0 must be a valid call, regardless of alignment. */
+ && n >= 3)
+ {
+ if ((unsigned long) dst & 1)
+ {
+ *dst = (char) lc;
+ n--;
+ dst++;
+ }
- if (((unsigned long) pdst & 3) != 0
- /* Oops! n=0 must be a legal call, regardless of alignment. */
- && n >= 3)
- {
- if ((unsigned long)dst & 1)
- {
- *dst = (char) lc;
- n--;
- dst++;
- }
-
- if ((unsigned long)dst & 2)
- {
- *(short *)dst = lc;
- n -= 2;
- dst += 2;
- }
- }
+ if ((unsigned long) dst & 2)
+ {
+ *(short *) dst = lc;
+ n -= 2;
+ dst += 2;
+ }
+ }
- /* Now the fun part. For the threshold value of this, check the equation
- above. */
- /* Decide which copying method to use. */
- if (n >= ZERO_BLOCK_SIZE)
- {
- /* For large copies we use 'movem' */
-
- /* It is not optimal to tell the compiler about clobbering any
- registers; that will move the saving/restoring of those registers
- to the function prologue/epilogue, and make non-movem sizes
- suboptimal.
-
- This method is not foolproof; it assumes that the "asm reg"
- declarations at the beginning of the function really are used
- here (beware: they may be moved to temporary registers).
- This way, we do not have to save/move the registers around into
- temporaries; we can safely use them straight away.
-
- If you want to check that the allocation was right; then
- check the equalities in the first comment. It should say
- "r13=r13, r12=r12, r11=r11" */
- __asm__ volatile ("\n\
- ;; Check that the following is true (same register names on \n\
- ;; both sides of equal sign, as in r8=r8): \n\
- ;; %0=r13, %1=r12, %4=r11 \n\
- ;; \n\
- ;; Save the registers we'll clobber in the movem process \n\
- ;; on the stack. Don't mention them to gcc, it will only be \n\
- ;; upset. \n\
- subq 11*4,$sp \n\
- movem $r10,[$sp] \n\
+ /* Decide which setting method to use. */
+ if (n >= MEMSET_BY_BLOCK_THRESHOLD)
+ {
+ /* It is not optimal to tell the compiler about clobbering any
+ registers; that will move the saving/restoring of those registers
+ to the function prologue/epilogue, and make non-block sizes
+ suboptimal. */
+ __asm__ volatile
+ ("\
+ ;; GCC does promise correct register allocations, but let's \n\
+ ;; make sure it keeps its promises. \n\
+ .ifnc %0-%1-%4,$r13-$r12-$r11 \n\
+ .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\
+ .endif \n\
+ \n\
+ ;; Save the registers we'll clobber in the movem process \n\
+ ;; on the stack. Don't mention them to gcc, it will only be \n\
+ ;; upset. \n\
+ subq 11*4,sp \n\
+ movem r10,[sp] \n\
\n\
- move.d $r11,$r0 \n\
- move.d $r11,$r1 \n\
- move.d $r11,$r2 \n\
- move.d $r11,$r3 \n\
- move.d $r11,$r4 \n\
- move.d $r11,$r5 \n\
- move.d $r11,$r6 \n\
- move.d $r11,$r7 \n\
- move.d $r11,$r8 \n\
- move.d $r11,$r9 \n\
- move.d $r11,$r10 \n\
+ move.d r11,r0 \n\
+ move.d r11,r1 \n\
+ move.d r11,r2 \n\
+ move.d r11,r3 \n\
+ move.d r11,r4 \n\
+ move.d r11,r5 \n\
+ move.d r11,r6 \n\
+ move.d r11,r7 \n\
+ move.d r11,r8 \n\
+ move.d r11,r9 \n\
+ move.d r11,r10 \n\
\n\
- ;; Now we've got this: \n\
- ;; r13 - dst \n\
- ;; r12 - n \n\
+ ;; Now we've got this: \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
\n\
- ;; Update n for the first loop \n\
- subq 12*4,$r12 \n\
+ ;; Update n for the first loop \n\
+ subq 12*4,r12 \n\
0: \n\
- subq 12*4,$r12 \n\
- bge 0b \n\
- movem $r11,[$r13+] \n\
+"
+#ifdef __arch_common_v10_v32
+ /* Cater to branch offset difference between v32 and v10. We
+ assume the branch below has an 8-bit offset. */
+" setf\n"
+#endif
+" subq 12*4,r12 \n\
+ bge 0b \n\
+ movem r11,[r13+] \n\
\n\
- addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
+ ;; Compensate for last loop underflowing n. \n\
+ addq 12*4,r12 \n\
\n\
- ;; Restore registers from stack \n\
- movem [$sp+],$r10"
+ ;; Restore registers from stack. \n\
+ movem [sp+],r10"
- /* Outputs */ : "=r" (dst), "=r" (n)
- /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
+ /* Outputs. */
+ : "=r" (dst), "=r" (n)
- }
+ /* Inputs. */
+ : "0" (dst), "1" (n), "r" (lc));
+ }
+
+ /* An ad-hoc unroll, used for 4*12-1..16 bytes. */
+ while (n >= 16)
+ {
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ n -= 16;
+ }
- /* Either we directly starts copying, using dword copying
- in a loop, or we copy as much as possible with 'movem'
- and then the last block (<44 bytes) is copied here.
- This will work since 'movem' will have updated src,dst,n. */
-
- while ( n >= 16 )
- {
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- n -= 16;
- }
-
- /* A switch() is definitely the fastest although it takes a LOT of code.
- * Particularly if you inline code this.
- */
switch (n)
- {
+ {
case 0:
break;
+
case 1:
- *(char*)dst = (char) lc;
+ *dst = (char) lc;
break;
+
case 2:
- *(short*)dst = (short) lc;
+ *(short *) dst = (short) lc;
break;
+
case 3:
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 4:
- *((long*)dst)++ = lc;
+ *(long *) dst = lc;
break;
+
case 5:
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 6:
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 7:
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 8:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc;
break;
+
case 9:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 10:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 11:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 12:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc;
break;
+
case 13:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 14:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 15:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
- }
+ }
}
- return return_dst; /* destination pointer. */
-} /* memset() */
+ return return_dst;
+}
diff --git a/arch/cris/arch-v32/lib/memset.c b/arch/cris/arch-v32/lib/memset.c
index ffca1214674..c94ea9b3ec2 100644
--- a/arch/cris/arch-v32/lib/memset.c
+++ b/arch/cris/arch-v32/lib/memset.c
@@ -1,253 +1,259 @@
-/*#************************************************************************#*/
-/*#-------------------------------------------------------------------------*/
-/*# */
-/*# FUNCTION NAME: memset() */
-/*# */
-/*# PARAMETERS: void* dst; Destination address. */
-/*# int c; Value of byte to write. */
-/*# int len; Number of bytes to write. */
-/*# */
-/*# RETURNS: dst. */
-/*# */
-/*# DESCRIPTION: Sets the memory dst of length len bytes to c, as standard. */
-/*# Framework taken from memcpy. This routine is */
-/*# very sensitive to compiler changes in register allocation. */
-/*# Should really be rewritten to avoid this problem. */
-/*# */
-/*#-------------------------------------------------------------------------*/
-/*# */
-/*# HISTORY */
-/*# */
-/*# DATE NAME CHANGES */
-/*# ---- ---- ------- */
-/*# 990713 HP Tired of watching this function (or */
-/*# really, the nonoptimized generic */
-/*# implementation) take up 90% of simulator */
-/*# output. Measurements needed. */
-/*# */
-/*#-------------------------------------------------------------------------*/
-
-#include <linux/types.h>
-
-/* No, there's no macro saying 12*4, since it is "hard" to get it into
- the asm in a good way. Thus better to expose the problem everywhere.
- */
-
-/* Assuming 1 cycle per dword written or read (ok, not really true), and
- one per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1)
- so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. */
-
-#define ZERO_BLOCK_SIZE (1*12*4)
-
-void *memset(void *pdst,
- int c,
- size_t plen)
+/* A memset for CRIS.
+ Copyright (C) 1999-2005 Axis Communications.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Neither the name of Axis Communications nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
+ COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE. */
+
+/* FIXME: This file should really only be used for reference, as the
+ result is somewhat depending on gcc generating what we expect rather
+ than what we describe. An assembly file should be used instead. */
+
+/* Note the multiple occurrence of the expression "12*4", including the
+ asm. It is hard to get it into the asm in a good way. Thus better to
+ expose the problem everywhere: no macro. */
+
+/* Assuming one cycle per dword written or read (ok, not really true; the
+ world is not ideal), and one cycle per instruction, then 43+3*(n/48-1)
+ <= 24+24*(n/48-1) so n >= 45.7; n >= 0.9; we win on the first full
+ 48-byte block to set. */
+
+#define MEMSET_BY_BLOCK_THRESHOLD (1 * 48)
+
+/* No name ambiguities in this file. */
+__asm__ (".syntax no_register_prefix");
+
+void *memset(void *pdst, int c, unsigned int plen)
{
- /* Ok. Now we want the parameters put in special registers.
- Make sure the compiler is able to make something useful of this. */
+ /* Now we want the parameters in special registers. Make sure the
+ compiler does something usable with this. */
register char *return_dst __asm__ ("r10") = pdst;
register int n __asm__ ("r12") = plen;
register int lc __asm__ ("r11") = c;
- /* Most apps use memset sanely. Only those memsetting about 3..4
- bytes or less get penalized compared to the generic implementation
- - and that's not really sane use. */
+ /* Most apps use memset sanely. Memsetting about 3..4 bytes or less get
+ penalized here compared to the generic implementation. */
- /* Ugh. This is fragile at best. Check with newer GCC releases, if
- they compile cascaded "x |= x << 8" sanely! */
- __asm__("movu.b %0,$r13 \n\
- lslq 8,$r13 \n\
- move.b %0,$r13 \n\
- move.d $r13,%0 \n\
- lslq 16,$r13 \n\
- or.d $r13,%0"
- : "=r" (lc) : "0" (lc) : "r13");
+ /* This is fragile performancewise at best. Check with newer GCC
+ releases, if they compile cascaded "x |= x << 8" to sane code. */
+ __asm__("movu.b %0,r13 \n\
+ lslq 8,r13 \n\
+ move.b %0,r13 \n\
+ move.d r13,%0 \n\
+ lslq 16,r13 \n\
+ or.d r13,%0"
+ : "=r" (lc) /* Inputs. */
+ : "0" (lc) /* Outputs. */
+ : "r13"); /* Trash. */
{
register char *dst __asm__ ("r13") = pdst;
- /* This is NONPORTABLE, but since this whole routine is */
- /* grossly nonportable that doesn't matter. */
+ if (((unsigned long) pdst & 3) != 0
+ /* Oops! n = 0 must be a valid call, regardless of alignment. */
+ && n >= 3)
+ {
+ if ((unsigned long) dst & 1)
+ {
+ *dst = (char) lc;
+ n--;
+ dst++;
+ }
- if (((unsigned long) pdst & 3) != 0
- /* Oops! n=0 must be a legal call, regardless of alignment. */
- && n >= 3)
- {
- if ((unsigned long)dst & 1)
- {
- *dst = (char) lc;
- n--;
- dst++;
- }
-
- if ((unsigned long)dst & 2)
- {
- *(short *)dst = lc;
- n -= 2;
- dst += 2;
- }
- }
+ if ((unsigned long) dst & 2)
+ {
+ *(short *) dst = lc;
+ n -= 2;
+ dst += 2;
+ }
+ }
- /* Now the fun part. For the threshold value of this, check the equation
- above. */
- /* Decide which copying method to use. */
- if (n >= ZERO_BLOCK_SIZE)
- {
- /* For large copies we use 'movem' */
-
- /* It is not optimal to tell the compiler about clobbering any
- registers; that will move the saving/restoring of those registers
- to the function prologue/epilogue, and make non-movem sizes
- suboptimal.
-
- This method is not foolproof; it assumes that the "asm reg"
- declarations at the beginning of the function really are used
- here (beware: they may be moved to temporary registers).
- This way, we do not have to save/move the registers around into
- temporaries; we can safely use them straight away.
-
- If you want to check that the allocation was right; then
- check the equalities in the first comment. It should say
- "r13=r13, r12=r12, r11=r11" */
- __asm__ volatile (" \n\
- ;; Check that the register asm declaration got right. \n\
- ;; The GCC manual says it will work, but there *has* been bugs. \n\
- .ifnc %0-%1-%4,$r13-$r12-$r11 \n\
- .err \n\
- .endif \n\
+ /* Decide which setting method to use. */
+ if (n >= MEMSET_BY_BLOCK_THRESHOLD)
+ {
+ /* It is not optimal to tell the compiler about clobbering any
+ registers; that will move the saving/restoring of those registers
+ to the function prologue/epilogue, and make non-block sizes
+ suboptimal. */
+ __asm__ volatile
+ ("\
+ ;; GCC does promise correct register allocations, but let's \n\
+ ;; make sure it keeps its promises. \n\
+ .ifnc %0-%1-%4,$r13-$r12-$r11 \n\
+ .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\
+ .endif \n\
\n\
- ;; Save the registers we'll clobber in the movem process \n\
- ;; on the stack. Don't mention them to gcc, it will only be \n\
- ;; upset. \n\
- subq 11*4,$sp \n\
- movem $r10,[$sp] \n\
+ ;; Save the registers we'll clobber in the movem process \n\
+ ;; on the stack. Don't mention them to gcc, it will only be \n\
+ ;; upset. \n\
+ subq 11*4,sp \n\
+ movem r10,[sp] \n\
\n\
- move.d $r11,$r0 \n\
- move.d $r11,$r1 \n\
- move.d $r11,$r2 \n\
- move.d $r11,$r3 \n\
- move.d $r11,$r4 \n\
- move.d $r11,$r5 \n\
- move.d $r11,$r6 \n\
- move.d $r11,$r7 \n\
- move.d $r11,$r8 \n\
- move.d $r11,$r9 \n\
- move.d $r11,$r10 \n\
+ move.d r11,r0 \n\
+ move.d r11,r1 \n\
+ move.d r11,r2 \n\
+ move.d r11,r3 \n\
+ move.d r11,r4 \n\
+ move.d r11,r5 \n\
+ move.d r11,r6 \n\
+ move.d r11,r7 \n\
+ move.d r11,r8 \n\
+ move.d r11,r9 \n\
+ move.d r11,r10 \n\
\n\
- ;; Now we've got this: \n\
- ;; r13 - dst \n\
- ;; r12 - n \n\
+ ;; Now we've got this: \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
\n\
- ;; Update n for the first loop \n\
- subq 12*4,$r12 \n\
+ ;; Update n for the first loop \n\
+ subq 12*4,r12 \n\
0: \n\
- subq 12*4,$r12 \n\
- bge 0b \n\
- movem $r11,[$r13+] \n\
+"
+#ifdef __arch_common_v10_v32
+ /* Cater to branch offset difference between v32 and v10. We
+ assume the branch below has an 8-bit offset. */
+" setf\n"
+#endif
+" subq 12*4,r12 \n\
+ bge 0b \n\
+ movem r11,[r13+] \n\
\n\
- addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
+ ;; Compensate for last loop underflowing n. \n\
+ addq 12*4,r12 \n\
\n\
- ;; Restore registers from stack \n\
- movem [$sp+],$r10"
+ ;; Restore registers from stack. \n\
+ movem [sp+],r10"
- /* Outputs */ : "=r" (dst), "=r" (n)
- /* Inputs */ : "0" (dst), "1" (n), "r" (lc));
- }
+ /* Outputs. */
+ : "=r" (dst), "=r" (n)
+
+ /* Inputs. */
+ : "0" (dst), "1" (n), "r" (lc));
+ }
+
+ /* An ad-hoc unroll, used for 4*12-1..16 bytes. */
+ while (n >= 16)
+ {
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ n -= 16;
+ }
- /* Either we directly starts copying, using dword copying
- in a loop, or we copy as much as possible with 'movem'
- and then the last block (<44 bytes) is copied here.
- This will work since 'movem' will have updated src,dst,n. */
-
- while ( n >= 16 )
- {
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- n -= 16;
- }
-
- /* A switch() is definitely the fastest although it takes a LOT of code.
- * Particularly if you inline code this.
- */
switch (n)
- {
+ {
case 0:
break;
+
case 1:
- *(char*)dst = (char) lc;
+ *dst = (char) lc;
break;
+
case 2:
- *(short*)dst = (short) lc;
+ *(short *) dst = (short) lc;
break;
+
case 3:
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 4:
- *((long*)dst)++ = lc;
+ *(long *) dst = lc;
break;
+
case 5:
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 6:
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 7:
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 8:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc;
break;
+
case 9:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 10:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 11:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
+
case 12:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc;
break;
+
case 13:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *dst = (char) lc;
break;
+
case 14:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *(short*)dst = (short) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc;
break;
+
case 15:
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((long*)dst)++ = lc;
- *((short*)dst)++ = (short) lc;
- *(char*)dst = (char) lc;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(long *) dst = lc; dst += 4;
+ *(short *) dst = (short) lc; dst += 2;
+ *dst = (char) lc;
break;
- }
+ }
}
- return return_dst; /* destination pointer. */
-} /* memset() */
+ return return_dst;
+}
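
The inline asm near the top of the new memset exists because, as its comment notes, GCC of that era could not be relied on to compile a cascaded "x |= x << 8" into good CRIS code. For reference only (not part of the patch above), a minimal portable C sketch of the same byte replication; the name splat_byte is made up for illustration:

/* Illustration only: replicate the fill byte c into all four bytes of a
   32-bit word, so a memset main loop can store whole dwords at a time.  */
#include <stdio.h>

static unsigned int splat_byte(int c)
{
	unsigned int lc = (unsigned char) c;	/* 0x000000cc */

	lc |= lc << 8;				/* 0x0000cccc */
	lc |= lc << 16;				/* 0xcccccccc */
	return lc;
}

int main(void)
{
	printf("%#x\n", splat_byte(0xab));	/* prints 0xabababab */
	return 0;
}
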
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index 925fb0199a0..69f6a4ef5d6 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -63,6 +63,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
/* last time the cmos clock got updated */
static long last_rtc_update = 0;
+ profile_tick(CPU_PROFILING);
/*
* Here we are in the timer irq handler. We just have irqs locally
* disabled but we don't know if the timer_bh is running on the other
@@ -73,8 +74,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
write_seqlock(&xtime_lock);
do_timer(1);
- update_process_times(user_mode(get_irq_regs()));
- profile_tick(CPU_PROFILING);
/*
* If we have an externally synchronized Linux clock, then update
@@ -99,6 +98,9 @@ static irqreturn_t timer_interrupt(int irq, void *dummy)
#endif /* CONFIG_HEARTBEAT */
write_sequnlock(&xtime_lock);
+
+ update_process_times(user_mode(get_irq_regs()));
+
return IRQ_HANDLED;
}
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index ef7527b8b0c..17725a55aed 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -105,11 +105,9 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
#ifdef CONFIG_DEBUG_INFO
- *(
INIT_TEXT
EXIT_TEXT
- .exitcall.exit
- )
+ *(.exitcall.exit)
#endif
*(.fixup)
*(.gnu.warning)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2d4fcd01bc9..dff9edfc746 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -232,7 +232,14 @@ config PGTABLE_4
endchoice
+if IA64_HP_SIM
+config HZ
+ default 32
+endif
+
+if !IA64_HP_SIM
source kernel/Kconfig.hz
+endif
config IA64_BRL_EMU
bool
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 89cdbcaeb45..0ccfb2ad638 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -42,14 +42,12 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
/* last time the cmos clock got updated */
static long last_rtc_update=0;
+ if (current->pid)
+ profile_tick(CPU_PROFILING);
+
write_seqlock(&xtime_lock);
do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- if (current->pid)
- profile_tick(CPU_PROFILING);
/*
* If we have an externally synchronized Linux clock, then update
@@ -67,6 +65,10 @@ irqreturn_t arch_timer_interrupt(int irq, void *dummy)
}
write_sequnlock(&xtime_lock);
+
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
return(IRQ_HANDLED);
}
diff --git a/arch/m68knommu/platform/5206/Makefile b/arch/m68knommu/platform/5206/Makefile
index c7bb0cef31a..a439d9ab3f2 100644
--- a/arch/m68knommu/platform/5206/Makefile
+++ b/arch/m68knommu/platform/5206/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/5206e/Makefile b/arch/m68knommu/platform/5206e/Makefile
index c7bb0cef31a..a439d9ab3f2 100644
--- a/arch/m68knommu/platform/5206e/Makefile
+++ b/arch/m68knommu/platform/5206e/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/520x/Makefile b/arch/m68knommu/platform/520x/Makefile
index 31b4eb51739..a50e76acc8f 100644
--- a/arch/m68knommu/platform/520x/Makefile
+++ b/arch/m68knommu/platform/520x/Makefile
@@ -12,8 +12,6 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/523x/Makefile b/arch/m68knommu/platform/523x/Makefile
index ac9fbece8a4..5694d593f02 100644
--- a/arch/m68knommu/platform/523x/Makefile
+++ b/arch/m68knommu/platform/523x/Makefile
@@ -12,8 +12,6 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/5249/Makefile b/arch/m68knommu/platform/5249/Makefile
index c7bb0cef31a..a439d9ab3f2 100644
--- a/arch/m68knommu/platform/5249/Makefile
+++ b/arch/m68knommu/platform/5249/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/5272/Makefile b/arch/m68knommu/platform/5272/Makefile
index 7475c38c3b4..26135d92b34 100644
--- a/arch/m68knommu/platform/5272/Makefile
+++ b/arch/m68knommu/platform/5272/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/527x/Makefile b/arch/m68knommu/platform/527x/Makefile
index 7475c38c3b4..26135d92b34 100644
--- a/arch/m68knommu/platform/527x/Makefile
+++ b/arch/m68knommu/platform/527x/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/528x/Makefile b/arch/m68knommu/platform/528x/Makefile
index 7475c38c3b4..26135d92b34 100644
--- a/arch/m68knommu/platform/528x/Makefile
+++ b/arch/m68knommu/platform/528x/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 580fd6658d7..cfd586860fd 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y += config.o
diff --git a/arch/m68knommu/platform/532x/Makefile b/arch/m68knommu/platform/532x/Makefile
index 475b92866a9..e431912f562 100644
--- a/arch/m68knommu/platform/532x/Makefile
+++ b/arch/m68knommu/platform/532x/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
#obj-y := config.o usb-mcf532x.o spi-mcf532x.o
obj-y := config.o
diff --git a/arch/m68knommu/platform/5407/Makefile b/arch/m68knommu/platform/5407/Makefile
index 68633b27df5..e6035e7a2d3 100644
--- a/arch/m68knommu/platform/5407/Makefile
+++ b/arch/m68knommu/platform/5407/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-EXTRA_AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o
diff --git a/arch/m68knommu/platform/coldfire/Makefile b/arch/m68knommu/platform/coldfire/Makefile
index e5fff297ae0..40cf20be1b9 100644
--- a/arch/m68knommu/platform/coldfire/Makefile
+++ b/arch/m68knommu/platform/coldfire/Makefile
@@ -12,9 +12,7 @@
# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
#
-ifdef CONFIG_FULLDEBUG
-AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
-endif
+asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-$(CONFIG_COLDFIRE) += dma.o entry.o vectors.o
obj-$(CONFIG_M5206) += timers.o
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index b333731b875..111b66dc737 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -197,14 +197,13 @@ ENTRY(fasthandler)
RESTORE_LOCAL
ENTRY(ret_from_interrupt)
- jeq 2f
-1:
- RESTORE_ALL
-2:
moveb %sp@(PT_SR),%d0
andl #0x7,%d0
- jhi 1b
+ jeq 1f
+ RESTORE_ALL
+
+1:
/* check if we need to do software interrupts */
movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
jeq ret_from_exception
diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c
index a60213e877e..ba5a9f32ebd 100644
--- a/arch/m68knommu/platform/coldfire/timers.c
+++ b/arch/m68knommu/platform/coldfire/timers.c
@@ -148,25 +148,32 @@ irqreturn_t coldfire_profile_tick(int irq, void *dummy)
/* Reset ColdFire timer2 */
__raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, PA(MCFTIMER_TER));
if (current->pid)
- profile_tick(CPU_PROFILING, regs);
+ profile_tick(CPU_PROFILING);
return IRQ_HANDLED;
}
/***************************************************************************/
+static struct irqaction coldfire_profile_irq = {
+ .name = "profile timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = coldfire_profile_tick,
+};
+
void coldfire_profile_init(void)
{
- printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n", PROFILEHZ);
+ printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n",
+ PROFILEHZ);
+
+ setup_irq(mcf_profilevector, &coldfire_profile_irq);
/* Set up TIMER 2 as high speed profile clock */
__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
- __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+ __raw_writetrr(((MCF_BUSCLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
- request_irq(mcf_profilevector, coldfire_profile_tick,
- (IRQF_DISABLED | IRQ_FLG_FAST), "profile timer", NULL);
mcf_settimericr(2, 7);
}
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index d70c4e0e85f..672fba84b2c 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -694,7 +694,7 @@ asmlinkage int irix_statfs(const char __user *path,
if (error)
goto out;
- error = vfs_statfs(nd.dentry, &kbuf);
+ error = vfs_statfs(nd.path.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -711,7 +711,7 @@ asmlinkage int irix_statfs(const char __user *path,
}
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -1360,7 +1360,7 @@ asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry, &kbuf);
+ error = vfs_statfs(nd.path.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1385,7 +1385,7 @@ asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
error |= __put_user(0, &buf->f_fstr[i]);
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -1611,7 +1611,7 @@ asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry, &kbuf);
+ error = vfs_statfs(nd.path.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1636,7 +1636,7 @@ asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *
error |= __put_user(0, &buf->f_fstr[i]);
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 3e025df2dc8..0c5b9dabb47 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -219,10 +219,10 @@ asmlinkage long hpux_statfs(const char __user *path,
error = user_path_walk(path, &nd);
if (!error) {
struct hpux_statfs tmp;
- error = vfs_statfs_hpux(nd.dentry, &tmp);
+ error = vfs_statfs_hpux(nd.path.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 485513c9f1a..5b8d8382b76 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -442,10 +442,6 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
-config WANT_DEVICE_TREE
- bool
- default n
-
endmenu
config ISA_DMA_API
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 49797a45416..63d07ccbb9d 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -147,6 +147,8 @@ HOSTCFLAGS += -I$(src)/dtc-src/ -I$(src)/libfdt/
targets += dtc-src/dtc-parser.tab.c
targets += dtc-src/dtc-lexer.lex.c
+clean-files += dtc-src/dtc-parser.tab.h
+
ifdef DTC_GENPARSER
BISON = bison
FLEX = flex
diff --git a/arch/powerpc/boot/ps3-hvcall.S b/arch/powerpc/boot/ps3-hvcall.S
index 585965f7e6a..d6068f1829c 100644
--- a/arch/powerpc/boot/ps3-hvcall.S
+++ b/arch/powerpc/boot/ps3-hvcall.S
@@ -145,7 +145,7 @@
.macro STORE_REGS_5_2
lwz r11, 16(r1)
std r4, 0(r11)
- lwz r11, 24(r1)
+ lwz r11, 20(r1)
std r5, 0(r11)
.endm
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0662ae46f72..c1baf9d5903 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -104,3 +104,5 @@ quiet_cmd_systbl_chk = CALL $<
PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
+
+clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b9d88374f14..4846bf543a8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -462,7 +462,7 @@ void show_regs(struct pt_regs * regs)
current, task_pid_nr(current), current->comm, task_thread_info(current));
#ifdef CONFIG_SMP
- printk(" CPU: %d", smp_processor_id());
+ printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */
for (i = 0; i < 32; i++) {
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 3702df7dc56..d3437c4c4a6 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -336,9 +336,9 @@ static unsigned long __init find_function32(struct lib32_elfinfo *lib,
return sym->st_value - VDSO32_LBASE;
}
-static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64,
- const char *orig, const char *fix)
+static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
+ struct lib64_elfinfo *v64,
+ const char *orig, const char *fix)
{
Elf32_Sym *sym32_gen, *sym32_fix;
@@ -433,9 +433,9 @@ static unsigned long __init find_function64(struct lib64_elfinfo *lib,
#endif
}
-static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64,
- const char *orig, const char *fix)
+static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
+ struct lib64_elfinfo *v64,
+ const char *orig, const char *fix)
{
Elf64_Sym *sym64_gen, *sym64_fix;
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 4a890cb42b9..257b13cb18a 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -198,14 +198,13 @@ out:
* dcookie user still being registered (namely, the reader
* of the event buffer).
*/
-static inline unsigned long fast_get_dcookie(struct dentry *dentry,
- struct vfsmount *vfsmnt)
+static inline unsigned long fast_get_dcookie(struct path *path)
{
unsigned long cookie;
- if (dentry->d_cookie)
- return (unsigned long)dentry;
- get_dcookie(dentry, vfsmnt, &cookie);
+ if (path->dentry->d_cookie)
+ return (unsigned long)path->dentry;
+ get_dcookie(path, &cookie);
return cookie;
}
@@ -240,8 +239,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
continue;
if (!(vma->vm_flags & VM_EXECUTABLE))
continue;
- app_cookie = fast_get_dcookie(vma->vm_file->f_dentry,
- vma->vm_file->f_vfsmnt);
+ app_cookie = fast_get_dcookie(&vma->vm_file->f_path);
pr_debug("got dcookie for %s\n",
vma->vm_file->f_dentry->d_name.name);
app = vma->vm_file;
@@ -262,8 +260,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
break;
}
- *spu_bin_dcookie = fast_get_dcookie(vma->vm_file->f_dentry,
- vma->vm_file->f_vfsmnt);
+ *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);
up_read(&mm->mmap_sem);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index c6fa49e23dc..4c0da0c079e 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -13,7 +13,6 @@ config MPC5121_ADS
bool "Freescale MPC5121E ADS"
depends on PPC_MULTIPLATFORM && PPC32
select DEFAULT_UIMAGE
- select WANT_DEVICE_TREE
select PPC_MPC5121
help
This option enables support for the MPC5121E ADS board.
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 515f244c90b..cf945d55c27 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -8,7 +8,6 @@ config PPC_MPC5200_SIMPLE
bool "Generic support for simple MPC5200 based boards"
depends on PPC_MPC52xx
select DEFAULT_UIMAGE
- select WANT_DEVICE_TREE
help
This option enables support for a simple MPC52xx based boards which
do not need a custom platform specific setup. Such boards are
@@ -35,7 +34,6 @@ config PPC_LITE5200
bool "Freescale Lite5200 Eval Board"
depends on PPC_MPC52xx
select DEFAULT_UIMAGE
- select WANT_DEVICE_TREE
config PPC_MPC5200_BUGFIX
bool "MPC5200 (L25R) bugfix support"
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index fcedbec07f9..0afd2259554 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -15,7 +15,6 @@ config PPC_MULTIPLATFORM
config PPC_82xx
bool "Freescale 82xx"
depends on 6xx
- select WANT_DEVICE_TREE
config PPC_83xx
bool "Freescale 83xx"
@@ -23,7 +22,6 @@ config PPC_83xx
select FSL_SOC
select MPC83xx
select IPIC
- select WANT_DEVICE_TREE
select FSL_EMB_PERFMON
config PPC_86xx
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 69941ba7097..73d81ce14b6 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -29,26 +29,22 @@ config PPC_85xx
bool "Freescale 85xx"
select E500
select FSL_SOC
- select WANT_DEVICE_TREE
select MPC85xx
config PPC_8xx
bool "Freescale 8xx"
select FSL_SOC
select 8xx
- select WANT_DEVICE_TREE
select PPC_LIB_RHEAP
config 40x
bool "AMCC 40x"
select PPC_DCR_NATIVE
- select WANT_DEVICE_TREE
select PPC_UDBG_16550
config 44x
bool "AMCC 44x"
select PPC_DCR_NATIVE
- select WANT_DEVICE_TREE
select PPC_UDBG_16550
config E200
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index b2494ebcdbe..e43024c0392 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -1,4 +1,13 @@
-#define DEBUG
+/*
+ * Copyright 2006-2008, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index e6e6559c55e..6d1228c66c5 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -1,3 +1,4 @@
+
/*
* SPU file system
*
@@ -592,7 +593,7 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
ret = -EINVAL;
/* check if we are on spufs */
- if (nd->dentry->d_sb->s_type != &spufs_type)
+ if (nd->path.dentry->d_sb->s_type != &spufs_type)
goto out;
/* don't accept undefined flags */
@@ -600,9 +601,9 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
goto out;
/* only threads can be underneath a gang */
- if (nd->dentry != nd->dentry->d_sb->s_root) {
+ if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
if ((flags & SPU_CREATE_GANG) ||
- !SPUFS_I(nd->dentry->d_inode)->i_gang)
+ !SPUFS_I(nd->path.dentry->d_inode)->i_gang)
goto out;
}
@@ -618,16 +619,17 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
mode &= ~current->fs->umask;
if (flags & SPU_CREATE_GANG)
- return spufs_create_gang(nd->dentry->d_inode,
- dentry, nd->mnt, mode);
+ return spufs_create_gang(nd->path.dentry->d_inode,
+ dentry, nd->path.mnt, mode);
else
- return spufs_create_context(nd->dentry->d_inode,
- dentry, nd->mnt, flags, mode, filp);
+ return spufs_create_context(nd->path.dentry->d_inode,
+ dentry, nd->path.mnt, flags, mode,
+ filp);
out_dput:
dput(dentry);
out_dir:
- mutex_unlock(&nd->dentry->d_inode->i_mutex);
+ mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 2b1953f6f12..01974f7776e 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -146,34 +146,28 @@ static void sputrace_log_item(const char *name, struct spu_context *ctx,
wake_up(&sputrace_wait);
}
-static void spu_context_event(const struct marker *mdata,
- void *private, const char *format, ...)
+static void spu_context_event(void *probe_private, void *call_data,
+ const char *format, va_list *args)
{
- struct spu_probe *p = mdata->private;
- va_list ap;
+ struct spu_probe *p = probe_private;
struct spu_context *ctx;
struct spu *spu;
- va_start(ap, format);
- ctx = va_arg(ap, struct spu_context *);
- spu = va_arg(ap, struct spu *);
+ ctx = va_arg(*args, struct spu_context *);
+ spu = va_arg(*args, struct spu *);
sputrace_log_item(p->name, ctx, spu);
- va_end(ap);
}
-static void spu_context_nospu_event(const struct marker *mdata,
- void *private, const char *format, ...)
+static void spu_context_nospu_event(void *probe_private, void *call_data,
+ const char *format, va_list *args)
{
- struct spu_probe *p = mdata->private;
- va_list ap;
+ struct spu_probe *p = probe_private;
struct spu_context *ctx;
- va_start(ap, format);
- ctx = va_arg(ap, struct spu_context *);
+ ctx = va_arg(*args, struct spu_context *);
sputrace_log_item(p->name, ctx, NULL);
- va_end(ap);
}
struct spu_probe spu_probes[] = {
@@ -219,10 +213,6 @@ static int __init sputrace_init(void)
if (error)
printk(KERN_INFO "Unable to register probe %s\n",
p->name);
-
- error = marker_arm(p->name);
- if (error)
- printk(KERN_INFO "Unable to arm probe %s\n", p->name);
}
return 0;
@@ -238,7 +228,8 @@ static void __exit sputrace_exit(void)
int i;
for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
- marker_probe_unregister(spu_probes[i].name);
+ marker_probe_unregister(spu_probes[i].name,
+ spu_probes[i].probe_func, &spu_probes[i]);
remove_proc_entry("sputrace", NULL);
kfree(sputrace_log);
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 43040441317..49c87769b1f 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -73,7 +73,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
LOOKUP_OPEN|LOOKUP_CREATE, &nd);
if (!ret) {
ret = spufs_create(&nd, flags, mode, neighbor);
- path_release(&nd);
+ path_put(&nd.path);
}
putname(tmp);
}
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 6c808375793..42908896781 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -24,7 +24,6 @@ config STORCENTER
select MPIC
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
- select WANT_DEVICE_TREE
select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
@@ -37,7 +36,6 @@ config MPC7448HPC2
select TSI108_BRIDGE
select DEFAULT_UIMAGE
select PPC_UDBG_16550
- select WANT_DEVICE_TREE
select TSI108_BRIDGE
help
Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga)
@@ -48,7 +46,6 @@ config PPC_HOLLY
depends on EMBEDDED6xx
select TSI108_BRIDGE
select PPC_UDBG_16550
- select WANT_DEVICE_TREE
select TSI108_BRIDGE
help
Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval
@@ -59,7 +56,6 @@ config PPC_PRPMC2800
depends on EMBEDDED6xx
select MV64X60
select NOT_COHERENT_CACHE
- select WANT_DEVICE_TREE
help
This option enables support for the Motorola PrPMC2800 board
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index be06cfd9fa3..657b72f6849 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -75,7 +75,7 @@ static struct property *new_property(const char *name, int length,
return np;
}
-static void __init free_property(struct property *np)
+static void free_property(struct property *np)
{
kfree(np);
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d87d4bf8880..b3400b5ad5c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -93,6 +93,9 @@ config ARCH_NO_VIRT_TO_BUS
config ARCH_SUPPORTS_AOUT
def_bool y
+config IO_TRAPPED
+ bool
+
source "init/Kconfig"
menu "System type"
@@ -312,6 +315,13 @@ config CPU_SUBTYPE_SH7722
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_NUMA
+config CPU_SUBTYPE_SH7366
+ bool "Support SH7366 processor"
+ select CPU_SH4AL_DSP
+ select CPU_SHX2
+ select ARCH_SPARSEMEM_ENABLE
+ select SYS_SUPPORTS_NUMA
+
# SH-5 Processor Support
config CPU_SUBTYPE_SH5_101
@@ -456,6 +466,7 @@ config SH_RTS7751R2D
bool "RTS7751R2D"
depends on CPU_SUBTYPE_SH7751R
select SYS_SUPPORTS_PCI
+ select IO_TRAPPED
help
Select RTS7751R2D if configuring for a Renesas Technology
Sales SH-Graphics board.
@@ -472,6 +483,14 @@ config SH_HIGHLANDER
bool "Highlander"
depends on CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
select SYS_SUPPORTS_PCI
+ select IO_TRAPPED
+
+config SH_MIGOR
+ bool "Migo-R"
+ depends on CPU_SUBTYPE_SH7722
+ help
+ Select Migo-R if configuring for the SH7722 Migo-R platform
+ by Renesas System Solutions Asia Pte. Ltd.
config SH_EDOSK7705
bool "EDOSK7705"
diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
index d850184d069..0e27fe3b182 100644
--- a/arch/sh/Kconfig.cpu
+++ b/arch/sh/Kconfig.cpu
@@ -12,6 +12,7 @@ config CPU_LITTLE_ENDIAN
config CPU_BIG_ENDIAN
bool "Big Endian"
+ depends on !CPU_SH5
endchoice
@@ -87,9 +88,6 @@ config SH64_ID2815_WORKAROUND
config CPU_HAS_INTEVT
bool
-config CPU_HAS_MASKREG_IRQ
- bool
-
config CPU_HAS_IPR_IRQ
bool
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index f7c716166ce..5dcb74b947a 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -29,7 +29,8 @@ config EARLY_SCIF_CONSOLE
config EARLY_SCIF_CONSOLE_PORT
hex
depends on EARLY_SCIF_CONSOLE
- default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763
+ default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763
+ default "0xffe00000" if CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366
default "0xffea0000" if CPU_SUBTYPE_SH7785
default "0xfffe8000" if CPU_SUBTYPE_SH7203
default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 17fc36186bf..81381e5773c 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -116,6 +116,7 @@ machdir-$(CONFIG_SH_RTS7751R2D) += renesas/rts7751r2d
machdir-$(CONFIG_SH_7751_SYSTEMH) += renesas/systemh
machdir-$(CONFIG_SH_EDOSK7705) += renesas/edosk7705
machdir-$(CONFIG_SH_HIGHLANDER) += renesas/r7780rp
+machdir-$(CONFIG_SH_MIGOR) += renesas/migor
machdir-$(CONFIG_SH_SDK7780) += renesas/sdk7780
machdir-$(CONFIG_SH_7710VOIPGW) += renesas/sh7710voipgw
machdir-$(CONFIG_SH_X3PROTO) += renesas/x3proto
diff --git a/arch/sh/boards/renesas/migor/Makefile b/arch/sh/boards/renesas/migor/Makefile
new file mode 100644
index 00000000000..77037567633
--- /dev/null
+++ b/arch/sh/boards/renesas/migor/Makefile
@@ -0,0 +1 @@
+obj-y := setup.o
diff --git a/arch/sh/boards/renesas/migor/setup.c b/arch/sh/boards/renesas/migor/setup.c
new file mode 100644
index 00000000000..21ab8c8fb59
--- /dev/null
+++ b/arch/sh/boards/renesas/migor/setup.c
@@ -0,0 +1,61 @@
+/*
+ * Renesas System Solutions Asia Pte. Ltd - Migo-R
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <asm/machvec.h>
+#include <asm/io.h>
+
+/* Address IRQ Size Bus Description
+ * 0x00000000 64MB 16 NOR Flash (SP29PL256N)
+ * 0x0c000000 64MB 64 SDRAM (2xK4M563233G)
+ * 0x10000000 IRQ0 16 Ethernet (SMC91C111)
+ * 0x14000000 IRQ4 16 USB 2.0 Host Controller (M66596)
+ * 0x18000000 8GB 8 NAND Flash (K9K8G08U0A)
+ */
+
+static struct resource smc91x_eth_resources[] = {
+ [0] = {
+ .name = "smc91x-regs" ,
+ .start = P2SEGADDR(0x10000300),
+ .end = P2SEGADDR(0x1000030f),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 32, /* IRQ0 */
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,
+ },
+};
+
+static struct platform_device smc91x_eth_device = {
+ .name = "smc91x",
+ .num_resources = ARRAY_SIZE(smc91x_eth_resources),
+ .resource = smc91x_eth_resources,
+};
+
+static struct platform_device *migor_devices[] __initdata = {
+ &smc91x_eth_device,
+};
+
+static int __init migor_devices_setup(void)
+{
+ return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
+}
+__initcall(migor_devices_setup);
+
+static void __init migor_setup(char **cmdline_p)
+{
+ ctrl_outw(0x1000, 0xa4050110); /* Enable IRQ0 in PJCR */
+}
+
+static struct sh_machine_vector mv_migor __initmv = {
+ .mv_name = "Migo-R",
+ .mv_setup = migor_setup,
+};
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index f7a8d5c9d51..2f68bea7890 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -23,6 +23,7 @@
#include <asm/clock.h>
#include <asm/heartbeat.h>
#include <asm/io.h>
+#include <asm/io_trapped.h>
static struct resource r8a66597_usb_host_resources[] = {
[0] = {
@@ -181,13 +182,27 @@ static struct platform_device *r7780rp_devices[] __initdata = {
&m66592_usb_peripheral_device,
&heartbeat_device,
#ifndef CONFIG_SH_R7780RP
- &cf_ide_device,
&ax88796_device,
#endif
};
+/*
+ * The CF is connected using a 16-bit bus where 8-bit operations are
+ * unsupported. The linux ata driver is however using 8-bit operations, so
+ * insert a trapped io filter to convert 8-bit operations into 16-bit.
+ */
+static struct trapped_io cf_trapped_io = {
+ .resource = cf_ide_resources,
+ .num_resources = 2,
+ .minimum_bus_width = 16,
+};
+
static int __init r7780rp_devices_setup(void)
{
+#ifndef CONFIG_SH_R7780RP
+ if (register_trapped_io(&cf_trapped_io) == 0)
+ platform_device_register(&cf_ide_device);
+#endif
return platform_add_devices(r7780rp_devices,
ARRAY_SIZE(r7780rp_devices));
}
@@ -226,34 +241,6 @@ static void r7780rp_power_off(void)
ctrl_outw(0x0001, PA_POFF);
}
-static inline unsigned char is_ide_ioaddr(unsigned long addr)
-{
- return ((cf_ide_resources[0].start <= addr &&
- addr <= cf_ide_resources[0].end) ||
- (cf_ide_resources[1].start <= addr &&
- addr <= cf_ide_resources[1].end));
-}
-
-void highlander_writeb(u8 b, void __iomem *addr)
-{
- unsigned long tmp = (unsigned long __force)addr;
-
- if (is_ide_ioaddr(tmp))
- ctrl_outw((u16)b, tmp);
- else
- ctrl_outb(b, tmp);
-}
-
-u8 highlander_readb(void __iomem *addr)
-{
- unsigned long tmp = (unsigned long __force)addr;
-
- if (is_ide_ioaddr(tmp))
- return ctrl_inw(tmp) & 0xff;
- else
- return ctrl_inb(tmp);
-}
-
/*
* Initialize the board
*/
@@ -338,6 +325,4 @@ static struct sh_machine_vector mv_highlander __initmv = {
.mv_setup = highlander_setup,
.mv_init_irq = highlander_init_irq,
.mv_irq_demux = highlander_irq_demux,
- .mv_readb = highlander_readb,
- .mv_writeb = highlander_writeb,
};
diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
index a0ef81b7de3..f21ee49ef3a 100644
--- a/arch/sh/boards/renesas/rts7751r2d/setup.c
+++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
@@ -21,6 +21,7 @@
#include <asm/machvec.h>
#include <asm/rts7751r2d.h>
#include <asm/io.h>
+#include <asm/io_trapped.h>
#include <asm/spi.h>
static struct resource cf_ide_resources[] = {
@@ -214,13 +215,25 @@ static struct platform_device *rts7751r2d_devices[] __initdata = {
&uart_device,
&sm501_device,
#endif
- &cf_ide_device,
&heartbeat_device,
&spi_sh_sci_device,
};
+/*
+ * The CF is connected with a 16-bit bus where 8-bit operations are
+ * unsupported. The linux ata driver is however using 8-bit operations, so
+ * insert a trapped io filter to convert 8-bit operations into 16-bit.
+ */
+static struct trapped_io cf_trapped_io = {
+ .resource = cf_ide_resources,
+ .num_resources = 2,
+ .minimum_bus_width = 16,
+};
+
static int __init rts7751r2d_devices_setup(void)
{
+ if (register_trapped_io(&cf_trapped_io) == 0)
+ platform_device_register(&cf_ide_device);
spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
return platform_add_devices(rts7751r2d_devices,
ARRAY_SIZE(rts7751r2d_devices));
@@ -232,34 +245,6 @@ static void rts7751r2d_power_off(void)
ctrl_outw(0x0001, PA_POWOFF);
}
-static inline unsigned char is_ide_ioaddr(unsigned long addr)
-{
- return ((cf_ide_resources[0].start <= addr &&
- addr <= cf_ide_resources[0].end) ||
- (cf_ide_resources[1].start <= addr &&
- addr <= cf_ide_resources[1].end));
-}
-
-void rts7751r2d_writeb(u8 b, void __iomem *addr)
-{
- unsigned long tmp = (unsigned long __force)addr;
-
- if (is_ide_ioaddr(tmp))
- ctrl_outw((u16)b, tmp);
- else
- ctrl_outb(b, tmp);
-}
-
-u8 rts7751r2d_readb(void __iomem *addr)
-{
- unsigned long tmp = (unsigned long __force)addr;
-
- if (is_ide_ioaddr(tmp))
- return ctrl_inw(tmp) & 0xff;
- else
- return ctrl_inb(tmp);
-}
-
/*
* Initialize the board
*/
@@ -310,6 +295,4 @@ static struct sh_machine_vector mv_rts7751r2d __initmv = {
.mv_setup = rts7751r2d_setup,
.mv_init_irq = init_rts7751r2d_IRQ,
.mv_irq_demux = rts7751r2d_irq_demux,
- .mv_writeb = rts7751r2d_writeb,
- .mv_readb = rts7751r2d_readb,
};
diff --git a/arch/sh/boards/renesas/sdk7780/Kconfig b/arch/sh/boards/renesas/sdk7780/Kconfig
index e4f5b6985be..065f1df09bf 100644
--- a/arch/sh/boards/renesas/sdk7780/Kconfig
+++ b/arch/sh/boards/renesas/sdk7780/Kconfig
@@ -4,13 +4,6 @@ choice
prompt "SDK7780 options"
default SH_SDK7780_BASE
-config SH_SDK7780_STANDALONE
- bool "SDK7780 board support"
- depends on CPU_SUBTYPE_SH7780
- help
- Selecting this option will enable support for the
- standalone version of the SDK7780. If in doubt, say Y.
-
config SH_SDK7780_BASE
bool "SDK7780 with base-board support"
depends on CPU_SUBTYPE_SH7780
diff --git a/arch/sh/cchips/hd6446x/hd64465/setup.c b/arch/sh/cchips/hd6446x/hd64465/setup.c
index 5cef0db4018..9b8820c3670 100644
--- a/arch/sh/cchips/hd6446x/hd64465/setup.c
+++ b/arch/sh/cchips/hd6446x/hd64465/setup.c
@@ -17,10 +17,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/io.h>
#include <asm/irq.h>
-
#include <asm/hd64465/hd64465.h>
static void disable_hd64465_irq(unsigned int irq)
@@ -28,51 +26,45 @@ static void disable_hd64465_irq(unsigned int irq)
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64465_IRQ_BASE);
- pr_debug("disable_hd64465_irq(%d): mask=%x\n", irq, mask);
+ pr_debug("disable_hd64465_irq(%d): mask=%x\n", irq, mask);
nimr = inw(HD64465_REG_NIMR);
nimr |= mask;
outw(nimr, HD64465_REG_NIMR);
}
-
static void enable_hd64465_irq(unsigned int irq)
{
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64465_IRQ_BASE);
- pr_debug("enable_hd64465_irq(%d): mask=%x\n", irq, mask);
+ pr_debug("enable_hd64465_irq(%d): mask=%x\n", irq, mask);
nimr = inw(HD64465_REG_NIMR);
nimr &= ~mask;
outw(nimr, HD64465_REG_NIMR);
}
-
static void mask_and_ack_hd64465(unsigned int irq)
{
disable_hd64465_irq(irq);
}
-
static void end_hd64465_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
enable_hd64465_irq(irq);
}
-
static unsigned int startup_hd64465_irq(unsigned int irq)
-{
+{
enable_hd64465_irq(irq);
return 0;
}
-
static void shutdown_hd64465_irq(unsigned int irq)
{
disable_hd64465_irq(irq);
}
-
static struct hw_interrupt_type hd64465_irq_type = {
.typename = "HD64465-IRQ",
.startup = startup_hd64465_irq,
@@ -83,7 +75,6 @@ static struct hw_interrupt_type hd64465_irq_type = {
.end = end_hd64465_irq,
};
-
static irqreturn_t hd64465_interrupt(int irq, void *dev_id)
{
printk(KERN_INFO
@@ -93,9 +84,6 @@ static irqreturn_t hd64465_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
-
-/*====================================================*/
-
/*
* Support for a secondary IRQ demux step. This is necessary
* because the HD64465 presents a very thin interface to the
@@ -103,8 +91,7 @@ static irqreturn_t hd64465_interrupt(int irq, void *dev_id)
* normally done in hardware by other PCMCIA host bridges is
* instead done in software.
*/
-static struct
-{
+static struct {
int (*func)(int, void *);
void *dev;
} hd64465_demux[HD64465_IRQ_NUM];
@@ -112,19 +99,17 @@ static struct
void hd64465_register_irq_demux(int irq,
int (*demux)(int irq, void *dev), void *dev)
{
- hd64465_demux[irq - HD64465_IRQ_BASE].func = demux;
- hd64465_demux[irq - HD64465_IRQ_BASE].dev = dev;
+ hd64465_demux[irq - HD64465_IRQ_BASE].func = demux;
+ hd64465_demux[irq - HD64465_IRQ_BASE].dev = dev;
}
EXPORT_SYMBOL(hd64465_register_irq_demux);
void hd64465_unregister_irq_demux(int irq)
{
- hd64465_demux[irq - HD64465_IRQ_BASE].func = 0;
+ hd64465_demux[irq - HD64465_IRQ_BASE].func = 0;
}
EXPORT_SYMBOL(hd64465_unregister_irq_demux);
-
-
int hd64465_irq_demux(int irq)
{
if (irq == CONFIG_HD64465_IRQ) {
@@ -132,16 +117,16 @@ int hd64465_irq_demux(int irq)
unsigned short nirr = inw(HD64465_REG_NIRR);
unsigned short nimr = inw(HD64465_REG_NIMR);
- pr_debug("hd64465_irq_demux, nirr=%04x, nimr=%04x\n", nirr, nimr);
+ pr_debug("hd64465_irq_demux, nirr=%04x, nimr=%04x\n", nirr, nimr);
nirr &= ~nimr;
for (bit = 1, i = 0 ; i < HD64465_IRQ_NUM ; bit <<= 1, i++)
if (nirr & bit)
- break;
+ break;
- if (i < HD64465_IRQ_NUM) {
+ if (i < HD64465_IRQ_NUM) {
irq = HD64465_IRQ_BASE + i;
- if (hd64465_demux[i].func != 0)
- irq = hd64465_demux[i].func(irq, hd64465_demux[i].dev);
+ if (hd64465_demux[i].func != 0)
+ irq = hd64465_demux[i].func(irq, hd64465_demux[i].dev);
}
}
return irq;
@@ -154,7 +139,6 @@ static struct irqaction irq0 = {
.name = "HD64465",
};
-
static int __init setup_hd64465(void)
{
int i;
@@ -176,8 +160,8 @@ static int __init setup_hd64465(void)
rev = inw(HD64465_REG_SRR);
printk(KERN_INFO "HD64465 hardware revision %d.%d\n", (rev >> 8) & 0xff, rev & 0xff);
-
- outw(0xffff, HD64465_REG_NIMR); /* mask all interrupts */
+
+ outw(0xffff, HD64465_REG_NIMR); /* mask all interrupts */
for (i = 0; i < HD64465_IRQ_NUM ; i++) {
irq_desc[HD64465_IRQ_BASE + i].chip = &hd64465_irq_type;
@@ -185,16 +169,13 @@ static int __init setup_hd64465(void)
setup_irq(CONFIG_HD64465_IRQ, &irq0);
-#ifdef CONFIG_SERIAL
/* wake up the UART from STANDBY at this point */
smscr = inw(HD64465_REG_SMSCR);
outw(smscr & (~HD64465_SMSCR_UARTST), HD64465_REG_SMSCR);
/* remap IO ports for first ISA serial port to HD64465 UART */
hd64465_port_map(0x3f8, 8, CONFIG_HD64465_IOBASE + 0x8000, 1);
-#endif
return 0;
}
-
module_init(setup_hd64465);
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
new file mode 100644
index 00000000000..ee5900817f8
--- /dev/null
+++ b/arch/sh/configs/migor_defconfig
@@ -0,0 +1,824 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.24
+# Wed Feb 6 21:52:20 2008
+#
+CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_SYS_SUPPORTS_NUMA=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_KPROBES is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+
+#
+# System type
+#
+CONFIG_CPU_SH4=y
+CONFIG_CPU_SH4A=y
+CONFIG_CPU_SH4AL_DSP=y
+CONFIG_CPU_SHX2=y
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
+# CONFIG_CPU_SUBTYPE_SHX3 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+CONFIG_CPU_SUBTYPE_SH7722=y
+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
+
+#
+# Memory management options
+#
+CONFIG_QUICKLIST=y
+CONFIG_MMU=y
+CONFIG_PAGE_OFFSET=0x80000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_29BIT=y
+# CONFIG_X2TLB is not set
+CONFIG_VSYSCALL=y
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=1
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_MAX_ACTIVE_REGIONS=2
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_MIGRATION is not set
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+CONFIG_CACHE_WRITEBACK=y
+# CONFIG_CACHE_WRITETHROUGH is not set
+# CONFIG_CACHE_OFF is not set
+
+#
+# Processor features
+#
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+# CONFIG_SH_FPU_EMU is not set
+CONFIG_SH_DSP=y
+# CONFIG_SH_STORE_QUEUES is not set
+CONFIG_CPU_HAS_INTEVT=y
+CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
+CONFIG_CPU_HAS_DSP=y
+
+#
+# Board support
+#
+# CONFIG_SH_7722_SOLUTION_ENGINE is not set
+CONFIG_SH_MIGOR=y
+
+#
+# Timer and clock configuration
+#
+CONFIG_SH_TMU=y
+CONFIG_SH_TIMER_IRQ=16
+CONFIG_SH_PCLK_FREQ=33333333
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+
+#
+# Additional SuperH Device Drivers
+#
+# CONFIG_HEARTBEAT is not set
+# CONFIG_PUSH_SWITCH is not set
+
+#
+# Kernel features
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_RCU_TRACE=y
+CONFIG_GUSA=y
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ip=on"
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_EXT=y
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=m
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_STNIC is not set
+CONFIG_SMC91X=y
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=3
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+CONFIG_USB_GADGET_M66592=y
+CONFIG_USB_M66592=y
+CONFIG_SUPERH_BUILT_IN_M66592=y
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+CONFIG_USB_G_SERIAL=y
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_MMC is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_SH=y
+
+#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+CONFIG_EARLY_SCIF_CONSOLE=y
+CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe00000
+CONFIG_EARLY_PRINTK=y
+# CONFIG_SH_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_SEQIV is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index 2dc754e5b73..3a915fd436d 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -1,9 +1,10 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc2
-# Tue Aug 14 18:04:44 2007
+# Linux kernel version: 2.6.24
+# Thu Feb 7 16:25:55 2008
#
CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
@@ -36,9 +37,14 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
# CONFIG_BLK_DEV_INITRD is not set
@@ -53,6 +59,7 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
@@ -65,6 +72,13 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_KPROBES is not set
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -91,13 +105,17 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
#
# System type
#
CONFIG_CPU_SH4=y
# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
# CONFIG_CPU_SUBTYPE_SH7705 is not set
# CONFIG_CPU_SUBTYPE_SH7706 is not set
# CONFIG_CPU_SUBTYPE_SH7707 is not set
@@ -105,6 +123,8 @@ CONFIG_CPU_SH4=y
# CONFIG_CPU_SUBTYPE_SH7709 is not set
# CONFIG_CPU_SUBTYPE_SH7710 is not set
# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
# CONFIG_CPU_SUBTYPE_SH7750 is not set
# CONFIG_CPU_SUBTYPE_SH7091 is not set
# CONFIG_CPU_SUBTYPE_SH7750R is not set
@@ -113,14 +133,15 @@ CONFIG_CPU_SH4=y
CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH7760 is not set
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
-# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
-# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
# CONFIG_CPU_SUBTYPE_SH7785 is not set
# CONFIG_CPU_SUBTYPE_SHX3 is not set
# CONFIG_CPU_SUBTYPE_SH7343 is not set
# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
#
# Memory management options
@@ -130,6 +151,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_29BIT=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -147,6 +169,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
@@ -168,23 +191,22 @@ CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SH_FPU=y
# CONFIG_SH_STORE_QUEUES is not set
CONFIG_CPU_HAS_INTEVT=y
-CONFIG_CPU_HAS_INTC_IRQ=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_PTEA=y
+CONFIG_CPU_HAS_FPU=y
#
# Board support
#
# CONFIG_SH_7751_SYSTEMH is not set
# CONFIG_SH_SECUREEDGE5410 is not set
-# CONFIG_SH_HS7751RVOIP is not set
CONFIG_SH_RTS7751R2D=y
# CONFIG_SH_LANDISK is not set
# CONFIG_SH_TITAN is not set
# CONFIG_SH_LBOX_RE2 is not set
#
-# RTS7751R2D options
+# RTS7751R2D Board Revision
#
# CONFIG_RTS7751R2D_PLUS is not set
CONFIG_RTS7751R2D_1=y
@@ -198,6 +220,7 @@ CONFIG_SH_PCLK_FREQ=60000000
# CONFIG_TICK_ONESHOT is not set
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
# CPU Frequency scaling
@@ -227,11 +250,15 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+CONFIG_RCU_TRACE=y
+CONFIG_GUSA=y
+# CONFIG_GUSA_RB is not set
#
# Boot options
@@ -250,10 +277,7 @@ CONFIG_SH_PCIDMA_NONCOHERENT=y
CONFIG_PCI_AUTO=y
CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
+CONFIG_PCI_LEGACY=y
# CONFIG_PCCARD is not set
CONFIG_HOTPLUG_PCI=y
# CONFIG_HOTPLUG_PCI_FAKE is not set
@@ -281,6 +305,7 @@ CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -299,6 +324,7 @@ CONFIG_IP_FIB_HASH=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -324,10 +350,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
# CONFIG_NET_SCHED is not set
#
@@ -335,6 +357,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
@@ -356,6 +379,7 @@ CONFIG_WIRELESS_EXT=y
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -371,6 +395,7 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -420,6 +445,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
@@ -493,7 +519,9 @@ CONFIG_ATA=y
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
@@ -508,14 +536,7 @@ CONFIG_ATA=y
# CONFIG_PATA_WINBOND is not set
CONFIG_PATA_PLATFORM=y
# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
#
# IEEE 1394 (FireWire) support
@@ -530,25 +551,31 @@ CONFIG_NETDEVICES=y
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_VETH is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_AX88796 is not set
# CONFIG_STNIC is not set
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_SMC91X is not set
+# CONFIG_ENC28J60 is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
@@ -560,6 +587,7 @@ CONFIG_8139TOO=y
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
@@ -570,6 +598,10 @@ CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
@@ -577,6 +609,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
@@ -585,11 +618,15 @@ CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
# CONFIG_TR is not set
#
@@ -597,13 +634,21 @@ CONFIG_NETDEV_10000=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -622,7 +667,6 @@ CONFIG_INPUT=y
#
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
@@ -650,6 +694,7 @@ CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -674,11 +719,9 @@ CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
@@ -687,16 +730,30 @@ CONFIG_DEVPORT=y
#
# SPI support
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SH_SCI=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_SIS5595 is not set
@@ -708,6 +765,13 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -720,16 +784,12 @@ CONFIG_MFD_SM501=y
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
CONFIG_DAB=y
+# CONFIG_USB_DABUSB is not set
#
# Graphics support
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
@@ -738,6 +798,7 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
@@ -777,6 +838,12 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_PM3 is not set
CONFIG_FB_SM501=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
#
# Console display driver support
@@ -844,6 +911,7 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_DARLA20 is not set
@@ -868,6 +936,7 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_HDA_INTEL is not set
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
@@ -885,16 +954,27 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
CONFIG_SND_YMFPCI=m
CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
# CONFIG_SND_AC97_POWER_SAVE is not set
#
+# SPI devices
+#
+
+#
# SUPERH devices
#
#
+# USB devices
+#
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+
+#
# System on Chip audio support
#
# CONFIG_SND_SOC is not set
@@ -904,6 +984,10 @@ CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
#
#
+# ALSA SoC audio for Freescale SOCs
+#
+
+#
# Open Sound System
#
CONFIG_SOUND_PRIME=m
@@ -914,19 +998,104 @@ CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
-# CONFIG_USB is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
-# USB Gadget Support
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
# CONFIG_NEW_LEDS is not set
@@ -949,13 +1118,17 @@ CONFIG_RTC_INTF_DEV=y
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_MAX6902 is not set
+CONFIG_RTC_DRV_R9701=y
+# CONFIG_RTC_DRV_RS5C348 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T59 is not set
# CONFIG_RTC_DRV_V3020 is not set
@@ -963,20 +1136,7 @@ CONFIG_RTC_INTF_DEV=y
#
# on-CPU RTC drivers
#
-CONFIG_RTC_DRV_SH=y
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
+# CONFIG_RTC_DRV_SH is not set
#
# Userspace I/O
@@ -1034,7 +1194,6 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
#
@@ -1053,10 +1212,7 @@ CONFIG_RAMFS=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
@@ -1070,10 +1226,6 @@ CONFIG_RAMFS=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
@@ -1114,30 +1266,22 @@ CONFIG_NLS_CODEPAGE_932=y
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-
-#
-# Distributed Lock Manager
-#
# CONFIG_DLM is not set
#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-
-#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
# CONFIG_SH_STANDARD_BIOS is not set
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
@@ -1149,7 +1293,53 @@ CONFIG_EARLY_PRINTK=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_SEQIV is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
#
# Library routines
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index 4ff5a752dcd..0a6d3b9e648 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -1,9 +1,10 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc2
-# Tue Aug 14 16:33:08 2007
+# Linux kernel version: 2.6.24
+# Thu Feb 7 16:17:47 2008
#
CONFIG_SUPERH=y
+CONFIG_SUPERH32=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
@@ -36,9 +37,14 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_FAIR_USER_SCHED=y
+# CONFIG_FAIR_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
# CONFIG_BLK_DEV_INITRD is not set
@@ -53,6 +59,7 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
@@ -65,6 +72,13 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_HAVE_KPROBES is not set
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -91,13 +105,17 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
#
# System type
#
CONFIG_CPU_SH4=y
# CONFIG_CPU_SUBTYPE_SH7619 is not set
+# CONFIG_CPU_SUBTYPE_SH7203 is not set
# CONFIG_CPU_SUBTYPE_SH7206 is not set
+# CONFIG_CPU_SUBTYPE_SH7263 is not set
# CONFIG_CPU_SUBTYPE_SH7705 is not set
# CONFIG_CPU_SUBTYPE_SH7706 is not set
# CONFIG_CPU_SUBTYPE_SH7707 is not set
@@ -105,6 +123,8 @@ CONFIG_CPU_SH4=y
# CONFIG_CPU_SUBTYPE_SH7709 is not set
# CONFIG_CPU_SUBTYPE_SH7710 is not set
# CONFIG_CPU_SUBTYPE_SH7712 is not set
+# CONFIG_CPU_SUBTYPE_SH7720 is not set
+# CONFIG_CPU_SUBTYPE_SH7721 is not set
# CONFIG_CPU_SUBTYPE_SH7750 is not set
# CONFIG_CPU_SUBTYPE_SH7091 is not set
# CONFIG_CPU_SUBTYPE_SH7750R is not set
@@ -113,14 +133,15 @@ CONFIG_CPU_SH4=y
CONFIG_CPU_SUBTYPE_SH7751R=y
# CONFIG_CPU_SUBTYPE_SH7760 is not set
# CONFIG_CPU_SUBTYPE_SH4_202 is not set
-# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
-# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+# CONFIG_CPU_SUBTYPE_SH7763 is not set
# CONFIG_CPU_SUBTYPE_SH7770 is not set
# CONFIG_CPU_SUBTYPE_SH7780 is not set
# CONFIG_CPU_SUBTYPE_SH7785 is not set
# CONFIG_CPU_SUBTYPE_SHX3 is not set
# CONFIG_CPU_SUBTYPE_SH7343 is not set
# CONFIG_CPU_SUBTYPE_SH7722 is not set
+# CONFIG_CPU_SUBTYPE_SH5_101 is not set
+# CONFIG_CPU_SUBTYPE_SH5_103 is not set
#
# Memory management options
@@ -130,6 +151,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_29BIT=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -147,6 +169,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_SPARSEMEM_STATIC=y
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
@@ -168,23 +191,22 @@ CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SH_FPU=y
# CONFIG_SH_STORE_QUEUES is not set
CONFIG_CPU_HAS_INTEVT=y
-CONFIG_CPU_HAS_INTC_IRQ=y
CONFIG_CPU_HAS_SR_RB=y
CONFIG_CPU_HAS_PTEA=y
+CONFIG_CPU_HAS_FPU=y
#
# Board support
#
# CONFIG_SH_7751_SYSTEMH is not set
# CONFIG_SH_SECUREEDGE5410 is not set
-# CONFIG_SH_HS7751RVOIP is not set
CONFIG_SH_RTS7751R2D=y
# CONFIG_SH_LANDISK is not set
# CONFIG_SH_TITAN is not set
# CONFIG_SH_LBOX_RE2 is not set
#
-# RTS7751R2D options
+# RTS7751R2D Board Revision
#
CONFIG_RTS7751R2D_PLUS=y
# CONFIG_RTS7751R2D_1 is not set
@@ -198,6 +220,7 @@ CONFIG_SH_PCLK_FREQ=60000000
# CONFIG_TICK_ONESHOT is not set
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
# CPU Frequency scaling
@@ -227,11 +250,15 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+CONFIG_RCU_TRACE=y
+CONFIG_GUSA=y
+# CONFIG_GUSA_RB is not set
#
# Boot options
@@ -250,10 +277,7 @@ CONFIG_SH_PCIDMA_NONCOHERENT=y
CONFIG_PCI_AUTO=y
CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
+CONFIG_PCI_LEGACY=y
# CONFIG_PCCARD is not set
CONFIG_HOTPLUG_PCI=y
# CONFIG_HOTPLUG_PCI_FAKE is not set
@@ -281,6 +305,7 @@ CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -299,6 +324,7 @@ CONFIG_IP_FIB_HASH=y
CONFIG_INET_XFRM_MODE_TRANSPORT=y
CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
@@ -324,10 +350,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
# CONFIG_NET_SCHED is not set
#
@@ -335,6 +357,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
@@ -356,6 +379,7 @@ CONFIG_WIRELESS_EXT=y
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -371,6 +395,7 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -420,6 +445,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
@@ -493,7 +519,9 @@ CONFIG_ATA=y
# CONFIG_PATA_MPIIX is not set
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
@@ -508,14 +536,7 @@ CONFIG_ATA=y
# CONFIG_PATA_WINBOND is not set
CONFIG_PATA_PLATFORM=y
# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
#
# IEEE 1394 (FireWire) support
@@ -530,25 +551,31 @@ CONFIG_NETDEVICES=y
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_VETH is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_AX88796 is not set
# CONFIG_STNIC is not set
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_SMC91X is not set
+# CONFIG_ENC28J60 is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
-# CONFIG_DGRS is not set
# CONFIG_EEPRO100 is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
@@ -560,6 +587,7 @@ CONFIG_8139TOO=y
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
@@ -570,6 +598,10 @@ CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
@@ -577,6 +609,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
@@ -585,11 +618,15 @@ CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
# CONFIG_TR is not set
#
@@ -597,13 +634,21 @@ CONFIG_NETDEV_10000=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -622,7 +667,6 @@ CONFIG_INPUT=y
#
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_INPUT_EVBUG is not set
@@ -650,6 +694,7 @@ CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -674,11 +719,9 @@ CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
@@ -687,16 +730,30 @@ CONFIG_DEVPORT=y
#
# SPI support
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SH_SCI=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_SIS5595 is not set
@@ -708,6 +765,13 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -720,16 +784,12 @@ CONFIG_MFD_SM501=y
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
CONFIG_DAB=y
+# CONFIG_USB_DABUSB is not set
#
# Graphics support
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
@@ -738,6 +798,7 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
@@ -777,6 +838,12 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_PM3 is not set
CONFIG_FB_SM501=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
#
# Console display driver support
@@ -844,6 +911,7 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_DARLA20 is not set
@@ -868,6 +936,7 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_HDA_INTEL is not set
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
@@ -885,16 +954,27 @@ CONFIG_SND_AC97_CODEC=m
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
CONFIG_SND_YMFPCI=m
CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
# CONFIG_SND_AC97_POWER_SAVE is not set
#
+# SPI devices
+#
+
+#
# SUPERH devices
#
#
+# USB devices
+#
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+
+#
# System on Chip audio support
#
# CONFIG_SND_SOC is not set
@@ -904,6 +984,10 @@ CONFIG_SND_YMFPCI_FIRMWARE_IN_KERNEL=y
#
#
+# ALSA SoC audio for Freescale SOCs
+#
+
+#
# Open Sound System
#
CONFIG_SOUND_PRIME=m
@@ -914,19 +998,104 @@ CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
-# CONFIG_USB is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
-# USB Gadget Support
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
# CONFIG_NEW_LEDS is not set
@@ -949,13 +1118,17 @@ CONFIG_RTC_INTF_DEV=y
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_MAX6902 is not set
+CONFIG_RTC_DRV_R9701=y
+# CONFIG_RTC_DRV_RS5C348 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T59 is not set
# CONFIG_RTC_DRV_V3020 is not set
@@ -963,20 +1136,7 @@ CONFIG_RTC_INTF_DEV=y
#
# on-CPU RTC drivers
#
-CONFIG_RTC_DRV_SH=y
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
+# CONFIG_RTC_DRV_SH is not set
#
# Userspace I/O
@@ -1034,7 +1194,6 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
#
@@ -1053,10 +1212,7 @@ CONFIG_RAMFS=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
@@ -1070,10 +1226,6 @@ CONFIG_RAMFS=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
@@ -1114,30 +1266,22 @@ CONFIG_NLS_CODEPAGE_932=y
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
-
-#
-# Distributed Lock Manager
-#
# CONFIG_DLM is not set
#
-# Profiling support
-#
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-
-#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
# CONFIG_SH_STANDARD_BIOS is not set
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xffe80000
@@ -1149,7 +1293,53 @@ CONFIG_EARLY_PRINTK=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_SEQIV is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_XTS is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_TEST is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
#
# Library routines
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index 87ae5c1f862..84717d85486 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -231,7 +231,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y
# CONFIG_SH_DSP is not set
# CONFIG_SH_ADC is not set
CONFIG_CPU_HAS_INTEVT=y
-CONFIG_CPU_HAS_PINT_IRQ=y
CONFIG_CPU_HAS_IPR_IRQ=y
CONFIG_CPU_HAS_SR_RB=y
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c
index 76ed816d9a2..727126e907e 100644
--- a/arch/sh/drivers/dma/dma-api.c
+++ b/arch/sh/drivers/dma/dma-api.c
@@ -350,7 +350,7 @@ int register_dmac(struct dma_info *info)
BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
- info->pdev = platform_device_register_simple((char *)info->name, -1,
+ info->pdev = platform_device_register_simple(info->name, -1,
NULL, 0);
if (IS_ERR(info->pdev))
return PTR_ERR(info->pdev);
diff --git a/arch/sh/drivers/pci/fixups-lboxre2.c b/arch/sh/drivers/pci/fixups-lboxre2.c
index 40b19bdfb89..1c1d41255ec 100644
--- a/arch/sh/drivers/pci/fixups-lboxre2.c
+++ b/arch/sh/drivers/pci/fixups-lboxre2.c
@@ -18,7 +18,7 @@ int pci_fixup_pcic(void)
{
unsigned long bcr1, mcr;
- bcr1 = inl(SH7751_BCR1);
+ bcr1 = ctrl_inl(SH7751_BCR1);
bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
pci_write_reg(bcr1, SH4_PCIBCR1);
@@ -28,7 +28,7 @@ int pci_fixup_pcic(void)
pci_write_reg(0xfb900047, SH7751_PCICONF1);
pci_write_reg(0xab000001, SH7751_PCICONF4);
- mcr = inl(SH7751_MCR);
+ mcr = ctrl_inl(SH7751_MCR);
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
pci_write_reg(mcr, SH4_PCIMCR);
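
The inl()/outl() to ctrl_inl()/ctrl_outl() conversion seen here recurs in the fixups-rts7751r2d.c, ops-dreamcast.c, pci-sh4.h, pci-sh7751.c and pci-sh7780.c hunks below. As a rough sketch (not the kernel's exact definitions; the sketch_* names are placeholders), the ctrl_* helpers are plain volatile accesses at the given address, with no machine-vector hook and no I/O port base added, which suits these memory-mapped chip registers:

/*
 * Hedged sketch of the accessor difference; illustrative only.
 * A direct volatile load/store at the supplied address stands in
 * for what ctrl_inl()/ctrl_outl() are assumed to do here.
 */
static inline unsigned long sketch_ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long *)addr;	/* direct 32-bit read */
}

static inline void sketch_ctrl_outl(unsigned long val, unsigned long addr)
{
	*(volatile unsigned long *)addr = val;	/* direct 32-bit write */
}
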
diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c
index e72ceb560d5..904bce8768d 100644
--- a/arch/sh/drivers/pci/fixups-rts7751r2d.c
+++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c
@@ -19,7 +19,7 @@ int pci_fixup_pcic(void)
{
unsigned long bcr1, mcr;
- bcr1 = inl(SH7751_BCR1);
+ bcr1 = ctrl_inl(SH7751_BCR1);
bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
pci_write_reg(bcr1, SH4_PCIBCR1);
@@ -30,7 +30,7 @@ int pci_fixup_pcic(void)
pci_write_reg(0xfb900047, SH7751_PCICONF1);
pci_write_reg(0xab000001, SH7751_PCICONF4);
- mcr = inl(SH7751_MCR);
+ mcr = ctrl_inl(SH7751_MCR);
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
pci_write_reg(mcr, SH4_PCIMCR);
diff --git a/arch/sh/drivers/pci/ops-dreamcast.c b/arch/sh/drivers/pci/ops-dreamcast.c
index e1284fc6936..0dac87b1962 100644
--- a/arch/sh/drivers/pci/ops-dreamcast.c
+++ b/arch/sh/drivers/pci/ops-dreamcast.c
@@ -83,9 +83,9 @@ static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
- case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break;
- case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break;
- case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break;
+ case 1: *val = ctrl_inb(GAPSPCI_BBA_CONFIG+where); break;
+ case 2: *val = ctrl_inw(GAPSPCI_BBA_CONFIG+where); break;
+ case 4: *val = ctrl_inl(GAPSPCI_BBA_CONFIG+where); break;
}
return PCIBIOS_SUCCESSFUL;
@@ -97,9 +97,9 @@ static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
- case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break;
- case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break;
- case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break;
+ case 1: ctrl_outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break;
+ case 2: ctrl_outw((u16)val, GAPSPCI_BBA_CONFIG+where); break;
+ case 4: ctrl_outl((u32)val, GAPSPCI_BBA_CONFIG+where); break;
}
return PCIBIOS_SUCCESSFUL;
@@ -127,36 +127,36 @@ int __init gapspci_init(void)
*/
for (i=0; i<16; i++)
- idbuf[i] = inb(GAPSPCI_REGS+i);
+ idbuf[i] = ctrl_inb(GAPSPCI_REGS+i);
if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16))
return -ENODEV;
- outl(0x5a14a501, GAPSPCI_REGS+0x18);
+ ctrl_outl(0x5a14a501, GAPSPCI_REGS+0x18);
for (i=0; i<1000000; i++)
;
- if (inl(GAPSPCI_REGS+0x18) != 1)
+ if (ctrl_inl(GAPSPCI_REGS+0x18) != 1)
return -EINVAL;
- outl(0x01000000, GAPSPCI_REGS+0x20);
- outl(0x01000000, GAPSPCI_REGS+0x24);
+ ctrl_outl(0x01000000, GAPSPCI_REGS+0x20);
+ ctrl_outl(0x01000000, GAPSPCI_REGS+0x24);
- outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28);
- outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c);
+ ctrl_outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28);
+ ctrl_outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c);
- outl(1, GAPSPCI_REGS+0x14);
- outl(1, GAPSPCI_REGS+0x34);
+ ctrl_outl(1, GAPSPCI_REGS+0x14);
+ ctrl_outl(1, GAPSPCI_REGS+0x34);
/* Setting Broadband Adapter */
- outw(0xf900, GAPSPCI_BBA_CONFIG+0x06);
- outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30);
- outb(0x00, GAPSPCI_BBA_CONFIG+0x3c);
- outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d);
- outw(0x0006, GAPSPCI_BBA_CONFIG+0x04);
- outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
- outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
+ ctrl_outw(0xf900, GAPSPCI_BBA_CONFIG+0x06);
+ ctrl_outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30);
+ ctrl_outb(0x00, GAPSPCI_BBA_CONFIG+0x3c);
+ ctrl_outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d);
+ ctrl_outw(0x0006, GAPSPCI_BBA_CONFIG+0x04);
+ ctrl_outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
+ ctrl_outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
return 0;
}
diff --git a/arch/sh/drivers/pci/ops-rts7751r2d.c b/arch/sh/drivers/pci/ops-rts7751r2d.c
index ec8430c8d2d..b3fa3e2ef18 100644
--- a/arch/sh/drivers/pci/ops-rts7751r2d.c
+++ b/arch/sh/drivers/pci/ops-rts7751r2d.c
@@ -33,7 +33,7 @@ int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
static struct resource sh7751_io_resource = {
.name = "SH7751_IO",
.start = 0x4000,
- .end = 0x4000 + SH7751_PCI_IO_SIZE - 1,
+ .end = SH7751_PCI_IO_SIZE - 1,
.flags = IORESOURCE_IO
};
@@ -68,6 +68,7 @@ static struct sh4_pci_address_map sh7751_pci_map = {
int __init pcibios_init_platform(void)
{
+ __set_io_port_base(SH7751_PCI_IO_BASE);
return sh7751_pcic_init(&sh7751_pci_map);
}
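The added __set_io_port_base() call switches this board to a generic I/O port base: port numbers passed to inb()/outb() become offsets from a base established once at PCI init time, which is also why the I/O resource bounds are now expressed relative to that base. A rough sketch of the translation, with invented helper names:

#include <linux/io.h>

/* Illustrative translation: every port accessor adds a board-wide base that
 * pcibios_init_platform() sets once.  Names here are invented. */
static unsigned long example_io_port_base;

static inline void example_set_io_port_base(unsigned long base)
{
	example_io_port_base = base;
}

static inline void __iomem *example_port_to_addr(unsigned long port)
{
	return (void __iomem *)(example_io_port_base + port);
}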
diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h
index 4925c79ea95..07e29506080 100644
--- a/arch/sh/drivers/pci/pci-sh4.h
+++ b/arch/sh/drivers/pci/pci-sh4.h
@@ -172,11 +172,11 @@ struct sh4_pci_address_map {
static inline void pci_write_reg(unsigned long val, unsigned long reg)
{
- outl(val, PCI_REG(reg));
+ ctrl_outl(val, PCI_REG(reg));
}
static inline unsigned long pci_read_reg(unsigned long reg)
{
- return inl(PCI_REG(reg));
+ return ctrl_inl(PCI_REG(reg));
}
#endif /* __PCI_SH4_H */
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c
index 1aca7fe5783..3065eb184f0 100644
--- a/arch/sh/drivers/pci/pci-sh7751.c
+++ b/arch/sh/drivers/pci/pci-sh7751.c
@@ -58,7 +58,7 @@ static int __init __area_sdram_check(unsigned int area)
{
u32 word;
- word = inl(SH7751_BCR1);
+ word = ctrl_inl(SH7751_BCR1);
/* check BCR for SDRAM in area */
if (((word >> area) & 1) == 0) {
printk("PCI: Area %d is not configured for SDRAM. BCR1=0x%x\n",
@@ -67,7 +67,7 @@ static int __init __area_sdram_check(unsigned int area)
}
pci_write_reg(word, SH4_PCIBCR1);
- word = (u16)inw(SH7751_BCR2);
+ word = (u16)ctrl_inw(SH7751_BCR2);
/* check BCR2 for 32bit SDRAM interface*/
if (((word >> (area << 1)) & 0x3) != 0x3) {
printk("PCI: Area %d is not 32 bit SDRAM. BCR2=0x%x\n",
@@ -85,9 +85,9 @@ int __init sh7751_pcic_init(struct sh4_pci_address_map *map)
u32 word;
/* Set the BCR's to enable PCI access */
- reg = inl(SH7751_BCR1);
+ reg = ctrl_inl(SH7751_BCR1);
reg |= 0x80000;
- outl(reg, SH7751_BCR1);
+ ctrl_outl(reg, SH7751_BCR1);
/* Turn the clocks back on (not done in reset)*/
pci_write_reg(0, SH4_PCICLKR);
@@ -179,13 +179,13 @@ int __init sh7751_pcic_init(struct sh4_pci_address_map *map)
return 0;
/* configure the wait control registers */
- word = inl(SH7751_WCR1);
+ word = ctrl_inl(SH7751_WCR1);
pci_write_reg(word, SH4_PCIWCR1);
- word = inl(SH7751_WCR2);
+ word = ctrl_inl(SH7751_WCR2);
pci_write_reg(word, SH4_PCIWCR2);
- word = inl(SH7751_WCR3);
+ word = ctrl_inl(SH7751_WCR3);
pci_write_reg(word, SH4_PCIWCR3);
- word = inl(SH7751_MCR);
+ word = ctrl_inl(SH7751_MCR);
pci_write_reg(word, SH4_PCIMCR);
/* NOTE: I'm ignoring the PCI error IRQs for now..
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index 7d797f4de5e..b2a2bfa3c1b 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -52,7 +52,7 @@ static int __init sh7780_pci_init(void)
pr_debug("PCI: Starting intialization.\n");
- outl(0x00000001, SH7780_PCI_VCR2); /* Enable PCIC */
+ ctrl_outl(0x00000001, SH7780_PCI_VCR2); /* Enable PCIC */
/* check for SH7780/SH7780R hardware */
id = pci_read_reg(SH7780_PCIVID);
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index c8928983105..62bf373266f 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -22,5 +22,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 1ef21cc087f..e01283d49cb 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -18,5 +18,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_BINFMT_ELF) += dump_task.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index cc1836e47a5..462a8f6dfee 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -6,4 +6,3 @@ obj-y += intc.o
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
-obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 43ee7a9a4f0..d6e0e2bdaad 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -75,21 +75,6 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
-1, -1 /* 0xE00 - 0xE20 */
};
-/*
- * Opposite mapper.
- */
-static int IRQ_to_vectorN[NR_INTC_IRQS] = {
- 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
- -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
- 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
- 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
- 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
- -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
-
-};
-
static unsigned long intc_virt;
static unsigned int startup_intc_irq(unsigned int irq);
@@ -176,6 +161,18 @@ void make_intc_irq(unsigned int irq)
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
+static int IRQ_to_vectorN[NR_INTC_IRQS] = {
+ 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
+ -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
+ 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
+ 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
+ -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
+
+};
+
int intc_irq_describe(char* p, int irq)
{
if (irq < NR_INTC_IRQS)
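Moving IRQ_to_vectorN[] under the CONFIG_PROC_FS/CONFIG_SYSCTL guard keeps the table next to intc_irq_describe(), its only user in this file, presumably so it is not emitted (or flagged as unused) when those options are off. The pattern in miniature, with invented names:

#include <linux/kernel.h>

/* Keep a static lookup table inside the same guard as its only user, so the
 * table is not built (or warned about) when that user is configured out. */
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
static const int example_vector_of[4] = { 0x12, 0x15, 0x18, 0x1B };

static int example_describe(char *p, int irq)
{
	return sprintf(p, "(0x%3x)", example_vector_of[irq]);
}
#endif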
diff --git a/arch/sh/kernel/cpu/irq/maskreg.c b/arch/sh/kernel/cpu/irq/maskreg.c
deleted file mode 100644
index 978992e367a..00000000000
--- a/arch/sh/kernel/cpu/irq/maskreg.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Interrupt handling for Simple external interrupt mask register
- *
- * Copyright (C) 2001 A&D Co., Ltd. <http://www.aandd.co.jp>
- *
- * This is for the machine which have single 16 bit register
- * for masking external IRQ individually.
- * Each bit of the register is for masking each interrupt.
- *
- * This file may be copied or modified under the terms of the GNU
- * General Public License. See linux/COPYING for more information.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/system.h>
-#include <asm/io.h>
-
-/* address of external interrupt mask register */
-unsigned long irq_mask_register;
-
-/* forward declaration */
-static unsigned int startup_maskreg_irq(unsigned int irq);
-static void shutdown_maskreg_irq(unsigned int irq);
-static void enable_maskreg_irq(unsigned int irq);
-static void disable_maskreg_irq(unsigned int irq);
-static void mask_and_ack_maskreg(unsigned int);
-static void end_maskreg_irq(unsigned int irq);
-
-/* hw_interrupt_type */
-static struct hw_interrupt_type maskreg_irq_type = {
- .typename = "Mask Register",
- .startup = startup_maskreg_irq,
- .shutdown = shutdown_maskreg_irq,
- .enable = enable_maskreg_irq,
- .disable = disable_maskreg_irq,
- .ack = mask_and_ack_maskreg,
- .end = end_maskreg_irq
-};
-
-/* actual implementation */
-static unsigned int startup_maskreg_irq(unsigned int irq)
-{
- enable_maskreg_irq(irq);
- return 0; /* never anything pending */
-}
-
-static void shutdown_maskreg_irq(unsigned int irq)
-{
- disable_maskreg_irq(irq);
-}
-
-static void disable_maskreg_irq(unsigned int irq)
-{
- unsigned short val, mask = 0x01 << irq;
-
- BUG_ON(!irq_mask_register);
-
- /* Set "irq"th bit */
- val = ctrl_inw(irq_mask_register);
- val |= mask;
- ctrl_outw(val, irq_mask_register);
-}
-
-static void enable_maskreg_irq(unsigned int irq)
-{
- unsigned short val, mask = ~(0x01 << irq);
-
- BUG_ON(!irq_mask_register);
-
- /* Clear "irq"th bit */
- val = ctrl_inw(irq_mask_register);
- val &= mask;
- ctrl_outw(val, irq_mask_register);
-}
-
-static void mask_and_ack_maskreg(unsigned int irq)
-{
- disable_maskreg_irq(irq);
-}
-
-static void end_maskreg_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_maskreg_irq(irq);
-}
-
-void make_maskreg_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- irq_desc[irq].handler = &maskreg_irq_type;
- disable_maskreg_irq(irq);
-}
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index f2b9238cda0..9e89984c4f1 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -126,12 +126,18 @@ int __init detect_cpu_and_cache_system(void)
CPU_HAS_LLSC;
break;
case 0x3008:
- if (prr == 0xa0) {
+ if (prr == 0xa0 || prr == 0xa1) {
boot_cpu_data.type = CPU_SH7722;
boot_cpu_data.icache.ways = 4;
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.flags |= CPU_HAS_LLSC;
}
+ else if (prr == 0x70) {
+ boot_cpu_data.type = CPU_SH7366;
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.dcache.ways = 4;
+ boot_cpu_data.flags |= CPU_HAS_LLSC;
+ }
break;
case 0x4000: /* 1st cut */
case 0x4001: /* 2nd cut */
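Within the 0x3008 family the version register alone is not enough, so the product register value distinguishes SH7722 (0xa0 or 0xa1) from the newly added SH7366 (0x70). A condensed view of that dispatch, with the cache-geometry assignments left out:

#include <linux/kernel.h>

/* Condensed restatement of the probe above: the version register value is
 * shared by the family, the product register picks the exact part. */
static void example_identify(unsigned long pvr, unsigned long prr)
{
	switch (pvr) {
	case 0x3008:
		if (prr == 0xa0 || prr == 0xa1)
			printk(KERN_INFO "CPU: SH7722\n");
		else if (prr == 0x70)
			printk(KERN_INFO "CPU: SH7366\n");
		break;
	default:
		printk(KERN_INFO "CPU: unrecognized\n");
		break;
	}
}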
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 08ac6387bf1..5d890ac8e79 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
# SMP setup
@@ -21,6 +22,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7722.o
clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index a0fd8bb21f7..299138ebe16 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -1,7 +1,7 @@
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7722.c
*
- * SH7722 support for the clock framework
+ * SH7722 & SH7366 support for the clock framework
*
* Copyright (c) 2006-2007 Nomad Global Solutions Inc
* Based on code for sh7343 by Paul Mundt
@@ -417,15 +417,19 @@ static int sh7722_siu_which(struct clk *clk)
return 0;
if (!strcmp(clk->name, "siu_b_clk"))
return 1;
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
if (!strcmp(clk->name, "irda_clk"))
return 2;
+#endif
return -EINVAL;
}
static unsigned long sh7722_siu_regs[] = {
[0] = SCLKACR,
[1] = SCLKBCR,
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
[2] = IrDACLKCR,
+#endif
};
static int sh7722_siu_start_stop(struct clk *clk, int enable)
@@ -571,10 +575,12 @@ static struct clk sh7722_siu_b_clock = {
.ops = &sh7722_siu_clk_ops,
};
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
static struct clk sh7722_irda_clock = {
.name = "irda_clk",
.ops = &sh7722_siu_clk_ops,
};
+#endif
static struct clk sh7722_video_clock = {
.name = "video_clk",
@@ -588,7 +594,9 @@ static struct clk *sh7722_clocks[] = {
&sh7722_sdram_clock,
&sh7722_siu_a_clock,
&sh7722_siu_b_clock,
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
&sh7722_irda_clock,
+#endif
&sh7722_video_clock,
};
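With SH7366 reusing this clock table (minus the IrDA clock, which that part evidently does not provide), consumers keep going through the generic clk API and look the SIU clocks up by name. A hedged consumer-side sketch, with error handling trimmed:

#include <linux/clk.h>
#include <linux/err.h>

static int example_enable_siu_a(struct device *dev)
{
	struct clk *clk = clk_get(dev, "siu_a_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);	/* balanced by clk_disable()/clk_put() */
}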
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
new file mode 100644
index 00000000000..967e8b69a2f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -0,0 +1,177 @@
+/*
+ * SH7366 Setup
+ *
+ * Copyright (C) 2008 Renesas Solutions
+ *
+ * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7366_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7366_devices_setup(void)
+{
+ return platform_add_devices(sh7366_devices,
+ ARRAY_SIZE(sh7366_devices));
+}
+__initcall(sh7366_devices_setup);
+
+enum {
+ UNUSED=0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ ICB,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+ MFI, VPU, USB,
+ MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
+ DMAC4, DMAC5, DMAC_DADERR,
+ SCIF, SCIFA1, SCIFA2,
+ DENC, MSIOF,
+ FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
+ SDHI0, SDHI1, SDHI2, SDHI3,
+ CMT, TSIF, SIU,
+ TMU0, TMU1, TMU2,
+ VEU2, LCDC,
+
+ /* interrupt groups */
+
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+ INTC_VECT(ICB, 0x700),
+ INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+ INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+ INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+ INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
+ INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
+ INTC_VECT(MMC_MMC3I, 0xb40),
+ INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+ INTC_VECT(DMAC_DADERR, 0xbc0),
+ INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
+ INTC_VECT(SCIFA2, 0xc40),
+ INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
+ INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+ INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+ INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
+ INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
+ INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
+ INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+ INTC_VECT(SIU, 0xf80),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440),
+ INTC_VECT(VEU2, 0x580), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+ INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+ INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
+ INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+ FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
+ INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, 0, 0, 0, MFI } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { 0, 0, 0, ICB } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, 0, 0, 0, 0, 0, MSIOF } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USB, } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0, 0, 0, 0, 0, 0, 0, TSIF } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { } },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7366", vectors, groups,
+ mask_registers, prio_registers, sense_registers);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_mem_setup(void)
+{
+ /* TODO: Register Node 1 */
+}
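Once plat_irq_setup() has registered the controller described by these tables, drivers claim the sources through the ordinary request_irq() path. A hedged example (the handler is invented; IRQ 80 is simply the number used for the SCIF entry in sci_platform_data above):

#include <linux/init.h>
#include <linux/interrupt.h>

static irqreturn_t example_scif_handler(int irq, void *dev_id)
{
	/* service the device here */
	return IRQ_HANDLED;
}

static int __init example_claim_scif_irq(void)
{
	/* IRQ 80 matches the SCIF entry in sci_platform_data above */
	return request_irq(80, example_scif_handler, 0, "example-scif", NULL);
}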
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 15d167fd0ae..31f8cb0f637 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -20,19 +20,18 @@ int __init detect_cpu_and_cache_system(void)
{
unsigned long long cir;
- /* Do peeks in real mode to avoid having to set up a mapping for the
- WPC registers. On SH5-101 cut2, such a mapping would be exposed to
- an address translation erratum which would make it hard to set up
- correctly. */
+ /*
+ * Do peeks in real mode to avoid having to set up a mapping for
+ * the WPC registers. On SH5-101 cut2, such a mapping would be
+ * exposed to an address translation erratum which would make it
+ * hard to set up correctly.
+ */
cir = peek_real_address_q(0x0d000008);
- if ((cir & 0xffff) == 0x5103) {
+ if ((cir & 0xffff) == 0x5103)
boot_cpu_data.type = CPU_SH5_103;
- } else if (((cir >> 32) & 0xffff) == 0x51e2) {
+ else if (((cir >> 32) & 0xffff) == 0x51e2)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
- } else {
- boot_cpu_data.type = CPU_SH_NONE;
- }
/*
* First, setup some sane values for the I-cache.
@@ -40,37 +39,33 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.icache.ways = 4;
boot_cpu_data.icache.sets = 256;
boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.icache.way_incr = (1 << 13);
+ boot_cpu_data.icache.entry_shift = 5;
+ boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
+ boot_cpu_data.icache.linesz;
+ boot_cpu_data.icache.entry_mask = 0x1fe0;
+ boot_cpu_data.icache.flags = 0;
-#if 0
/*
- * FIXME: This can probably be cleaned up a bit as well.. for example,
- * do we really need the way shift _and_ the way_step_shift ?? Judging
- * by the existing code, I would guess no.. is there any valid reason
- * why we need to be tracking this around?
+ * Next, setup some sane values for the D-cache.
+ *
+ * On the SH5, these are pretty consistent with the I-cache settings,
+ * so we just copy over the existing definitions.. these can be fixed
+ * up later, especially if we add runtime CPU probing.
+ *
+ * Though in the meantime it saves us from having to duplicate all of
+ * the above definitions..
*/
- boot_cpu_data.icache.way_shift = 13;
- boot_cpu_data.icache.entry_shift = 5;
- boot_cpu_data.icache.set_shift = 4;
- boot_cpu_data.icache.way_step_shift = 16;
- boot_cpu_data.icache.asid_shift = 2;
+ boot_cpu_data.dcache = boot_cpu_data.icache;
/*
- * way offset = cache size / associativity, so just don't factor in
- * associativity in the first place..
+ * Setup any cache-related flags here
*/
- boot_cpu_data.icache.way_ofs = boot_cpu_data.icache.sets *
- boot_cpu_data.icache.linesz;
-
- boot_cpu_data.icache.asid_mask = 0x3fc;
- boot_cpu_data.icache.idx_mask = 0x1fe0;
- boot_cpu_data.icache.epn_mask = 0xffffe000;
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+ set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
#endif
- boot_cpu_data.icache.flags = 0;
-
- /* A trivial starting point.. */
- memcpy(&boot_cpu_data.dcache,
- &boot_cpu_data.icache, sizeof(struct cache_info));
-
return 0;
}
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 71c9fde2fd9..2b899122990 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -63,7 +63,13 @@ EXPORT_SYMBOL(memset_io);
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return sh_mv.mv_ioport_map(port, nr);
+ void __iomem *ret;
+
+ ret = __ioport_map_trapped(port, nr);
+ if (ret)
+ return ret;
+
+ return __ioport_map(port, nr);
}
EXPORT_SYMBOL(ioport_map);
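ioport_map() now asks the trapped-I/O layer first and only falls back to the machine-vector mapping when the port range is unclaimed. The __ioport_map_trapped() helper itself is not shown in this hunk; a hedged guess at its shape, based on match_trapped_io_handler() added later in this patch:

#include <linux/io.h>

/* Assumed shape of the hook (the real declaration is expected from the
 * io_trapped header this series adds); it must collapse to NULL when
 * CONFIG_IO_TRAPPED is not selected. */
#ifdef CONFIG_IO_TRAPPED
static inline void __iomem *example_ioport_map_trapped(unsigned long port,
							unsigned int nr)
{
	return match_trapped_io_handler(&trapped_io, port, nr);
}
#else
#define example_ioport_map_trapped(port, nr)	((void __iomem *)NULL)
#endif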
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 771ea423044..db769449f5a 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -33,17 +33,17 @@ static inline void delay(void)
u8 generic_inb(unsigned long port)
{
- return ctrl_inb((unsigned long __force)ioport_map(port, 1));
+ return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
}
u16 generic_inw(unsigned long port)
{
- return ctrl_inw((unsigned long __force)ioport_map(port, 2));
+ return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
}
u32 generic_inl(unsigned long port)
{
- return ctrl_inl((unsigned long __force)ioport_map(port, 4));
+ return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
}
u8 generic_inb_p(unsigned long port)
@@ -81,7 +81,7 @@ void generic_insb(unsigned long port, void *dst, unsigned long count)
volatile u8 *port_addr;
u8 *buf = dst;
- port_addr = (volatile u8 *)ioport_map(port, 1);
+ port_addr = (volatile u8 *)__ioport_map(port, 1);
while (count--)
*buf++ = *port_addr;
}
@@ -91,7 +91,7 @@ void generic_insw(unsigned long port, void *dst, unsigned long count)
volatile u16 *port_addr;
u16 *buf = dst;
- port_addr = (volatile u16 *)ioport_map(port, 2);
+ port_addr = (volatile u16 *)__ioport_map(port, 2);
while (count--)
*buf++ = *port_addr;
@@ -103,7 +103,7 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
volatile u32 *port_addr;
u32 *buf = dst;
- port_addr = (volatile u32 *)ioport_map(port, 4);
+ port_addr = (volatile u32 *)__ioport_map(port, 4);
while (count--)
*buf++ = *port_addr;
@@ -112,17 +112,17 @@ void generic_insl(unsigned long port, void *dst, unsigned long count)
void generic_outb(u8 b, unsigned long port)
{
- ctrl_outb(b, (unsigned long __force)ioport_map(port, 1));
+ ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
}
void generic_outw(u16 b, unsigned long port)
{
- ctrl_outw(b, (unsigned long __force)ioport_map(port, 2));
+ ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
}
void generic_outl(u32 b, unsigned long port)
{
- ctrl_outl(b, (unsigned long __force)ioport_map(port, 4));
+ ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
}
void generic_outb_p(u8 b, unsigned long port)
@@ -153,7 +153,7 @@ void generic_outsb(unsigned long port, const void *src, unsigned long count)
volatile u8 *port_addr;
const u8 *buf = src;
- port_addr = (volatile u8 __force *)ioport_map(port, 1);
+ port_addr = (volatile u8 __force *)__ioport_map(port, 1);
while (count--)
*port_addr = *buf++;
@@ -164,7 +164,7 @@ void generic_outsw(unsigned long port, const void *src, unsigned long count)
volatile u16 *port_addr;
const u16 *buf = src;
- port_addr = (volatile u16 __force *)ioport_map(port, 2);
+ port_addr = (volatile u16 __force *)__ioport_map(port, 2);
while (count--)
*port_addr = *buf++;
@@ -177,7 +177,7 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count)
volatile u32 *port_addr;
const u32 *buf = src;
- port_addr = (volatile u32 __force *)ioport_map(port, 4);
+ port_addr = (volatile u32 __force *)__ioport_map(port, 4);
while (count--)
*port_addr = *buf++;
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
new file mode 100644
index 00000000000..86a665d9220
--- /dev/null
+++ b/arch/sh/kernel/io_trapped.c
@@ -0,0 +1,276 @@
+/*
+ * Trapped io support
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Intercept io operations by trapping.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/io_trapped.h>
+
+#define TRAPPED_PAGES_MAX 16
+
+#ifdef CONFIG_HAS_IOPORT
+LIST_HEAD(trapped_io);
+EXPORT_SYMBOL_GPL(trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
+LIST_HEAD(trapped_mem);
+EXPORT_SYMBOL_GPL(trapped_mem);
+#endif
+static DEFINE_SPINLOCK(trapped_lock);
+
+int __init register_trapped_io(struct trapped_io *tiop)
+{
+ struct resource *res;
+ unsigned long len = 0, flags = 0;
+ struct page *pages[TRAPPED_PAGES_MAX];
+ int k, n;
+
+ /* structure must be page aligned */
+ if ((unsigned long)tiop & (PAGE_SIZE - 1))
+ goto bad;
+
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+ flags |= res->flags;
+ }
+
+ /* support IORESOURCE_IO _or_ MEM, not both */
+ if (hweight_long(flags) != 1)
+ goto bad;
+
+ n = len >> PAGE_SHIFT;
+
+ if (n >= TRAPPED_PAGES_MAX)
+ goto bad;
+
+ for (k = 0; k < n; k++)
+ pages[k] = virt_to_page(tiop);
+
+ tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
+ if (!tiop->virt_base)
+ goto bad;
+
+ len = 0;
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
+ (unsigned long)(tiop->virt_base + len),
+ res->flags & IORESOURCE_IO ? "io" : "mmio",
+ (unsigned long)res->start);
+ len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+ }
+
+ tiop->magic = IO_TRAPPED_MAGIC;
+ INIT_LIST_HEAD(&tiop->list);
+ spin_lock_irq(&trapped_lock);
+ if (flags & IORESOURCE_IO)
+ list_add(&tiop->list, &trapped_io);
+ if (flags & IORESOURCE_MEM)
+ list_add(&tiop->list, &trapped_mem);
+ spin_unlock_irq(&trapped_lock);
+
+ return 0;
+ bad:
+ pr_warning("unable to install trapped io filter\n");
+ return -1;
+}
+EXPORT_SYMBOL_GPL(register_trapped_io);
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+ unsigned long offset,
+ unsigned long size)
+{
+ unsigned long voffs;
+ struct trapped_io *tiop;
+ struct resource *res;
+ int k, len;
+
+ spin_lock_irq(&trapped_lock);
+ list_for_each_entry(tiop, list, list) {
+ voffs = 0;
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ if (res->start == offset) {
+ spin_unlock_irq(&trapped_lock);
+ return tiop->virt_base + voffs;
+ }
+
+ len = (res->end - res->start) + 1;
+ voffs += roundup(len, PAGE_SIZE);
+ }
+ }
+ spin_unlock_irq(&trapped_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(match_trapped_io_handler);
+
+static struct trapped_io *lookup_tiop(unsigned long address)
+{
+ pgd_t *pgd_k;
+ pud_t *pud_k;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+ pte_t entry;
+
+ pgd_k = swapper_pg_dir + pgd_index(address);
+ if (!pgd_present(*pgd_k))
+ return NULL;
+
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ return NULL;
+
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ return NULL;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ entry = *pte_k;
+
+ return pfn_to_kaddr(pte_pfn(entry));
+}
+
+static unsigned long lookup_address(struct trapped_io *tiop,
+ unsigned long address)
+{
+ struct resource *res;
+ unsigned long vaddr = (unsigned long)tiop->virt_base;
+ unsigned long len;
+ int k;
+
+ for (k = 0; k < tiop->num_resources; k++) {
+ res = tiop->resource + k;
+ len = roundup((res->end - res->start) + 1, PAGE_SIZE);
+ if (address < (vaddr + len))
+ return res->start + (address - vaddr);
+ vaddr += len;
+ }
+ return 0;
+}
+
+static unsigned long long copy_word(unsigned long src_addr, int src_len,
+ unsigned long dst_addr, int dst_len)
+{
+ unsigned long long tmp = 0;
+
+ switch (src_len) {
+ case 1:
+ tmp = ctrl_inb(src_addr);
+ break;
+ case 2:
+ tmp = ctrl_inw(src_addr);
+ break;
+ case 4:
+ tmp = ctrl_inl(src_addr);
+ break;
+ case 8:
+ tmp = ctrl_inq(src_addr);
+ break;
+ }
+
+ switch (dst_len) {
+ case 1:
+ ctrl_outb(tmp, dst_addr);
+ break;
+ case 2:
+ ctrl_outw(tmp, dst_addr);
+ break;
+ case 4:
+ ctrl_outl(tmp, dst_addr);
+ break;
+ case 8:
+ ctrl_outq(tmp, dst_addr);
+ break;
+ }
+
+ return tmp;
+}
+
+static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
+{
+ struct trapped_io *tiop;
+ unsigned long src_addr = (unsigned long)src;
+ unsigned long long tmp;
+
+ pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
+ tiop = lookup_tiop(src_addr);
+ WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+ src_addr = lookup_address(tiop, src_addr);
+ if (!src_addr)
+ return cnt;
+
+ tmp = copy_word(src_addr,
+ max_t(unsigned long, cnt,
+ (tiop->minimum_bus_width / 8)),
+ (unsigned long)dst, cnt);
+
+ pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
+ return 0;
+}
+
+static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
+{
+ struct trapped_io *tiop;
+ unsigned long dst_addr = (unsigned long)dst;
+ unsigned long long tmp;
+
+ pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
+ tiop = lookup_tiop(dst_addr);
+ WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
+
+ dst_addr = lookup_address(tiop, dst_addr);
+ if (!dst_addr)
+ return cnt;
+
+ tmp = copy_word((unsigned long)src, cnt,
+ dst_addr, max_t(unsigned long, cnt,
+ (tiop->minimum_bus_width / 8)));
+
+ pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
+ return 0;
+}
+
+static struct mem_access trapped_io_access = {
+ from_device,
+ to_device,
+};
+
+int handle_trapped_io(struct pt_regs *regs, unsigned long address)
+{
+ mm_segment_t oldfs;
+ opcode_t instruction;
+ int tmp;
+
+ if (!lookup_tiop(address))
+ return 0;
+
+ WARN_ON(user_mode(regs));
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (void *)(regs->pc),
+ sizeof(instruction))) {
+ set_fs(oldfs);
+ return 0;
+ }
+
+ tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
+ set_fs(oldfs);
+ return tmp == 0;
+}
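For completeness, a hypothetical example of how a board file might hand a region over to this layer. The addresses and names are invented, and struct trapped_io is assumed to carry a pointer to the resource array plus the num_resources and minimum_bus_width fields used above:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Hypothetical board-side registration; the structure must start on a page
 * boundary, which register_trapped_io() checks before mapping it. */
static struct resource example_trapped_resources[] = {
	[0] = {
		.start	= 0x14000000,		/* emulated chip window */
		.end	= 0x14000000 + 0x10 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct trapped_io example_trapped_io
	__attribute__ ((__aligned__(PAGE_SIZE))) = {
	.resource		= example_trapped_resources,
	.num_resources		= ARRAY_SIZE(example_trapped_resources),
	.minimum_bus_width	= 16,
};

static int __init example_board_setup(void)
{
	return register_trapped_io(&example_trapped_io);
}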
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 0586bc62ad9..9bf19b00696 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -248,9 +248,6 @@ asmlinkage void do_softirq(void)
void __init init_IRQ(void)
{
-#ifdef CONFIG_CPU_HAS_PINT_IRQ
- init_IRQ_pint();
-#endif
plat_irq_setup();
/* Perform the machine specific initialisation */
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index cff3b7dc9c5..046999b1d1a 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -623,6 +623,7 @@ extern void interruptible_sleep_on(wait_queue_head_t *q);
#define mid_sched ((unsigned long) interruptible_sleep_on)
+#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
@@ -631,12 +632,10 @@ static int in_sh64_switch_to(unsigned long pc)
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
+#endif
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long schedule_fp;
- unsigned long sh64_switch_to_fp;
- unsigned long schedule_caller_pc;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
@@ -649,6 +648,10 @@ unsigned long get_wchan(struct task_struct *p)
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
+ unsigned long schedule_fp;
+ unsigned long sh64_switch_to_fp;
+ unsigned long schedule_caller_pc;
+
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index ce0664a58b4..fddb547f3c2 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -220,7 +220,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
dp = ((unsigned long) child) + THREAD_SIZE -
sizeof(struct pt_dspregs);
if (*((int *) (dp - 4)) == SR_FD) {
- copy_to_user(addr, (void *) dp,
+ copy_to_user((void *)addr, (void *) dp,
sizeof(struct pt_dspregs));
ret = 0;
}
@@ -234,7 +234,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
dp = ((unsigned long) child) + THREAD_SIZE -
sizeof(struct pt_dspregs);
if (*((int *) (dp - 4)) == SR_FD) {
- copy_from_user((void *) dp, addr,
+ copy_from_user((void *) dp, (void *)addr,
sizeof(struct pt_dspregs));
ret = 0;
}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 18a5baf2cba..ff4f54a47c0 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -333,7 +333,7 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
[CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
- [CPU_SH_NONE] = "Unknown"
+ [CPU_SH7366] = "SH7366", [CPU_SH_NONE] = "Unknown"
};
const char *get_cpu_subtype(struct sh_cpuinfo *c)
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 719e127a7c0..a46cc3a4114 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -338,6 +338,8 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_ni_syscall
+ .long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate
+ .long sys_timerfd_settime /* 325 */
+ .long sys_timerfd_gettime
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 12c7340356a..d5d7843aad9 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -376,6 +376,8 @@ sys_call_table:
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
- .long sys_ni_syscall /* 350 */
+ .long sys_timerfd_create /* 350 */
.long sys_eventfd
.long sys_fallocate
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
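With these entries the timerfd_create/settime/gettime calls are wired into both syscall tables. From userspace they are reached through the usual wrappers; a small sketch, assuming a libc that already exposes them (otherwise syscall(2) with the slot numbers above works the same way):

#include <stdint.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

/* Returns a descriptor that becomes readable once per second; each read()
 * yields a uint64_t count of expirations since the previous read(). */
static int example_periodic_fd(void)
{
	struct itimerspec its = {
		.it_value	= { .tv_sec = 1 },	/* first expiry in 1s */
		.it_interval	= { .tv_sec = 1 },	/* then every second */
	};
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0)
		return -1;
	if (timerfd_settime(fd, 0, &its, NULL) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}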
diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c
index 2bc04bfee73..7281342c044 100644
--- a/arch/sh/kernel/time_32.c
+++ b/arch/sh/kernel/time_32.c
@@ -120,10 +120,6 @@ static long last_rtc_update;
*/
void handle_timer_tick(void)
{
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
if (current->pid)
profile_tick(CPU_PROFILING);
@@ -133,6 +129,16 @@ void handle_timer_tick(void)
#endif
/*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled, but we don't know if the timer_bh is running on another
+ * CPU, so we need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because, as just said, we have irqs
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+ do_timer(1);
+
+ /*
* If we have an externally synchronized Linux clock, then update
* RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
@@ -147,6 +153,11 @@ void handle_timer_tick(void)
/* do it again in 60s */
last_rtc_update = xtime.tv_sec - 600;
}
+ write_sequnlock(&xtime_lock);
+
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
}
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
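These hunks pull the xtime_lock seqlock into handle_timer_tick() itself, so the timer drivers changed below (timer-cmt.c, timer-mtu2.c) no longer take it around the call. Condensed, the resulting tick path looks roughly like this (illustrative restatement only; declarations come from the headers time_32.c already includes, and the heartbeat/profiling hooks are omitted):

void example_handle_timer_tick(void)
{
	write_seqlock(&xtime_lock);
	do_timer(1);				/* jiffies / wall clock */
	/* ... optional set_rtc_mmss() resync, still under the lock ... */
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}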
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
index f819ba38a6c..898977ee203 100644
--- a/arch/sh/kernel/time_64.c
+++ b/arch/sh/kernel/time_64.c
@@ -229,15 +229,22 @@ static long last_rtc_update;
static inline void do_timer_interrupt(void)
{
unsigned long long current_ctc;
+
+ if (current->pid)
+ profile_tick(CPU_PROFILING);
+
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled, but we don't know if the timer_bh is running on another
+ * CPU, so we need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because, as just said, we have irqs
+ * locally disabled. -arca
+ */
+ write_lock(&xtime_lock);
asm ("getcon cr62, %0" : "=r" (current_ctc));
ctc_last_interrupt = (unsigned long) current_ctc;
do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- if (current->pid)
- profile_tick(CPU_PROFILING);
#ifdef CONFIG_HEARTBEAT
if (sh_mv.mv_heartbeat != NULL)
@@ -259,6 +266,11 @@ static inline void do_timer_interrupt(void)
/* do it again in 60 s */
last_rtc_update = xtime.tv_sec - 600;
}
+ write_unlock(&xtime_lock);
+
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
}
/*
@@ -275,16 +287,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
timer_status &= ~0x100;
ctrl_outw(timer_status, TMU0_TCR);
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_lock(&xtime_lock);
do_timer_interrupt();
- write_unlock(&xtime_lock);
return IRQ_HANDLED;
}
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
index 499e07beebe..71312324b5d 100644
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -100,16 +100,7 @@ static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
timer_status &= ~0x80;
ctrl_outw(timer_status, CMT_CMCSR_0);
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
handle_timer_tick();
- write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
}
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
index b7499a2a918..ade9d6eb29f 100644
--- a/arch/sh/kernel/timers/timer-mtu2.c
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -100,9 +100,7 @@ static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
ctrl_outb(timer_status, MTU2_TSR_1);
/* Do timer tick */
- write_seqlock(&xtime_lock);
handle_timer_tick();
- write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
}
@@ -156,7 +154,6 @@ static int mtu2_timer_stop(void)
static int mtu2_timer_init(void)
{
- u8 tmp;
unsigned long interval;
setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 2e58f7a6b74..baa4fa368dc 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -147,6 +147,36 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
return -EFAULT;
}
+static inline void sign_extend(unsigned int count, unsigned char *dst)
+{
+#ifdef __LITTLE_ENDIAN__
+ if ((count == 1) && dst[0] & 0x80) {
+ dst[1] = 0xff;
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ if ((count == 1) && dst[3] & 0x80) {
+ dst[2] = 0xff;
+ dst[1] = 0xff;
+ dst[0] = 0xff;
+ }
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[1] = 0xff;
+ dst[0] = 0xff;
+ }
+#endif
+}
+
+static struct mem_access user_mem_access = {
+ copy_from_user,
+ copy_to_user,
+};
+
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
@@ -154,7 +184,8 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
-static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
+ struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
@@ -178,25 +209,13 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
-#ifdef __LITTLE_ENDIAN__
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
+#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
-
- if (__copy_user(dst, src, count))
+#endif
+ if (ma->from(dst, src, count))
goto fetch_fault;
- if ((count == 2) && dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
-#endif
+ sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char*) rm;
@@ -206,7 +225,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) *rn;
dst += regs->regs[0];
- if (copy_to_user(dst, src, count))
+ if (ma->to(dst, src, count))
goto fetch_fault;
}
ret = 0;
@@ -217,7 +236,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) *rn;
dst += (instruction&0x000F)<<2;
- if (copy_to_user(dst,src,4))
+ if (ma->to(dst, src, 4))
goto fetch_fault;
ret = 0;
break;
@@ -230,7 +249,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
- if (copy_to_user(dst, src, count))
+ if (ma->to(dst, src, count))
goto fetch_fault;
ret = 0;
break;
@@ -241,7 +260,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
- if (copy_from_user(dst,src,4))
+ if (ma->from(dst, src, 4))
goto fetch_fault;
ret = 0;
break;
@@ -253,25 +272,12 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
-#ifdef __LITTLE_ENDIAN__
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
+#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
-
- if (copy_from_user(dst, src, count))
- goto fetch_fault;
-
- if ((count == 2) && dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
#endif
+ if (ma->from(dst, src, count))
+ goto fetch_fault;
+ sign_extend(count, dst);
ret = 0;
break;
@@ -285,7 +291,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
dst = (unsigned char*) *rm; /* called Rn in the spec */
dst += (instruction&0x000F)<<1;
- if (copy_to_user(dst, src, 2))
+ if (ma->to(dst, src, 2))
goto fetch_fault;
ret = 0;
break;
@@ -299,21 +305,9 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
-
- if (copy_from_user(dst, src, 2))
+ if (ma->from(dst, src, 2))
goto fetch_fault;
-
-#ifdef __LITTLE_ENDIAN__
- if (dst[1] & 0x80) {
- dst[2] = 0xff;
- dst[3] = 0xff;
- }
-#else
- if (dst[2] & 0x80) {
- dst[0] = 0xff;
- dst[1] = 0xff;
- }
-#endif
+ sign_extend(2, dst);
ret = 0;
break;
}
@@ -332,11 +326,14 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
-static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+static inline int handle_delayslot(struct pt_regs *regs,
+ opcode_t old_instruction,
+ struct mem_access *ma)
{
- u16 instruction;
+ opcode_t instruction;
+ void *addr = (void *)(regs->pc + instruction_size(old_instruction));
- if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+ if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
@@ -346,7 +343,7 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
regs, 0);
}
- return handle_unaligned_ins(instruction,regs);
+ return handle_unaligned_ins(instruction, regs, ma);
}
/*
@@ -369,10 +366,11 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
* XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
* opcodes..
*/
-#ifndef CONFIG_CPU_SH2A
+
static int handle_unaligned_notify_count = 10;
-static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+ struct mem_access *ma)
{
u_int rm;
int ret, index;
@@ -387,7 +385,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
printk(KERN_NOTICE "Fixing up unaligned userspace access "
"in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
current->comm, task_pid_nr(current),
- (u16 *)regs->pc, instruction);
+ (void *)regs->pc, instruction);
}
ret = -EFAULT;
@@ -395,19 +393,19 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
case 0x0000:
if (instruction==0x000B) {
/* rts */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
@@ -428,13 +426,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
@@ -461,7 +459,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
case 0x0B00: /* bf lab - no delayslot*/
break;
case 0x0F00: /* bf/s lab */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
@@ -474,7 +472,7 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
case 0x0900: /* bt lab - no delayslot */
break;
case 0x0D00: /* bt/s lab */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
@@ -488,13 +486,13 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
break;
case 0xA000: /* bra label */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
- ret = handle_unaligned_delayslot(regs);
+ ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
@@ -505,12 +503,11 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
/* handle non-delay-slot instruction */
simple:
- ret = handle_unaligned_ins(instruction,regs);
+ ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
-#endif /* CONFIG_CPU_SH2A */
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector(x) \
@@ -538,10 +535,8 @@ asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
-#ifndef CONFIG_CPU_SH2A
- u16 instruction;
+ opcode_t instruction;
int tmp;
-#endif
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
@@ -561,9 +556,9 @@ asmlinkage void do_address_error(struct pt_regs *regs,
goto uspace_segv;
}
-#ifndef CONFIG_CPU_SH2A
set_fs(USER_DS);
- if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ if (copy_from_user(&instruction, (void *)(regs->pc),
+ sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
*/
@@ -571,13 +566,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
goto uspace_segv;
}
- tmp = handle_unaligned_access(instruction, regs);
+ tmp = handle_unaligned_access(instruction, regs,
+ &user_mem_access);
set_fs(oldfs);
if (tmp==0)
return; /* sorted */
-#endif
-
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
@@ -592,9 +586,9 @@ uspace_segv:
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
-#ifndef CONFIG_CPU_SH2A
set_fs(KERNEL_DS);
- if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ if (copy_from_user(&instruction, (void *)(regs->pc),
+ sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
*/
@@ -602,14 +596,8 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
- handle_unaligned_access(instruction, regs);
+ handle_unaligned_access(instruction, regs, &user_mem_access);
set_fs(oldfs);
-#else
- printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
- "access\n", current->comm);
-
- force_sig(SIGSEGV, current);
-#endif
}
}
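The larger change in traps_32.c is the struct mem_access parameter: the unaligned-access emulator no longer hard-codes copy_{from,to}_user, so the same code can serve userspace faults here and the ctrl_*-based trapped-I/O backend in io_trapped.c, while the duplicated sign-extension logic is folded into sign_extend(). A hedged sketch of what an alternative backend looks like (names invented; struct mem_access is assumed to be declared in the arch headers touched by this series):

#include <linux/string.h>

/* Any pair of copy routines following the copy_{from,to}_user convention
 * (return value is the number of bytes left uncopied) can drive the
 * emulator.  A trivial kernel-memory backend: */
static unsigned long example_copy_from(void *dst, const void *src,
				       unsigned long cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}

static unsigned long example_copy_to(void *dst, const void *src,
				     unsigned long cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}

static struct mem_access example_mem_access = {
	example_copy_from,
	example_copy_to,
};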
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index c0b3c6f6edb..a55ac81d795 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -630,7 +630,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
current->thread.fpu.hard.fp_regs[destreg] = buflo;
current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
} else {
-#if defined(CONFIG_LITTLE_ENDIAN)
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.fpu.hard.fp_regs[destreg] = bufhi;
current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
@@ -700,7 +700,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
buflo = current->thread.fpu.hard.fp_regs[srcreg];
bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
} else {
-#if defined(CONFIG_LITTLE_ENDIAN)
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.fpu.hard.fp_regs[srcreg];
buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
index 3f1bd6392bb..d1e177009a4 100644
--- a/arch/sh/kernel/vmlinux_64.lds.S
+++ b/arch/sh/kernel/vmlinux_64.lds.S
@@ -51,7 +51,7 @@ SECTIONS
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
-#ifdef CONFIG_LITTLE_ENDIAN
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
} = 0x6ff0fff0
#else
} = 0xf0fff06f
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 4617e3aeee7..3877321fced 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -1,10 +1,10 @@
/*
* arch/sh/mm/cache-sh5.c
*
- * Original version Copyright (C) 2000, 2001 Paolo Alberelli
- * Second version Copyright (C) benedict.gaster@superh.com 2002
- * Third version Copyright Richard.Curnow@superh.com 2003
- * Hacks to third version Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2002 Benedict Gaster
+ * Copyright (C) 2003 Richard Curnow
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,101 +13,20 @@
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
+#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h> /* for flush_itlb_range */
-
-#include <linux/proc_fs.h>
-
-/* This function is in entry.S */
-extern unsigned long switch_and_save_asid(unsigned long new_asid);
/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
-/**
- * sh64_cache_init()
- *
- * This is pretty much just a straightforward clone of the SH
- * detect_cpu_and_cache_system().
- *
- * This function is responsible for setting up all of the cache
- * info dynamically as well as taking care of CPU probing and
- * setting up the relevant subtype data.
- *
- * FIXME: For the time being, we only really support the SH5-101
- * out of the box, and don't support dynamic probing for things
- * like the SH5-103 or even cut2 of the SH5-101. Implement this
- * later!
- */
-int __init sh64_cache_init(void)
+void __init p3_cache_init(void)
{
- /*
- * First, setup some sane values for the I-cache.
- */
- cpu_data->icache.ways = 4;
- cpu_data->icache.sets = 256;
- cpu_data->icache.linesz = L1_CACHE_BYTES;
-
- /*
- * FIXME: This can probably be cleaned up a bit as well.. for example,
- * do we really need the way shift _and_ the way_step_shift ?? Judging
- * by the existing code, I would guess no.. is there any valid reason
- * why we need to be tracking this around?
- */
- cpu_data->icache.way_shift = 13;
- cpu_data->icache.entry_shift = 5;
- cpu_data->icache.set_shift = 4;
- cpu_data->icache.way_step_shift = 16;
- cpu_data->icache.asid_shift = 2;
-
- /*
- * way offset = cache size / associativity, so just don't factor in
- * associativity in the first place..
- */
- cpu_data->icache.way_ofs = cpu_data->icache.sets *
- cpu_data->icache.linesz;
-
- cpu_data->icache.asid_mask = 0x3fc;
- cpu_data->icache.idx_mask = 0x1fe0;
- cpu_data->icache.epn_mask = 0xffffe000;
- cpu_data->icache.flags = 0;
-
- /*
- * Next, setup some sane values for the D-cache.
- *
- * On the SH5, these are pretty consistent with the I-cache settings,
- * so we just copy over the existing definitions.. these can be fixed
- * up later, especially if we add runtime CPU probing.
- *
- * Though in the meantime it saves us from having to duplicate all of
- * the above definitions..
- */
- cpu_data->dcache = cpu_data->icache;
-
- /*
- * Setup any cache-related flags here
- */
-#if defined(CONFIG_DCACHE_WRITE_THROUGH)
- set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags));
-#elif defined(CONFIG_DCACHE_WRITE_BACK)
- set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags));
-#endif
-
- /*
- * We also need to reserve a slot for the D-cache in the DTLB, so we
- * do this now ..
- */
- dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
- return 0;
+ /* Reserve a slot for dcache colouring in the DTLB */
+ dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}
#ifdef CONFIG_DCACHE_DISABLED
@@ -116,73 +35,48 @@ int __init sh64_cache_init(void)
#define sh64_dcache_purge_user_range(mm, start, end) do { } while (0)
#define sh64_dcache_purge_phy_page(paddr) do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0)
-#define sh64_dcache_purge_kernel_range(start, end) do { } while (0)
-#define sh64_dcache_wback_current_user_range(start, end) do { } while (0)
#endif
-/*##########################################################################*/
-
-/* From here onwards, a rewrite of the implementation,
- by Richard.Curnow@superh.com.
-
- The major changes in this compared to the old version are;
- 1. use more selective purging through OCBP instead of using ALLOCO to purge
- by natural replacement. This avoids purging out unrelated cache lines
- that happen to be in the same set.
- 2. exploit the APIs copy_user_page and clear_user_page better
- 3. be more selective about I-cache purging, in particular use invalidate_all
- more sparingly.
-
- */
-
-/*##########################################################################
- SUPPORT FUNCTIONS
- ##########################################################################*/
-
-/****************************************************************************/
-/* The following group of functions deal with mapping and unmapping a temporary
- page into the DTLB slot that have been set aside for our exclusive use. */
-/* In order to accomplish this, we use the generic interface for adding and
- removing a wired slot entry as defined in arch/sh/mm/tlb-sh5.c */
-/****************************************************************************/
-
-static unsigned long slot_own_flags;
-
-static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr)
+/*
+ * The following group of functions deal with mapping and unmapping a
+ * temporary page into a DTLB slot that has been set aside for exclusive
+ * use.
+ */
+static inline void
+sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
+ unsigned long paddr)
{
- local_irq_save(slot_own_flags);
+ local_irq_disable();
sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}
static inline void sh64_teardown_dtlb_cache_slot(void)
{
sh64_teardown_tlb_slot(dtlb_cache_slot);
- local_irq_restore(slot_own_flags);
+ local_irq_enable();
}
-/****************************************************************************/
-
#ifndef CONFIG_ICACHE_DISABLED
-
-static void __inline__ sh64_icache_inv_all(void)
+static inline void sh64_icache_inv_all(void)
{
unsigned long long addr, flag, data;
unsigned int flags;
- addr=ICCR0;
- flag=ICCR0_ICI;
- data=0;
+ addr = ICCR0;
+ flag = ICCR0_ICI;
+ data = 0;
/* Make this a critical section for safety (probably not strictly necessary.) */
local_irq_save(flags);
	/* Without %1 it gets inexplicably wrong */
- asm volatile("getcfg %3, 0, %0\n\t"
- "or %0, %2, %0\n\t"
- "putcfg %3, 0, %0\n\t"
- "synci"
- : "=&r" (data)
- : "0" (data), "r" (flag), "r" (addr));
+ __asm__ __volatile__ (
+ "getcfg %3, 0, %0\n\t"
+ "or %0, %2, %0\n\t"
+ "putcfg %3, 0, %0\n\t"
+ "synci"
+ : "=&r" (data)
+ : "0" (data), "r" (flag), "r" (addr));
local_irq_restore(flags);
}
@@ -193,20 +87,12 @@ static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
* the addresses lie in the kernel superpage. */
unsigned long long ullend, addr, aligned_start;
-#if (NEFF == 32)
aligned_start = (unsigned long long)(signed long long)(signed long) start;
-#else
-#error "NEFF != 32"
-#endif
- aligned_start &= L1_CACHE_ALIGN_MASK;
- addr = aligned_start;
-#if (NEFF == 32)
+ addr = L1_CACHE_ALIGN(aligned_start);
ullend = (unsigned long long) (signed long long) (signed long) end;
-#else
-#error "NEFF != 32"
-#endif
+
while (addr <= ullend) {
- asm __volatile__ ("icbi %0, 0" : : "r" (addr));
+ __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
addr += L1_CACHE_BYTES;
}
}
@@ -215,7 +101,7 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
{
/* If we get called, we know that vma->vm_flags contains VM_EXEC.
Also, eaddr is page-aligned. */
-
+ unsigned int cpu = smp_processor_id();
unsigned long long addr, end_addr;
unsigned long flags = 0;
unsigned long running_asid, vma_asid;
@@ -237,17 +123,17 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
*/
running_asid = get_asid();
- vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK);
+ vma_asid = cpu_asid(cpu, vma->vm_mm);
if (running_asid != vma_asid) {
local_irq_save(flags);
switch_and_save_asid(vma_asid);
}
while (addr < end_addr) {
/* Worth unrolling a little */
- asm __volatile__("icbi %0, 0" : : "r" (addr));
- asm __volatile__("icbi %0, 32" : : "r" (addr));
- asm __volatile__("icbi %0, 64" : : "r" (addr));
- asm __volatile__("icbi %0, 96" : : "r" (addr));
+ __asm__ __volatile__("icbi %0, 0" : : "r" (addr));
+ __asm__ __volatile__("icbi %0, 32" : : "r" (addr));
+ __asm__ __volatile__("icbi %0, 64" : : "r" (addr));
+ __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
addr += 128;
}
if (running_asid != vma_asid) {
@@ -256,8 +142,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
}
}
-/****************************************************************************/
-
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@@ -275,10 +159,10 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
possible with the D-cache. Just assume 64 for now as a working
figure.
*/
-
int n_pages;
- if (!mm) return;
+ if (!mm)
+ return;
n_pages = ((end - start) >> PAGE_SHIFT);
if (n_pages >= 64) {
@@ -290,7 +174,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
unsigned long mm_asid, current_asid;
unsigned long long flags = 0ULL;
- mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
+ mm_asid = cpu_asid(smp_processor_id(), mm);
current_asid = get_asid();
if (mm_asid != current_asid) {
@@ -322,6 +206,7 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
}
aligned_start = vma->vm_end; /* Skip to start of next region */
}
+
if (mm_asid != current_asid) {
switch_and_save_asid(current_asid);
local_irq_restore(flags);
@@ -329,47 +214,46 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
}
}
+/*
+ * Invalidate a small range of user context I-cache, not necessarily page
+ * (or even cache-line) aligned.
+ *
+ * Since this is used inside ptrace, the ASID in the mm context typically
+ * won't match current_asid. We'll have to switch ASID to do this. For
+ * safety, and given that the range will be small, do all this under cli.
+ *
+ * Note, there is a hazard that the ASID in mm->context is no longer
+ * actually associated with mm, i.e. if the mm->context has started a new
+ * cycle since mm was last active. However, this is just a performance
+ * issue: all that happens is that we invalidate lines belonging to
+ * another mm, so the owning process has to refill them when that mm goes
+ * live again. mm itself can't have any cache entries because there will
+ * have been a flush_cache_all when the new mm->context cycle started.
+ */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
unsigned long start, int len)
{
-
- /* Invalidate a small range of user context I-cache, not necessarily
- page (or even cache-line) aligned. */
-
unsigned long long eaddr = start;
unsigned long long eaddr_end = start + len;
unsigned long current_asid, mm_asid;
unsigned long long flags;
unsigned long long epage_start;
- /* Since this is used inside ptrace, the ASID in the mm context
- typically won't match current_asid. We'll have to switch ASID to do
- this. For safety, and given that the range will be small, do all
- this under cli.
-
- Note, there is a hazard that the ASID in mm->context is no longer
- actually associated with mm, i.e. if the mm->context has started a
- new cycle since mm was last active. However, this is just a
- performance issue: all that happens is that we invalidate lines
- belonging to another mm, so the owning process has to refill them
- when that mm goes live again. mm itself can't have any cache
- entries because there will have been a flush_cache_all when the new
- mm->context cycle started. */
-
- /* Align to start of cache line. Otherwise, suppose len==8 and start
- was at 32N+28 : the last 4 bytes wouldn't get invalidated. */
- eaddr = start & L1_CACHE_ALIGN_MASK;
+ /*
+ * Align to start of cache line. Otherwise, suppose len==8 and
+ * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
+ */
+ eaddr = L1_CACHE_ALIGN(start);
eaddr_end = start + len;
+ mm_asid = cpu_asid(smp_processor_id(), mm);
local_irq_save(flags);
- mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
current_asid = switch_and_save_asid(mm_asid);
epage_start = eaddr & PAGE_MASK;
- while (eaddr < eaddr_end)
- {
- asm __volatile__("icbi %0, 0" : : "r" (eaddr));
+ while (eaddr < eaddr_end) {
+ __asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
eaddr += L1_CACHE_BYTES;
}
switch_and_save_asid(current_asid);
@@ -394,30 +278,24 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon
been recycled since we were last active in which case we might just
   invalidate another process's I-cache entries: no worries, just a
performance drop for him. */
- aligned_start = start & L1_CACHE_ALIGN_MASK;
+ aligned_start = L1_CACHE_ALIGN(start);
addr = aligned_start;
while (addr < ull_end) {
- asm __volatile__ ("icbi %0, 0" : : "r" (addr));
- asm __volatile__ ("nop");
- asm __volatile__ ("nop");
+ __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
+ __asm__ __volatile__ ("nop");
+ __asm__ __volatile__ ("nop");
addr += L1_CACHE_BYTES;
}
}
-
#endif /* !CONFIG_ICACHE_DISABLED */
-/****************************************************************************/
-
#ifndef CONFIG_DCACHE_DISABLED
-
/* Buffer used as the target of alloco instructions to purge data from cache
sets by natural eviction. -- RPC */
-#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4)
+#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-/****************************************************************************/
-
-static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
+static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
/* Purge all ways in a particular block of sets, specified by the base
set number and number of sets. Can handle wrap-around, if that's
@@ -428,102 +306,86 @@ static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets
int j;
int set_offset;
- dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift;
+ dummy_buffer_base_set = ((int)&dummy_alloco_area &
+ cpu_data->dcache.entry_mask) >>
+ cpu_data->dcache.entry_shift;
set_offset = sets_to_purge_base - dummy_buffer_base_set;
- for (j=0; j<n_sets; j++, set_offset++) {
+ for (j = 0; j < n_sets; j++, set_offset++) {
set_offset &= (cpu_data->dcache.sets - 1);
- eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift);
-
- /* Do one alloco which hits the required set per cache way. For
- write-back mode, this will purge the #ways resident lines. There's
- little point unrolling this loop because the allocos stall more if
- they're too close together. */
- eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
- for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
- asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
- asm __volatile__ ("synco"); /* TAKum03020 */
+ eaddr0 = (unsigned long long)dummy_alloco_area +
+ (set_offset << cpu_data->dcache.entry_shift);
+
+ /*
+ * Do one alloco which hits the required set per cache
+ * way. For write-back mode, this will purge the #ways
+ * resident lines. There's little point unrolling this
+ * loop because the allocos stall more if they're too
+ * close together.
+ */
+ eaddr1 = eaddr0 + cpu_data->dcache.way_size *
+ cpu_data->dcache.ways;
+
+ for (eaddr = eaddr0; eaddr < eaddr1;
+ eaddr += cpu_data->dcache.way_size) {
+ __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
+ __asm__ __volatile__ ("synco"); /* TAKum03020 */
}
- eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
- for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
- /* Load from each address. Required because alloco is a NOP if
- the cache is write-through. Write-through is a config option. */
+ eaddr1 = eaddr0 + cpu_data->dcache.way_size *
+ cpu_data->dcache.ways;
+
+ for (eaddr = eaddr0; eaddr < eaddr1;
+ eaddr += cpu_data->dcache.way_size) {
+ /*
+ * Load from each address. Required because
+ * alloco is a NOP if the cache is write-through.
+ */
if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
- *(volatile unsigned char *)(int)eaddr;
+ ctrl_inb(eaddr);
}
}
- /* Don't use OCBI to invalidate the lines. That costs cycles directly.
- If the dummy block is just left resident, it will naturally get
- evicted as required. */
-
- return;
+ /*
+ * Don't use OCBI to invalidate the lines. That costs cycles
+ * directly. If the dummy block is just left resident, it will
+ * naturally get evicted as required.
+ */
}
-/****************************************************************************/
-
+/*
+ * Purge the entire contents of the dcache. The most efficient way to
+ * achieve this is to use alloco instructions on a region of unused
+ * memory equal in size to the cache, thereby causing the current
+ * contents to be discarded by natural eviction. The alternative, namely
+ * reading every tag, setting up a mapping for the corresponding page and
+ * doing an OCBP for the line, would be much more expensive.
+ */
static void sh64_dcache_purge_all(void)
{
- /* Purge the entire contents of the dcache. The most efficient way to
- achieve this is to use alloco instructions on a region of unused
- memory equal in size to the cache, thereby causing the current
- contents to be discarded by natural eviction. The alternative,
- namely reading every tag, setting up a mapping for the corresponding
- page and doing an OCBP for the line, would be much more expensive.
- */
sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-
- return;
-
}
-/****************************************************************************/
-
-static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end)
-{
- /* Purge the range of addresses [start,end] from the D-cache. The
- addresses lie in the superpage mapping. There's no harm if we
- overpurge at either end - just a small performance loss. */
- unsigned long long ullend, addr, aligned_start;
-#if (NEFF == 32)
- aligned_start = (unsigned long long)(signed long long)(signed long) start;
-#else
-#error "NEFF != 32"
-#endif
- aligned_start &= L1_CACHE_ALIGN_MASK;
- addr = aligned_start;
-#if (NEFF == 32)
- ullend = (unsigned long long) (signed long long) (signed long) end;
-#else
-#error "NEFF != 32"
-#endif
- while (addr <= ullend) {
- asm __volatile__ ("ocbp %0, 0" : : "r" (addr));
- addr += L1_CACHE_BYTES;
- }
- return;
-}
/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr)
+/* Purge the physical page 'paddr' from the cache. It's known that any
+ * cache lines requiring attention have the same page colour as the
+ * address 'eaddr'.
+ *
+ * This relies on the fact that the D-cache matches on physical tags when
+ * no virtual tag matches. So we create an alias for the original page
+ * and purge through that. (Alternatively, we could have done this by
+ * switching ASID to match the original mapping and purged through that,
+ * but that involves ASID switching cost + probably a TLBMISS + refill
+ * anyway.)
+ */
+static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
+ unsigned long eaddr)
{
- /* Purge the physical page 'paddr' from the cache. It's known that any
- cache lines requiring attention have the same page colour as the the
- address 'eaddr'.
-
- This relies on the fact that the D-cache matches on physical tags
- when no virtual tag matches. So we create an alias for the original
- page and purge through that. (Alternatively, we could have done
- this by switching ASID to match the original mapping and purged
- through that, but that involves ASID switching cost + probably a
- TLBMISS + refill anyway.)
- */
-
unsigned long long magic_page_start;
unsigned long long magic_eaddr, magic_eaddr_end;
@@ -531,47 +393,45 @@ static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned lo
/* As long as the kernel is not pre-emptible, this doesn't need to be
under cli/sti. */
-
sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
magic_eaddr = magic_page_start;
magic_eaddr_end = magic_eaddr + PAGE_SIZE;
+
while (magic_eaddr < magic_eaddr_end) {
/* Little point in unrolling this loop - the OCBPs are blocking
and won't go any quicker (i.e. the loop overhead is parallel
to part of the OCBP execution.) */
- asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
+ __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
magic_eaddr += L1_CACHE_BYTES;
}
sh64_teardown_dtlb_cache_slot();
}
-/****************************************************************************/
-
+/*
+ * Purge a page given its physical start address, by creating a temporary
+ * 1 page mapping and purging across that. Even if we know the virtual
+ * address (& vma or mm) of the page, the method here is more elegant
+ * because it avoids issues of coping with page faults on the purge
+ * instructions (i.e. no special-case code required in the critical path
+ * in the TLB miss handling).
+ */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
- /* Pure a page given its physical start address, by creating a
- temporary 1 page mapping and purging across that. Even if we know
- the virtual address (& vma or mm) of the page, the method here is
- more elegant because it avoids issues of coping with page faults on
- the purge instructions (i.e. no special-case code required in the
- critical path in the TLB miss handling). */
-
unsigned long long eaddr_start, eaddr, eaddr_end;
int i;
/* As long as the kernel is not pre-emptible, this doesn't need to be
under cli/sti. */
-
eaddr_start = MAGIC_PAGE0_START;
- for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
+ for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
eaddr = eaddr_start;
eaddr_end = eaddr + PAGE_SIZE;
while (eaddr < eaddr_end) {
- asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
+ __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
eaddr += L1_CACHE_BYTES;
}
@@ -584,6 +444,7 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
@@ -597,7 +458,11 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
if (pgd_bad(*pgd))
return;
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud) || pud_bad(*pud))
+ return;
+
+ pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd) || pmd_bad(*pmd))
return;
@@ -611,419 +476,357 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(pte - 1, ptl);
}
-/****************************************************************************/
+/*
+ * There are at least 5 choices for the implementation of this, with
+ * pros (+), cons(-), comments(*):
+ *
+ * 1. ocbp each line in the range through the original user's ASID
+ * + no lines spuriously evicted
+ * - tlbmiss handling (must either handle faults on demand => extra
+ * special-case code in tlbmiss critical path), or map the page in
+ * advance (=> flush_tlb_range in advance to avoid multiple hits)
+ * - ASID switching
+ * - expensive for large ranges
+ *
+ * 2. temporarily map each page in the range to a special effective
+ * address and ocbp through the temporary mapping; relies on the
+ * fact that SH-5 OCB* always do TLB lookup and match on ptags (they
+ * never look at the etags)
+ * + no spurious evictions
+ * - expensive for large ranges
+ * * surely cheaper than (1)
+ *
+ * 3. walk all the lines in the cache, check the tags, if a match
+ * occurs create a page mapping to ocbp the line through
+ * + no spurious evictions
+ * - tag inspection overhead
+ * - (especially for small ranges)
+ * - potential cost of setting up/tearing down page mapping for
+ * every line that matches the range
+ * * cost partly independent of range size
+ *
+ * 4. walk all the lines in the cache, check the tags, if a match
+ * occurs use 4 * alloco to purge the line (+3 other probably
+ * innocent victims) by natural eviction
+ * + no tlb mapping overheads
+ * - spurious evictions
+ * - tag inspection overhead
+ *
+ * 5. implement like flush_cache_all
+ * + no tag inspection overhead
+ * - spurious evictions
+ * - bad for small ranges
+ *
+ * (1) can be ruled out as more expensive than (2). (2) appears best
+ * for small ranges. The choice between (3), (4) and (5) for large
+ * ranges and the range size for the large/small boundary need
+ * benchmarking to determine.
+ *
+ * For now use approach (2) for small ranges and (5) for large ones.
+ */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- /* There are at least 5 choices for the implementation of this, with
- pros (+), cons(-), comments(*):
-
- 1. ocbp each line in the range through the original user's ASID
- + no lines spuriously evicted
- - tlbmiss handling (must either handle faults on demand => extra
- special-case code in tlbmiss critical path), or map the page in
- advance (=> flush_tlb_range in advance to avoid multiple hits)
- - ASID switching
- - expensive for large ranges
-
- 2. temporarily map each page in the range to a special effective
- address and ocbp through the temporary mapping; relies on the
- fact that SH-5 OCB* always do TLB lookup and match on ptags (they
- never look at the etags)
- + no spurious evictions
- - expensive for large ranges
- * surely cheaper than (1)
-
- 3. walk all the lines in the cache, check the tags, if a match
- occurs create a page mapping to ocbp the line through
- + no spurious evictions
- - tag inspection overhead
- - (especially for small ranges)
- - potential cost of setting up/tearing down page mapping for
- every line that matches the range
- * cost partly independent of range size
-
- 4. walk all the lines in the cache, check the tags, if a match
- occurs use 4 * alloco to purge the line (+3 other probably
- innocent victims) by natural eviction
- + no tlb mapping overheads
- - spurious evictions
- - tag inspection overhead
-
- 5. implement like flush_cache_all
- + no tag inspection overhead
- - spurious evictions
- - bad for small ranges
-
- (1) can be ruled out as more expensive than (2). (2) appears best
- for small ranges. The choice between (3), (4) and (5) for large
- ranges and the range size for the large/small boundary need
- benchmarking to determine.
-
- For now use approach (2) for small ranges and (5) for large ones.
-
- */
-
- int n_pages;
+ int n_pages = ((end - start) >> PAGE_SHIFT);
- n_pages = ((end - start) >> PAGE_SHIFT);
if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
-#if 1
sh64_dcache_purge_all();
-#else
- unsigned long long set, way;
- unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK;
- for (set = 0; set < cpu_data->dcache.sets; set++) {
- unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift);
- for (way = 0; way < cpu_data->dcache.ways; way++) {
- unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift);
- unsigned long long tag0;
- unsigned long line_valid;
-
- asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr));
- line_valid = tag0 & SH_CACHE_VALID;
- if (line_valid) {
- unsigned long cache_asid;
- unsigned long epn;
-
- cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift;
- /* The next line needs some
- explanation. The virtual tags
- encode bits [31:13] of the virtual
- address, bit [12] of the 'tag' being
- implied by the cache set index. */
- epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift);
-
- if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) {
- /* TODO : could optimise this
- call by batching multiple
- adjacent sets together. */
- sh64_dcache_purge_sets(set, 1);
- break; /* Don't waste time inspecting other ways for this set */
- }
- }
- }
- }
-#endif
} else {
/* Small range, covered by a single page table page */
start &= PAGE_MASK; /* should already be so */
end = PAGE_ALIGN(end); /* should already be so */
sh64_dcache_purge_user_pages(mm, start, end);
}
- return;
}
-static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end)
+/*
+ * Purge the range of addresses from the D-cache.
+ *
+ * The addresses lie in the superpage mapping. There's no harm if we
+ * overpurge at either end - just a small performance loss.
+ */
+void __flush_purge_region(void *start, int size)
{
- unsigned long long aligned_start;
- unsigned long long ull_end;
- unsigned long long addr;
-
- ull_end = end;
+ unsigned long long ullend, addr, aligned_start;
- /* Just wback over the range using the natural addresses. TLB miss
- handling will be OK (TBC) : the range has just been written to by
- the signal frame setup code, so the PTEs must exist.
+ aligned_start = (unsigned long long)(signed long long)(signed long) start;
+ addr = L1_CACHE_ALIGN(aligned_start);
+ ullend = (unsigned long long) (signed long long) (signed long) start + size;
- Note, if we have CONFIG_PREEMPT and get preempted inside this loop,
- it doesn't matter, even if the pid->ASID mapping changes whilst
- we're away. In that case the cache will have been flushed when the
- mapping was renewed. So the writebacks below will be nugatory (and
- we'll doubtless have to fault the TLB entry/ies in again with the
- new ASID), but it's a rare case.
- */
- aligned_start = start & L1_CACHE_ALIGN_MASK;
- addr = aligned_start;
- while (addr < ull_end) {
- asm __volatile__ ("ocbwb %0, 0" : : "r" (addr));
+ while (addr <= ullend) {
+ __asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
addr += L1_CACHE_BYTES;
}
}
-/****************************************************************************/
-
-/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END 0xe8000000UL
-
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr)
+void __flush_wback_region(void *start, int size)
{
- /* Given a physical address paddr, and a user virtual address
- user_eaddr which will eventually be mapped to it, create a one-off
- kernel-private eaddr mapped to the same paddr. This is used for
- creating special destination pages for copy_user_page and
- clear_user_page */
+ unsigned long long ullend, addr, aligned_start;
- static unsigned long current_pointer = UNIQUE_EADDR_START;
- unsigned long coloured_pointer;
+ aligned_start = (unsigned long long)(signed long long)(signed long) start;
+ addr = L1_CACHE_ALIGN(aligned_start);
+ ullend = (unsigned long long) (signed long long) (signed long) start + size;
- if (current_pointer == UNIQUE_EADDR_END) {
- sh64_dcache_purge_all();
- current_pointer = UNIQUE_EADDR_START;
+ while (addr < ullend) {
+ __asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
+ addr += L1_CACHE_BYTES;
}
-
- coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK);
- sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
- current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
- return coloured_pointer;
-}
-
-/****************************************************************************/
-
-static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address)
-{
- void *coloured_to;
-
- /* Discard any existing cache entries of the wrong colour. These are
- present quite often, if the kernel has recently used the page
- internally, then given it up, then it's been allocated to the user.
- */
- sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
-
- coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
- sh64_page_copy(from, coloured_to);
-
- sh64_teardown_dtlb_cache_slot();
}
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
+void __flush_invalidate_region(void *start, int size)
{
- void *coloured_to;
-
- /* Discard any existing kernel-originated lines of the wrong colour (as
- above) */
- sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to);
+ unsigned long long ullend, addr, aligned_start;
- coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to));
- sh64_page_clear(coloured_to);
+ aligned_start = (unsigned long long)(signed long long)(signed long) start;
+ addr = L1_CACHE_ALIGN(aligned_start);
+ ullend = (unsigned long long) (signed long long) (signed long) start + size;
- sh64_teardown_dtlb_cache_slot();
+ while (addr < ullend) {
+ __asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
+ addr += L1_CACHE_BYTES;
+ }
}
-
#endif /* !CONFIG_DCACHE_DISABLED */
-/****************************************************************************/
-
-/*##########################################################################
- EXTERNALLY CALLABLE API.
- ##########################################################################*/
-
-/* These functions are described in Documentation/cachetlb.txt.
- Each one of these functions varies in behaviour depending on whether the
- I-cache and/or D-cache are configured out.
-
- Note that the Linux term 'flush' corresponds to what is termed 'purge' in
- the sh/sh64 jargon for the D-cache, i.e. write back dirty data then
- invalidate the cache lines, and 'invalidate' for the I-cache.
- */
-
-#undef FLUSH_TRACE
-
+/*
+ * Invalidate the entire contents of both caches, after writing back to
+ * memory any dirty data from the D-cache.
+ */
void flush_cache_all(void)
{
- /* Invalidate the entire contents of both caches, after writing back to
- memory any dirty data from the D-cache. */
sh64_dcache_purge_all();
sh64_icache_inv_all();
}
-/****************************************************************************/
-
+/*
+ * Invalidate an entire user-address space from both caches, after
+ * writing back dirty data (e.g. for shared mmap etc).
+ *
+ * This could be coded selectively by inspecting all the tags then
+ * doing 4*alloco on any set containing a match (as for
+ * flush_cache_range), but fork/exit/execve (where this is called from)
+ * are expensive anyway.
+ *
+ * Have to do a purge here, despite the comments re I-cache below.
+ * There could be odd-coloured dirty data associated with the mm still
+ * in the cache - if this gets written out through natural eviction
+ * after the kernel has reused the page there will be chaos.
+ *
+ * The mm being torn down won't ever be active again, so any Icache
+ * lines tagged with its ASID won't be visible for the rest of the
+ * lifetime of this ASID cycle. Before the ASID gets reused, there
+ * will be a flush_cache_all. Hence we don't need to touch the
+ * I-cache. This is similar to the lack of action needed in
+ * flush_tlb_mm - see fault.c.
+ */
void flush_cache_mm(struct mm_struct *mm)
{
- /* Invalidate an entire user-address space from both caches, after
- writing back dirty data (e.g. for shared mmap etc). */
-
- /* This could be coded selectively by inspecting all the tags then
- doing 4*alloco on any set containing a match (as for
- flush_cache_range), but fork/exit/execve (where this is called from)
- are expensive anyway. */
-
- /* Have to do a purge here, despite the comments re I-cache below.
- There could be odd-coloured dirty data associated with the mm still
- in the cache - if this gets written out through natural eviction
- after the kernel has reused the page there will be chaos.
- */
-
sh64_dcache_purge_all();
-
- /* The mm being torn down won't ever be active again, so any Icache
- lines tagged with its ASID won't be visible for the rest of the
- lifetime of this ASID cycle. Before the ASID gets reused, there
- will be a flush_cache_all. Hence we don't need to touch the
- I-cache. This is similar to the lack of action needed in
- flush_tlb_mm - see fault.c. */
}
-/****************************************************************************/
-
+/*
+ * Invalidate (from both caches) the range [start,end) of virtual
+ * addresses from the user address space specified by mm, after writing
+ * back any dirty data.
+ *
+ * Note, 'end' is 1 byte beyond the end of the range to flush.
+ */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
- /* Invalidate (from both caches) the range [start,end) of virtual
- addresses from the user address space specified by mm, after writing
- back any dirty data.
-
- Note, 'end' is 1 byte beyond the end of the range to flush. */
-
sh64_dcache_purge_user_range(mm, start, end);
sh64_icache_inv_user_page_range(mm, start, end);
}
-/****************************************************************************/
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
+/*
+ * Invalidate any entries in either cache for the vma within the user
+ * address space vma->vm_mm for the page starting at virtual address
+ * 'eaddr'. This seems to be used primarily in breaking COW. Note,
+ * the I-cache must be searched too in case the page in question is
+ * both writable and being executed from (e.g. stack trampolines.)
+ *
+ * Note, this is called with pte lock held.
+ */
+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
+ unsigned long pfn)
{
- /* Invalidate any entries in either cache for the vma within the user
- address space vma->vm_mm for the page starting at virtual address
- 'eaddr'. This seems to be used primarily in breaking COW. Note,
- the I-cache must be searched too in case the page in question is
- both writable and being executed from (e.g. stack trampolines.)
-
- Note, this is called with pte lock held.
- */
-
sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
- if (vma->vm_flags & VM_EXEC) {
+ if (vma->vm_flags & VM_EXEC)
sh64_icache_inv_user_page(vma, eaddr);
- }
}
-/****************************************************************************/
+void flush_dcache_page(struct page *page)
+{
+ sh64_dcache_purge_phy_page(page_to_phys(page));
+ wmb();
+}
-#ifndef CONFIG_DCACHE_DISABLED
+/*
+ * Flush the range [start,end] of kernel virtual address space from
+ * the I-cache. The corresponding range must be purged from the
+ * D-cache also because the SH-5 doesn't have cache snooping between
+ * the caches. The addresses will be visible through the superpage
+ * mapping, therefore it's guaranteed that there are no cache entries for
+ * the range in cache sets of the wrong colour.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ __flush_purge_region((void *)start, end);
+ wmb();
+ sh64_icache_inv_kernel_range(start, end);
+}
-void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
+/*
+ * Flush the range of user (defined by vma->vm_mm) address space starting
+ * at 'addr' for 'len' bytes from the cache. The range does not straddle
+ * a page boundary, the unique physical page containing the range is
+ * 'page'. This seems to be used mainly for invalidating an address
+ * range following a poke into the program text through the ptrace() call
+ * from another process (e.g. for BRK instruction insertion).
+ */
+void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr, int len)
{
- /* 'from' and 'to' are kernel virtual addresses (within the superpage
- mapping of the physical RAM). 'address' is the user virtual address
- where the copy 'to' will be mapped after. This allows a custom
- mapping to be used to ensure that the new copy is placed in the
- right cache sets for the user to see it without having to bounce it
- out via memory. Note however : the call to flush_page_to_ram in
- (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- very important case!
-
- TBD : can we guarantee that on every call, any cache entries for
- 'from' are in the same colour sets as 'address' also? i.e. is this
- always used just to deal with COW? (I suspect not). */
-
- /* There are two possibilities here for when the page 'from' was last accessed:
- * by the kernel : this is OK, no purge required.
- * by the/a user (e.g. for break_COW) : need to purge.
-
- If the potential user mapping at 'address' is the same colour as
- 'from' there is no need to purge any cache lines from the 'from'
- page mapped into cache sets of colour 'address'. (The copy will be
- accessing the page through 'from').
- */
- if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) {
- sh64_dcache_purge_coloured_phy_page(__pa(from), address);
- }
+ sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
+ mb();
- if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
- /* No synonym problem on destination */
- sh64_page_copy(from, to);
- } else {
- sh64_copy_user_page_coloured(to, from, address);
- }
+ if (vma->vm_flags & VM_EXEC)
+ sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
+}
+
+/*
+ * For the address range [start,end), write back the data from the
+ * D-cache and invalidate the corresponding region of the I-cache for the
+ * current process. Used to flush signal trampolines on the stack to
+ * make them executable.
+ */
+void flush_cache_sigtramp(unsigned long vaddr)
+{
+ unsigned long end = vaddr + L1_CACHE_BYTES;
- /* Note, don't need to flush 'from' page from the cache again - it's
- done anyway by the generic code */
+ __flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
+ wmb();
+ sh64_icache_inv_current_user_range(vaddr, end);
}
-void clear_user_page(void *to, unsigned long address, struct page *page)
+/*
+ * These *MUST* lie in an area of virtual address space that's otherwise
+ * unused.
+ */
+#define UNIQUE_EADDR_START 0xe0000000UL
+#define UNIQUE_EADDR_END 0xe8000000UL
+
+/*
+ * Given a physical address paddr, and a user virtual address user_eaddr
+ * which will eventually be mapped to it, create a one-off kernel-private
+ * eaddr mapped to the same paddr. This is used for creating special
+ * destination pages for copy_user_page and clear_user_page.
+ */
+static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
+ unsigned long paddr)
{
- /* 'to' is a kernel virtual address (within the superpage
- mapping of the physical RAM). 'address' is the user virtual address
- where the 'to' page will be mapped after. This allows a custom
- mapping to be used to ensure that the new copy is placed in the
- right cache sets for the user to see it without having to bounce it
- out via memory.
- */
+ static unsigned long current_pointer = UNIQUE_EADDR_START;
+ unsigned long coloured_pointer;
- if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) {
- /* No synonym problem on destination */
- sh64_page_clear(to);
- } else {
- sh64_clear_user_page_coloured(to, address);
+ if (current_pointer == UNIQUE_EADDR_END) {
+ sh64_dcache_purge_all();
+ current_pointer = UNIQUE_EADDR_START;
}
-}
-#endif /* !CONFIG_DCACHE_DISABLED */
+ coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
+ (user_eaddr & CACHE_OC_SYN_MASK);
+ sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-/****************************************************************************/
+ current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-void flush_dcache_page(struct page *page)
-{
- sh64_dcache_purge_phy_page(page_to_phys(page));
- wmb();
+ return coloured_pointer;
}
-/****************************************************************************/
-
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh64_copy_user_page_coloured(void *to, void *from,
+ unsigned long address)
{
- /* Flush the range [start,end] of kernel virtual adddress space from
- the I-cache. The corresponding range must be purged from the
- D-cache also because the SH-5 doesn't have cache snooping between
- the caches. The addresses will be visible through the superpage
- mapping, therefore it's guaranteed that there no cache entries for
- the range in cache sets of the wrong colour.
+ void *coloured_to;
- Primarily used for cohering the I-cache after a module has
- been loaded. */
+ /*
+ * Discard any existing cache entries of the wrong colour. These are
+ * present quite often, if the kernel has recently used the page
+ * internally, then given it up, then it's been allocated to the user.
+ */
+ sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
- /* We also make sure to purge the same range from the D-cache since
- flush_page_to_ram() won't be doing this for us! */
+ coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
+ copy_page(from, coloured_to);
- sh64_dcache_purge_kernel_range(start, end);
- wmb();
- sh64_icache_inv_kernel_range(start, end);
+ sh64_teardown_dtlb_cache_slot();
}
-/****************************************************************************/
-
-void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr, int len)
+static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
- /* Flush the range of user (defined by vma->vm_mm) address space
- starting at 'addr' for 'len' bytes from the cache. The range does
- not straddle a page boundary, the unique physical page containing
- the range is 'page'. This seems to be used mainly for invalidating
- an address range following a poke into the program text through the
- ptrace() call from another process (e.g. for BRK instruction
- insertion). */
+ void *coloured_to;
- sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
- mb();
+ /*
+ * Discard any existing kernel-originated lines of the wrong
+ * colour (as above)
+ */
+ sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
- if (vma->vm_flags & VM_EXEC) {
- sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
- }
-}
+ coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
+ clear_page(coloured_to);
-/*##########################################################################
- ARCH/SH64 PRIVATE CALLABLE API.
- ##########################################################################*/
+ sh64_teardown_dtlb_cache_slot();
+}
-void flush_cache_sigtramp(unsigned long start, unsigned long end)
+/*
+ * 'from' and 'to' are kernel virtual addresses (within the superpage
+ * mapping of the physical RAM). 'address' is the user virtual address
+ * where the copy 'to' will be mapped after. This allows a custom
+ * mapping to be used to ensure that the new copy is placed in the
+ * right cache sets for the user to see it without having to bounce it
+ * out via memory. Note however : the call to flush_page_to_ram in
+ * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
+ * very important case!
+ *
+ * TBD : can we guarantee that on every call, any cache entries for
+ * 'from' are in the same colour sets as 'address' also? i.e. is this
+ * always used just to deal with COW? (I suspect not).
+ *
+ * There are two possibilities here for when the page 'from' was last accessed:
+ * - by the kernel : this is OK, no purge required.
+ * - by the/a user (e.g. for break_COW) : need to purge.
+ *
+ * If the potential user mapping at 'address' is the same colour as
+ * 'from' there is no need to purge any cache lines from the 'from'
+ * page mapped into cache sets of colour 'address'. (The copy will be
+ * accessing the page through 'from').
+ */
+void copy_user_page(void *to, void *from, unsigned long address,
+ struct page *page)
{
- /* For the address range [start,end), write back the data from the
- D-cache and invalidate the corresponding region of the I-cache for
- the current process. Used to flush signal trampolines on the stack
- to make them executable. */
+ if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
+ sh64_dcache_purge_coloured_phy_page(__pa(from), address);
- sh64_dcache_wback_current_user_range(start, end);
- wmb();
- sh64_icache_inv_current_user_range(start, end);
+ if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
+ copy_page(to, from);
+ else
+ sh64_copy_user_page_coloured(to, from, address);
}
+/*
+ * 'to' is a kernel virtual address (within the superpage mapping of the
+ * physical RAM). 'address' is the user virtual address where the 'to'
+ * page will be mapped after. This allows a custom mapping to be used to
+ * ensure that the new copy is placed in the right cache sets for the
+ * user to see it without having to bounce it out via memory.
+ */
+void clear_user_page(void *to, unsigned long address, struct page *page)
+{
+ if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
+ clear_page(to);
+ else
+ sh64_clear_user_page_coloured(to, address);
+}
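For reference, a minimal standalone sketch of the page-colour (synonym) test that copy_user_page() and clear_user_page() above rely on. The mask value and the sample addresses are illustrative assumptions only, not taken from the SH-5 headers, where the real check uses CACHE_OC_SYN_MASK derived from the cache geometry.

#include <stdio.h>

/* Assumed value for illustration: one synonym bit above a 4K page offset. */
#define EX_OC_SYN_MASK	(0x1UL << 12)

/* Non-zero when the kernel alias and the user mapping land in the same
 * cache sets, i.e. a plain copy_page()/clear_page() is safe; otherwise
 * the coloured-alias path has to be taken. */
static int same_cache_colour(unsigned long kaddr, unsigned long uaddr)
{
	return ((kaddr ^ uaddr) & EX_OC_SYN_MASK) == 0;
}

int main(void)
{
	printf("%d\n", same_cache_colour(0xc0001000UL, 0x00005000UL)); /* 1: colours match */
	printf("%d\n", same_cache_colour(0xc0001000UL, 0x00006000UL)); /* 0: colours differ */
	return 0;
}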
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 7b2131c9eed..d3c33fc5b1c 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -26,7 +26,7 @@ struct dma_coherent_mem {
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- void *ret;
+ void *ret, *ret_nocache;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
int order = get_order(size);
@@ -44,17 +44,24 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
}
ret = (void *)__get_free_pages(gfp, order);
-
- if (ret != NULL) {
- memset(ret, 0, size);
- /*
- * Pages from the page allocator may have data present in
- * cache. So flush the cache before using uncached memory.
- */
- dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
- *dma_handle = virt_to_phys(ret);
+ if (!ret)
+ return NULL;
+
+ memset(ret, 0, size);
+ /*
+ * Pages from the page allocator may have data present in
+ * cache. So flush the cache before using uncached memory.
+ */
+ dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+
+ ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+ if (!ret_nocache) {
+ free_pages((unsigned long)ret, order);
+ return NULL;
}
- return ret;
+
+ *dma_handle = virt_to_phys(ret);
+ return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -71,7 +78,8 @@ void dma_free_coherent(struct device *dev, size_t size,
} else {
WARN_ON(irqs_disabled()); /* for portability */
BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
- free_pages((unsigned long)vaddr, order);
+ free_pages((unsigned long)phys_to_virt(dma_handle), order);
+ iounmap(vaddr);
}
}
EXPORT_SYMBOL(dma_free_coherent);
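A hypothetical driver fragment showing the call pattern these two routines serve: the CPU keeps the pointer returned by dma_alloc_coherent() (after this patch, the uncached ioremap alias), while the dma_addr_t handle is what gets programmed into the device. The device pointer, ring size and register usage are assumptions for illustration.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static void *desc_ring;			/* CPU view: uncached mapping */
static dma_addr_t desc_ring_dma;	/* device view: bus address */

static int example_ring_alloc(struct device *dev)
{
	desc_ring = dma_alloc_coherent(dev, PAGE_SIZE, &desc_ring_dma, GFP_KERNEL);
	if (!desc_ring)
		return -ENOMEM;
	/* ... write desc_ring_dma into the controller's base-address register ... */
	return 0;
}

static void example_ring_free(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, desc_ring, desc_ring_dma);
}

Note that with the change above, the vaddr handed back to dma_free_coherent() is the ioremapped alias, which is why the free path recovers the cached linear address from dma_handle before calling free_pages() and then iounmap()s the alias.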
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 33b43d20e9f..d1fa27594c6 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -163,6 +164,8 @@ no_context:
if (fixup_exception(regs))
return;
+ if (handle_trapped_io(regs, address))
+ return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
@@ -296,6 +299,14 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
+ /*
+ * ITLB is not affected by "ldtlb" instruction.
+ * So, we need to flush the entry by ourselves.
+ */
+ local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
set_pte(pte, entry);
update_mmu_cache(NULL, address, entry);
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 2918c6b1465..e2ed6dd252b 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -203,6 +203,7 @@ void __init paging_init(void)
free_area_init_nodes(max_zone_pfns);
+#ifdef CONFIG_SUPERH32
/* Set up the uncached fixmap */
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
@@ -214,6 +215,7 @@ void __init paging_init(void)
*/
cached_to_uncached = P2SEG - P1SEG;
#endif
+#endif
}
static struct kcore_list kcore_mem, kcore_vmalloc;
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 25810670a0f..67997af25c0 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -45,3 +45,5 @@ MAGICPANELR2 SH_MAGIC_PANEL_R2
R2D_PLUS RTS7751R2D_PLUS
R2D_1 RTS7751R2D_1
CAYMAN SH_CAYMAN
+SDK7780 SH_SDK7780
+MIGOR SH_MIGOR
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 4cd5d7818dc..a6a6f982337 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -713,10 +713,10 @@ static irqreturn_t pcic_timer_handler (int irq, void *h)
write_seqlock(&xtime_lock); /* Dummy, to show that we remember */
pcic_clear_clock_irq();
do_timer(1);
+ write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
- write_sequnlock(&xtime_lock);
return IRQ_HANDLED;
}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 00b393c3a4a..cfaf22c05bc 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -128,10 +128,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
clear_clock_irq();
do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-
/* Determine when to update the Mostek clock. */
if (ntp_synced() &&
@@ -145,6 +141,9 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
}
write_sequnlock(&xtime_lock);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
return IRQ_HANDLED;
}
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
index 9311bfe4f2f..7d035f0d3ae 100644
--- a/arch/sparc64/solaris/fs.c
+++ b/arch/sparc64/solaris/fs.c
@@ -434,9 +434,9 @@ asmlinkage int solaris_statvfs(u32 path, u32 buf)
error = user_path_walk(A(path),&nd);
if (!error) {
- struct inode * inode = nd.dentry->d_inode;
- error = report_statvfs(nd.mnt, inode, buf);
- path_release(&nd);
+ struct inode *inode = nd.path.dentry->d_inode;
+ error = report_statvfs(nd.path.mnt, inode, buf);
+ path_put(&nd.path);
}
return error;
}
@@ -464,9 +464,9 @@ asmlinkage int solaris_statvfs64(u32 path, u32 buf)
lock_kernel();
error = user_path_walk(A(path), &nd);
if (!error) {
- struct inode * inode = nd.dentry->d_inode;
- error = report_statvfs64(nd.mnt, inode, buf);
- path_release(&nd);
+ struct inode *inode = nd.path.dentry->d_inode;
+ error = report_statvfs64(nd.path.mnt, inode, buf);
+ path_put(&nd.path);
}
unlock_kernel();
return error;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index ebb265c07e4..19d579d74d2 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -145,8 +145,8 @@ void mconsole_proc(struct mc_request *req)
}
up_write(&super->s_umount);
- nd.dentry = super->s_root;
- nd.mnt = NULL;
+ nd.path.dentry = super->s_root;
+ nd.path.mnt = NULL;
nd.flags = O_RDONLY + 1;
nd.last_type = LAST_ROOT;
@@ -159,7 +159,7 @@ void mconsole_proc(struct mc_request *req)
goto out_kill;
}
- file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
+ file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
goto out_kill;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index aaed1a3b92d..3be2305709b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,6 +21,8 @@ config X86
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_KPROBES
+ select HAVE_KVM
+
config GENERIC_LOCKBREAK
def_bool n
@@ -119,8 +121,6 @@ config ARCH_HAS_CPU_RELAX
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64
-select HAVE_KVM
-
config ARCH_HIBERNATION_POSSIBLE
def_bool y
depends on !SMP || !X86_VOYAGER
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 10b67170b13..8ca3557a6d5 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -126,6 +126,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
"state\n", cx->type);
}
+ snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+ cx->address);
out:
set_cpus_allowed(current, saved_mask);
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 32dd62b36ff..0c0eeb163d9 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -384,9 +384,6 @@ static void __init runtime_code_page_mkexec(void)
efi_memory_desc_t *md;
void *p;
- if (!(__supported_pte_mask & _PAGE_NX))
- return;
-
/* Make EFI runtime service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
@@ -394,7 +391,7 @@ static void __init runtime_code_page_mkexec(void)
if (md->type != EFI_RUNTIME_SERVICES_CODE)
continue;
- set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
+ set_memory_x(md->virt_addr, md->num_pages);
}
}
@@ -428,9 +425,6 @@ void __init efi_enter_virtual_mode(void)
else
va = efi_ioremap(md->phys_addr, size);
- if (md->attribute & EFI_MEMORY_WB)
- set_memory_uc(md->virt_addr, size);
-
md->virt_addr = (u64) (unsigned long) va;
if (!va) {
@@ -439,6 +433,9 @@ void __init efi_enter_virtual_mode(void)
continue;
}
+ if (!(md->attribute & EFI_MEMORY_WB))
+ set_memory_uc(md->virt_addr, md->num_pages);
+
systab = (u64) (unsigned long) efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
@@ -476,7 +473,8 @@ void __init efi_enter_virtual_mode(void)
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system;
efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
- runtime_code_page_mkexec();
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
memmap.map = NULL;
}
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 09d5c233093..d143a1e76b3 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -35,6 +35,7 @@
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
+#include <asm/cacheflush.h>
static pgd_t save_pgd __initdata;
static unsigned long efi_flags __initdata;
@@ -43,22 +44,15 @@ static void __init early_mapping_set_exec(unsigned long start,
unsigned long end,
int executable)
{
- pte_t *kpte;
- unsigned int level;
-
- while (start < end) {
- kpte = lookup_address((unsigned long)__va(start), &level);
- BUG_ON(!kpte);
- if (executable)
- set_pte(kpte, pte_mkexec(*kpte));
- else
- set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
- __supported_pte_mask));
- if (level == PG_LEVEL_4K)
- start = (start + PAGE_SIZE) & PAGE_MASK;
- else
- start = (start + PMD_SIZE) & PMD_MASK;
- }
+ unsigned long num_pages;
+
+ start &= PMD_MASK;
+ end = (end + PMD_SIZE - 1) & PMD_MASK;
+ num_pages = (end - start) >> PAGE_SHIFT;
+ if (executable)
+ set_memory_x((unsigned long)__va(start), num_pages);
+ else
+ set_memory_nx((unsigned long)__va(start), num_pages);
}
static void __init early_runtime_code_mapping_set_exec(int executable)
@@ -74,7 +68,7 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
md = p;
if (md->type == EFI_RUNTIME_SERVICES_CODE) {
unsigned long end;
- end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
early_mapping_set_exec(md->phys_addr, end, executable);
}
}
@@ -84,8 +78,8 @@ void __init efi_call_phys_prelog(void)
{
unsigned long vaddress;
- local_irq_save(efi_flags);
early_runtime_code_mapping_set_exec(1);
+ local_irq_save(efi_flags);
vaddress = (unsigned long)__va(0x0UL);
save_pgd = *pgd_offset_k(0x0UL);
set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
@@ -98,9 +92,9 @@ void __init efi_call_phys_epilog(void)
* After the lock is released, the original page table is restored.
*/
set_pgd(pgd_offset_k(0x0UL), save_pgd);
- early_runtime_code_mapping_set_exec(0);
__flush_tlb_all();
local_irq_restore(efi_flags);
+ early_runtime_code_mapping_set_exec(0);
}
void __init efi_reserve_bootmem(void)
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index ef62b07b2b4..8540abe86ad 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -95,7 +95,7 @@ static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
* registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
* !using_apic_timer decisions in do_timer_interrupt_hook()
*/
-struct clock_event_device pit_clockevent = {
+static struct clock_event_device pit_clockevent = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = init_pit_timer,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 65f6acb025c..faf3229f8fb 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -749,6 +749,15 @@ void __init gart_iommu_init(void)
*/
set_memory_np((unsigned long)__va(iommu_bus_base),
iommu_size >> PAGE_SHIFT);
+ /*
+ * Tricky. The GART table remaps the physical memory range,
+	 * so the CPU won't notice potential aliases and if the memory
+	 * is remapped to UC later on, we might surprise the PCI devices
+	 * with a stray writeout of a cacheline. So play it safe and
+ * do an explicit, full-scale wbinvd() _after_ having marked all
+ * the pages as Not-Present:
+ */
+ wbinvd();
/*
* Try to workaround a bug (thanks to BenH)
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 1941482d4ca..c47208fc593 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -11,7 +11,7 @@
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
u8 config, rev;
- u32 word;
+ u16 word;
/* BIOS may enable hardware IRQ balancing for
* E7520/E7320/E7525(revision ID 0x9 and below)
@@ -26,8 +26,11 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
pci_read_config_byte(dev, 0xf4, &config);
pci_write_config_byte(dev, 0xf4, config|0x2);
- /* read xTPR register */
- raw_pci_read(0, 0, 0x40, 0x4c, 2, &word);
+ /*
+ * read xTPR register. We may not have a pci_dev for device 8
+ * because it might be hidden until the above write.
+ */
+ pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
if (!(word & (1 << 13))) {
dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
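The replacement reads the xTPR register through pci_bus_read_config_word() with an explicit devfn, because device 8 may not have a struct pci_dev yet while it is still hidden. The old raw_pci_read() call used the literal 0x40, which is the same slot/function encoded by hand; the usual encoding macros (shown only for illustration, matching their standard Linux definitions) make that explicit:

    /* devfn packs a 5-bit slot number and a 3-bit function number */
    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)        ((devfn) & 0x07)

    /* PCI_DEVFN(8, 0) == 0x40, the literal the old raw_pci_read() call passed */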
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 5818dc28167..7fd6ac43e4a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,7 +326,7 @@ static inline void kb_wait(void)
}
}
-void machine_emergency_restart(void)
+static void native_machine_emergency_restart(void)
{
int i;
@@ -376,7 +376,7 @@ void machine_emergency_restart(void)
}
}
-void machine_shutdown(void)
+static void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
@@ -420,7 +420,7 @@ void machine_shutdown(void)
#endif
}
-void machine_restart(char *__unused)
+static void native_machine_restart(char *__unused)
{
printk("machine restart\n");
@@ -429,11 +429,11 @@ void machine_restart(char *__unused)
machine_emergency_restart();
}
-void machine_halt(void)
+static void native_machine_halt(void)
{
}
-void machine_power_off(void)
+static void native_machine_power_off(void)
{
if (pm_power_off) {
if (!reboot_force)
@@ -443,9 +443,35 @@ void machine_power_off(void)
}
struct machine_ops machine_ops = {
- .power_off = machine_power_off,
- .shutdown = machine_shutdown,
- .emergency_restart = machine_emergency_restart,
- .restart = machine_restart,
- .halt = machine_halt
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .emergency_restart = native_machine_emergency_restart,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt
};
+
+void machine_power_off(void)
+{
+ machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+ machine_ops.shutdown();
+}
+
+void machine_emergency_restart(void)
+{
+ machine_ops.emergency_restart();
+}
+
+void machine_restart(char *cmd)
+{
+ machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+ machine_ops.halt();
+}
+
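With the reboot handlers renamed to native_* and routed through machine_ops, the exported machine_*() entry points become thin wrappers, and an alternative backend can take over the whole reboot path by repointing the table. A hedged sketch of how such an override might look (the xen_* names are hypothetical placeholders, not part of this patch):

    /* Sketch only: a paravirt backend swapping in its own handlers. */
    static void xen_machine_halt(void)
    {
            /* issue a shutdown hypercall instead of executing hlt */
    }

    static void xen_machine_power_off(void)
    {
            /* likewise, ask the hypervisor to power the domain off */
    }

    static void __init xen_override_machine_ops(void)
    {
            machine_ops.halt      = xen_machine_halt;
            machine_ops.power_off = xen_machine_power_off;
            /* callers keep using machine_halt()/machine_power_off() unchanged */
    }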
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index 4c163772000..c29e235792a 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -10,8 +10,8 @@
* of the License.
*/
#include <linux/module.h>
+#include <asm/cacheflush.h>
#include <asm/sections.h>
-extern int rodata_test_data;
int rodata_test(void)
{
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index efc66df728b..04546668191 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -84,7 +84,7 @@ static inline void conditional_sti(struct pt_regs *regs)
static inline void preempt_conditional_sti(struct pt_regs *regs)
{
- preempt_disable();
+ inc_preempt_count();
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -95,7 +95,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
local_irq_disable();
/* Make sure to not schedule here because we could be running
on an exception stack. */
- preempt_enable_no_resched();
+ dec_preempt_count();
}
int kstack_depth_to_print = 12;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 621afb6343d..fdc667422df 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -186,7 +186,7 @@ static int bad_address(void *p)
}
#endif
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
__typeof__(pte_val(__pte(0))) page;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8106bba41ec..ee1091a4696 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
+#include <asm/cacheflush.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b59fc238151..a4a9cccdd4f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -45,6 +45,7 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
+#include <asm/cacheflush.h>
const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a4897a85268..9f42d7e9c15 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -265,7 +265,9 @@ static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
- pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)];
+ /* Don't assume we're using swapper_pg_dir at this point */
+ pgd_t *base = __va(read_cr3());
+ pgd_t *pgd = &base[pgd_index(addr)];
pud_t *pud = pud_offset(pgd, addr);
pmd_t *pmd = pmd_offset(pud, addr);
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index ed820160035..75f1b109aae 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -40,7 +40,6 @@ struct split_state {
static int print_split(struct split_state *s)
{
long i, expected, missed = 0;
- int printed = 0;
int err = 0;
s->lpg = s->gpg = s->spg = s->exec = 0;
@@ -53,12 +52,6 @@ static int print_split(struct split_state *s)
pte = lookup_address(addr, &level);
if (!pte) {
- if (!printed) {
- dump_pagetable(addr);
- printk(KERN_INFO "CPA %lx no pte level %d\n",
- addr, level);
- printed = 1;
- }
missed++;
i++;
continue;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 440210a2277..4119379f80f 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -275,8 +275,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
break;
#ifdef CONFIG_X86_64
case PG_LEVEL_1G:
- psize = PMD_PAGE_SIZE;
- pmask = PMD_PAGE_MASK;
+ psize = PUD_PAGE_SIZE;
+ pmask = PUD_PAGE_MASK;
break;
#endif
default:
@@ -688,6 +688,15 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
return 0;
+ /* Ensure we are PAGE_SIZE aligned */
+ if (addr & ~PAGE_MASK) {
+ addr &= PAGE_MASK;
+ /*
+ * People should not be passing in unaligned addresses:
+ */
+ WARN_ON_ONCE(1);
+ }
+
cpa.vaddr = addr;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
@@ -861,8 +870,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
return;
/*
- * The return value is ignored - the calls cannot fail,
- * large pages are disabled at boot time:
+ * The return value is ignored as the calls cannot fail.
+ * Large pages are kept enabled at boot time, and are
+ * split up quickly with DEBUG_PAGEALLOC. If a splitup
+ * fails here (due to temporary memory shortage) no damage
+ * is done because we just keep the largepage intact up
+ * to the next attempt when it will likely be split up:
*/
if (enable)
__set_pages_p(page, numpages);
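Two details in the pageattr.c hunks are easy to gloss over: the PG_LEVEL_1G case must use the PUD-sized constants, because a 1 GiB mapping spans far more than the 2 MiB described by the PMD constants, and change_page_attr_set_clr() now rounds a misaligned address down to a page boundary and warns once instead of silently operating on a bogus range. For reference, the sizes involved on x86-64 (illustrative, matching the usual kernel values):

    #define PAGE_SIZE      (1UL << 12)   /* 4 KiB base page (PG_LEVEL_4K) */
    #define PMD_PAGE_SIZE  (1UL << 21)   /* 2 MiB large page (PG_LEVEL_2M) */
    #define PUD_PAGE_SIZE  (1UL << 30)   /* 1 GiB large page (PG_LEVEL_1G) */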
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index d28dda57470..f385a4b4a48 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -7,7 +7,7 @@ VDSO32-$(CONFIG_X86_32) := y
VDSO32-$(CONFIG_COMPAT) := y
vdso-install-$(VDSO64-y) += vdso.so
-vdso-install-$(VDSO32-y) += $(vdso32-y:=.so)
+vdso-install-$(VDSO32-y) += $(vdso32-images)
# files to link into the vdso
@@ -63,6 +63,8 @@ vdso32.so-$(CONFIG_X86_32) += int80
vdso32.so-$(CONFIG_COMPAT) += syscall
vdso32.so-$(VDSO32-y) += sysenter
+vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
+
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
@@ -71,21 +73,21 @@ VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
targets += vdso32/vdso32.lds
-targets += $(vdso32.so-y:%=vdso32-%.so.dbg) $(vdso32.so-y:%=vdso32-%.so)
+targets += $(vdso32-images) $(vdso32-images:=.dbg)
targets += vdso32/note.o $(vdso32.so-y:%=vdso32/%.o)
-extra-y += $(vdso32.so-y:%=vdso32-%.so)
+extra-y += $(vdso32-images)
-$(obj)/vdso32.o: $(vdso32.so-y:%=$(obj)/vdso32-%.so)
+$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): asflags-$(CONFIG_X86_64) += -m32
+$(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
-$(vdso32.so-y:%=$(obj)/vdso32-%.so.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
- $(obj)/vdso32/vdso32.lds \
- $(obj)/vdso32/note.o \
- $(obj)/vdso32/%.o
+$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
+ $(obj)/vdso32/vdso32.lds \
+ $(obj)/vdso32/note.o \
+ $(obj)/vdso32/%.o
$(call if_changed,vdso)
# Make vdso32-*-syms.lds from each image, and then make sure they match.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index de647bc6e74..49e5358f481 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -798,6 +798,10 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
* added to the table can be prepared properly for Xen.
*/
xen_write_cr3(__pa(base));
+
+ /* Unpin initial Xen pagetable */
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+ PFN_DOWN(__pa(xen_start_info->pt_base)));
}
static __init void xen_pagetable_setup_done(pgd_t *base)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9ce983ed60f..ea92bac42c5 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -186,6 +186,12 @@ static int __init dmi_unknown_osi_linux(const struct dmi_system_id *d)
acpi_dmi_osi_linux(-1, d); /* unknown */
return 0;
}
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2006");
+ return 0;
+}
/*
* Most BIOS that invoke OSI(Linux) do nothing with it.
@@ -228,10 +234,10 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
- * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
*
* _OSI(Linux) is a NOP:
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
*/
{
.callback = dmi_disable_osi_linux,
@@ -327,12 +333,20 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell OP GX620",
+ .ident = "Dell OptiPlex GX620",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
},
},
+ { /* OSI(Linux) causes some USB initialization to not run */
+ .callback = dmi_unknown_osi_linux,
+ .ident = "Dell OptiPlex 755",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 755"),
+ },
+ },
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
.ident = "Dell PE 1900",
@@ -342,6 +356,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{ /* OSI(Linux) is a NOP */
+ .callback = dmi_unknown_osi_linux,
+ .ident = "Dell PE 1950",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
+ },
+ },
+ { /* OSI(Linux) is a NOP */
.callback = dmi_disable_osi_linux,
.ident = "Dell PE R200",
.matches = {
@@ -357,6 +379,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
},
},
+ { /* OSI(Linux) touches USB */
+ .callback = dmi_unknown_osi_linux,
+ .ident = "Dell PR 390",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 690"),
+ },
+ },
+ { /* OSI(Linux) unknown - ASL looks benign, but may affect dock/SMM */
+ .callback = dmi_unknown_osi_linux,
+ .ident = "Dell PR M4300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M4300"),
+ },
+ },
{ /* OSI(Linux) is a NOP */
.callback = dmi_disable_osi_linux,
.ident = "Dell Vostro 1000",
@@ -390,10 +428,10 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1536"),
* DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1556"),
* DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 1546"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
* _OSI(Linux) unknown effect:
* DMI_MATCH(DMI_PRODUCT_NAME, "Amilo M1425"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Amilo Si 1520"),
- * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
*/
{
.callback = dmi_disable_osi_linux,
@@ -402,6 +440,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Fujitsu Siemens",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+ },
+ },
/*
* Disable OSI(Linux) warnings on all "Hewlett-Packard"
*
@@ -443,10 +489,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* _OSI(Linux) helps sound
* DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
* DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+ * _OSI(Linux) has Linux specific hooks
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
* _OSI(Linux) is a NOP:
* DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
- * _OSI(Linux) effect unknown
- * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
+ * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
*/
{
.callback = dmi_enable_osi_linux,
@@ -465,7 +512,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_enable_osi_linux,
.ident = "Lenovo ThinkPad X61",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -473,7 +520,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
},
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Lenovo 3000 V100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -543,8 +590,9 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* Disable OSI(Linux) warnings on all "Sony Corporation"
*
* _OSI(Linux) is a NOP:
- * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NR11S_S"),
* DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ38GP_C"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
* DMI_MATCH(DMI_PRODUCT_NAME, "VGN-TZ21MN_N"),
* _OSI(Linux) unknown effect:
* DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5479dc0eeee..abec1ca94cf 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -110,7 +110,7 @@ static const struct file_operations acpi_system_event_ops = {
#endif /* CONFIG_ACPI_PROC_EVENT */
/* ACPI notifier chain */
-BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
+static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 058d0be5cbe..4290e019309 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -616,6 +616,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
+ arg.integer.value = sleep_state;
status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 34b3386dedc..8edba7b678e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -325,7 +325,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
}
#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
-struct acpi_table_header *acpi_find_dsdt_initrd(void)
+static struct acpi_table_header *acpi_find_dsdt_initrd(void)
{
struct file *firmware_file;
mm_segment_t oldfs;
@@ -419,7 +419,7 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
}
#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
-int __init acpi_no_initrd_override_setup(char *s)
+static int __init acpi_no_initrd_override_setup(char *s)
{
acpi_no_initrd_override = 1;
return 1;
@@ -623,7 +623,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
- void *value, u32 width)
+ u32 *value, u32 width)
{
int result, size;
@@ -689,7 +689,6 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
acpi_status status;
unsigned long temp;
acpi_object_type type;
- u8 tu8;
acpi_get_parent(chandle, &handle);
if (handle != rhandle) {
@@ -704,6 +703,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
&temp);
if (ACPI_SUCCESS(status)) {
+ u32 val;
pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
@@ -712,24 +712,24 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
/* any nicer way to get bus number of bridge ? */
status =
- acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
+ acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
8);
if (ACPI_SUCCESS(status)
- && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
+ && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
status =
acpi_os_read_pci_configuration(pci_id, 0x18,
- &tu8, 8);
+ &val, 8);
if (!ACPI_SUCCESS(status)) {
/* Certainly broken... FIX ME */
return;
}
*is_bridge = 1;
- pci_id->bus = tu8;
+ pci_id->bus = val;
status =
acpi_os_read_pci_configuration(pci_id, 0x19,
- &tu8, 8);
+ &val, 8);
if (ACPI_SUCCESS(status)) {
- *bus_number = tu8;
+ *bus_number = val;
}
} else
*is_bridge = 0;
@@ -1109,7 +1109,7 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
* string starting with '!' disables that string
* otherwise string is added to list, augmenting built-in strings
*/
-static int __init acpi_osi_setup(char *str)
+int __init acpi_osi_setup(char *str)
{
if (str == NULL || *str == '\0') {
printk(KERN_INFO PREFIX "_OSI method disabled\n");
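The tu8 fix above matters because acpi_os_read_pci_configuration() stores through the pointer it is given, and the prototype now states the contract explicitly (u32 *value): handing it the address of a u8 risks a store past the end of the variable. A user-space illustration of the hazard, with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical helper that always stores 32 bits, whatever 'width' was requested. */
    static void read_config(void *out, int width)
    {
            uint32_t raw = 0x12345678;
            memcpy(out, &raw, sizeof(raw));   /* 4 bytes, regardless of width */
    }

    int main(void)
    {
            uint32_t val;   /* wide enough for whatever the helper writes */
            /* uint8_t tu8; read_config(&tu8, 8);  -- would overrun the variable */

            read_config(&val, 8);
            return (int)(val & 0xff);
    }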
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 32003fdc91e..980e1c33e6c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -945,11 +945,16 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
* Otherwise, ignore this info and continue.
*/
cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
continue;
}
+ } else {
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+ cx.address);
}
+
obj = &(element->package.elements[2]);
if (obj->type != ACPI_TYPE_INTEGER)
continue;
@@ -1420,6 +1425,14 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
return 0;
local_irq_disable();
+
+ /* Do not access any ACPI IO ports in suspend path */
+ if (acpi_idle_suspend) {
+ acpi_safe_halt();
+ local_irq_enable();
+ return 0;
+ }
+
if (pr->flags.bm_check)
acpi_idle_update_bm_rld(pr, cx);
@@ -1643,6 +1656,11 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
return -EINVAL;
}
+ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+ dev->states[i].name[0] = '\0';
+ dev->states[i].desc[0] = '\0';
+ }
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
state = &dev->states[count];
@@ -1659,6 +1677,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
cpuidle_set_statedata(state, cx);
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+ strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->power_usage = cx->power;
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index 36b84ab418d..efacc9f8bfe 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -247,7 +247,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
block = &wblock->gblock;
handle = wblock->handle;
- if (!block->flags & ACPI_WMI_METHOD)
+ if (!(block->flags & ACPI_WMI_METHOD))
return AE_BAD_DATA;
if (block->instance_count < instance)
@@ -673,11 +673,11 @@ static int __init acpi_wmi_init(void)
{
acpi_status result;
+ INIT_LIST_HEAD(&wmi_blocks.list);
+
if (acpi_disabled)
return -ENODEV;
- INIT_LIST_HEAD(&wmi_blocks.list);
-
result = acpi_bus_register_driver(&acpi_wmi_driver);
if (result < 0) {
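The one-character wmi.c fix is a classic C precedence trap: ! binds tighter than &, so the old test evaluated (!block->flags) & ACPI_WMI_METHOD, which is 0 for any non-zero flags word and therefore never rejected a block that lacked the method bit. In isolation:

    unsigned int flags = 0x1;      /* the METHOD bit (0x2) is NOT set here */

    int broken = !flags & 0x2;     /* parses as (!flags) & 0x2 == 0: the bad block slips through */
    int fixed  = !(flags & 0x2);   /* == 1: correctly flags the missing METHOD bit */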
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3011919f3ec..004dae4ea5b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3048,6 +3048,8 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
+ const char *dev_err_whine = "";
+ int ign_dev_err = 0;
unsigned int err_mask;
int rc;
@@ -3057,41 +3059,57 @@ static int ata_dev_set_mode(struct ata_device *dev)
err_mask = ata_dev_set_xfermode(dev);
+ if (err_mask & ~AC_ERR_DEV)
+ goto fail;
+
+ /* revalidate */
+ ehc->i.flags |= ATA_EHI_POST_SETMODE;
+ rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
+ ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
+ if (rc)
+ return rc;
+
/* Old CFA may refuse this command, which is just fine */
if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
- err_mask &= ~AC_ERR_DEV;
+ ign_dev_err = 1;
/* Some very old devices and some bad newer ones fail any kind of
SET_XFERMODE request but support PIO0-2 timings and no IORDY */
if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
dev->pio_mode <= XFER_PIO_2)
- err_mask &= ~AC_ERR_DEV;
+ ign_dev_err = 1;
/* Early MWDMA devices do DMA but don't allow DMA mode setting.
Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
dev->dma_mode == XFER_MW_DMA_0 &&
(dev->id[63] >> 8) & 1)
- err_mask &= ~AC_ERR_DEV;
+ ign_dev_err = 1;
- if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
- "(err_mask=0x%x)\n", err_mask);
- return -EIO;
- }
+ /* if the device is actually configured correctly, ignore dev err */
+ if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
+ ign_dev_err = 1;
- ehc->i.flags |= ATA_EHI_POST_SETMODE;
- rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
- ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
- if (rc)
- return rc;
+ if (err_mask & AC_ERR_DEV) {
+ if (!ign_dev_err)
+ goto fail;
+ else
+ dev_err_whine = " (device error ignored)";
+ }
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
- ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
- ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
+ ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
+ ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+ dev_err_whine);
+
return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
}
/**
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 761a66608d7..ea567e2b170 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -772,7 +772,7 @@ static void __exit amd_exit(void)
}
MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
+MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 333dc15f8cc..6c59969fd50 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -127,7 +127,7 @@ static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
static int qdi; /* Set to probe QDI controllers */
static int winbond; /* Set to probe Winbond controllers,
- give I/O port if non stdanard */
+ give I/O port if non standard */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = 0x1F; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 1c1b83541d1..15dd649f89e 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -17,6 +17,7 @@
* Base + 0x00 IRQ Status
* Base + 0x01 IRQ control
* Base + 0x02 Chipset control
+ * Base + 0x03 Unknown
* Base + 0x04 VDMA and reset control + wait bits
* Base + 0x08 BMIMBA
* Base + 0x0C DMA Length
@@ -174,8 +175,12 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ata_std_ports(&ap->ioaddr);
iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
- iowrite8(0xB3, base + 0x02); /* Burst, ?? setup */
- iowrite8(0x00, base + 0x04); /* WAIT0 ? */
+ iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
+ iowrite8(0x01, base + 0x03); /* Unknown */
+ iowrite8(0x20, base + 0x04); /* WAIT0 */
+ iowrite8(0x8f, base + 0x05); /* Unknown */
+ iowrite8(0xa4, base + 0x1c); /* Unknown */
+ iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
/* FIXME: Should we disable them at remove ? */
return ata_host_activate(host, dev->irq, ata_interrupt,
IRQF_SHARED, &ninja32_sht);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 39627ab684b..d119a68c388 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -84,6 +84,7 @@ enum {
VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
VIA_NO_ENABLES = 0x400, /* Has no enablebits */
+ VIA_SATA_PATA = 0x800, /* SATA/PATA combined configuration */
};
/*
@@ -100,7 +101,7 @@ static const struct via_isa_bridge {
{ "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
- { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -172,6 +173,9 @@ static int via_cable_detect(struct ata_port *ap) {
if (via_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
+ if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
+ return ATA_CBL_SATA;
+
/* Early chips are 40 wire */
if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
return ATA_CBL_PATA40;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 080b8362f8d..04b571764af 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1716,14 +1716,16 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
hc, relevant, hc_irq_cause);
- for (port = port0; port < port0 + last_port; port++) {
+ for (port = port0; port < last_port; port++) {
struct ata_port *ap = host->ports[port];
- struct mv_port_priv *pp = ap->private_data;
+ struct mv_port_priv *pp;
int have_err_bits, hard_port, shift;
if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
continue;
+ pp = ap->private_data;
+
shift = port << 1; /* (port * 2) */
if (port >= MV_PORTS_PER_HC) {
shift++; /* skip bit 8 in the HC Main IRQ reg */
@@ -2879,6 +2881,26 @@ done:
return rc;
}
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+ hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+ MV_CRQB_Q_SZ, 0);
+ if (!hpriv->crqb_pool)
+ return -ENOMEM;
+
+ hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+ MV_CRPB_Q_SZ, 0);
+ if (!hpriv->crpb_pool)
+ return -ENOMEM;
+
+ hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+ MV_SG_TBL_SZ, 0);
+ if (!hpriv->sg_tbl_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
/**
* mv_platform_probe - handle a positive probe of an SoC Marvell
* host
@@ -2932,6 +2954,10 @@ static int mv_platform_probe(struct platform_device *pdev)
hpriv->base = ioremap(res->start, res->end - res->start + 1);
hpriv->base -= MV_SATAHC0_REG_BASE;
+ rc = mv_create_dma_pools(hpriv, &pdev->dev);
+ if (rc)
+ return rc;
+
/* initialize adapter */
rc = mv_init_host(host, chip_soc);
if (rc)
@@ -3068,26 +3094,6 @@ static void mv_print_info(struct ata_host *host)
scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
-static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
-{
- hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
- MV_CRQB_Q_SZ, 0);
- if (!hpriv->crqb_pool)
- return -ENOMEM;
-
- hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
- MV_CRPB_Q_SZ, 0);
- if (!hpriv->crpb_pool)
- return -ENOMEM;
-
- hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
- MV_SG_TBL_SZ, 0);
- if (!hpriv->sg_tbl_pool)
- return -ENOMEM;
-
- return 0;
-}
-
/**
* mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index b4e462f154e..730ccea78e4 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,10 +251,6 @@ static int floppy_release(struct inode *inode, struct file *filp);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
-#ifndef CONFIG_PMAC_MEDIABAY
-#define check_media_bay(which, what) 1
-#endif
-
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 379cbdad492..9df08105f4f 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -36,7 +36,7 @@
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
-#include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
+#include <linux/pagemap.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index bb09413d5a2..88590d04004 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -76,7 +76,7 @@ static struct hv_ops hvc_rtas_get_put_ops = {
.put_chars = hvc_rtas_write_console,
};
-static int hvc_rtas_init(void)
+static int __init hvc_rtas_init(void)
{
struct hvc_struct *hp;
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 00b8a84b031..ffa0efce0ae 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -45,7 +45,7 @@ config CARDMAN_4040
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
- depends on PCMCIA
+ depends on PCMCIA && NETDEVICES
select PPP
help
This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 60f71e6345e..d73663a5232 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -219,7 +219,8 @@ static void poll_idle_init(struct cpuidle_device *dev)
cpuidle_set_statedata(state, NULL);
- snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
state->exit_latency = 0;
state->target_residency = 0;
state->power_usage = -1;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 088ea74edd3..69102ca0568 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -218,16 +218,23 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
return sprintf(buf, "%u\n", state->_name);\
}
-static ssize_t show_state_name(struct cpuidle_state *state, char *buf)
-{
- return sprintf(buf, "%s\n", state->name);
+#define define_show_state_str_function(_name) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+{ \
+ if (state->_name[0] == '\0')\
+ return sprintf(buf, "<null>\n");\
+ return sprintf(buf, "%s\n", state->_name);\
}
define_show_state_function(exit_latency)
define_show_state_function(power_usage)
define_show_state_function(usage)
define_show_state_function(time)
+define_show_state_str_function(name)
+define_show_state_str_function(desc)
+
define_one_state_ro(name, show_state_name);
+define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
@@ -235,6 +242,7 @@ define_one_state_ro(time, show_state_time);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
+ &attr_desc.attr,
&attr_latency.attr,
&attr_power.attr,
&attr_usage.attr,
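define_show_state_str_function() stamps out one show routine per string attribute; expanded by hand for the new desc field it is roughly the following (an illustrative expansion, not literal preprocessor output):

    static ssize_t show_state_desc(struct cpuidle_state *state, char *buf)
    {
            if (state->desc[0] == '\0')
                    return sprintf(buf, "<null>\n");
            return sprintf(buf, "%s\n", state->desc);
    }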
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 043c34ad0a0..df752e690e4 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -378,6 +378,9 @@ config BLK_DEV_IDEPNP
would like the kernel to automatically detect and activate
it, say Y here.
+config BLK_DEV_IDEDMA_SFF
+ bool
+
if PCI
comment "PCI IDE chipsets support"
@@ -459,6 +462,7 @@ config BLK_DEV_RZ1000
config BLK_DEV_IDEDMA_PCI
bool
select BLK_DEV_IDEPCI
+ select BLK_DEV_IDEDMA_SFF
config BLK_DEV_AEC62XX
tristate "AEC62XX chipset support"
@@ -688,23 +692,6 @@ config BLK_DEV_PDC202XX_OLD
If unsure, say N.
-config PDC202XX_BURST
- bool "Special UDMA Feature"
- depends on BLK_DEV_PDC202XX_OLD
- help
- This option causes the pdc202xx driver to enable UDMA modes on the
- PDC202xx even when the PDC202xx BIOS has not done so.
-
- It was originally designed for the PDC20246/Ultra33, whose BIOS will
- only setup UDMA on the first two PDC20246 cards. It has also been
- used successfully on a PDC20265/Ultra100, allowing use of UDMA modes
- when the PDC20265 BIOS has been disabled (for faster boot up).
-
- Please read the comments at the top of
- <file:drivers/ide/pci/pdc202xx_old.c>.
-
- If unsure, say N.
-
config BLK_DEV_PDC202XX_NEW
tristate "PROMISE PDC202{68|69|70|71|75|76|77} support"
select BLK_DEV_IDEDMA_PCI
@@ -1016,7 +1003,7 @@ config BLK_DEV_Q40IDE
config BLK_DEV_PALMCHIP_BK3710
tristate "Palmchip bk3710 IDE controller support"
depends on ARCH_DAVINCI
- select BLK_DEV_IDEDMA_PCI
+ select BLK_DEV_IDEDMA_SFF
help
Say Y here if you want to support the onchip IDE controller on the
TI DaVinci SoC
@@ -1124,7 +1111,8 @@ config BLK_DEV_UMC8672
endif
config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+ def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+ BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
config IDE_ARCH_OBSOLETE_INIT
def_bool ALPHA || (ARM && !ARCH_L7200) || BLACKFIN || X86 || IA64 || M32R || MIPS || PARISC || PPC || (SUPERH64 && BLK_DEV_IDEPCI) || SPARC
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index 0e7574c0ee6..161d30c8481 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -21,12 +21,7 @@
#include <asm/arch/bast-map.h>
#include <asm/arch/bast-irq.h>
-/* list of registered interfaces */
-static ide_hwif_t *ifs[2];
-
-static int __init
-bastide_register(unsigned int base, unsigned int aux, int irq,
- ide_hwif_t **hwif)
+static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
{
ide_hwif_t *hwif;
hw_regs_t hw;
@@ -76,8 +71,9 @@ static int __init bastide_init(void)
printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
- bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0, &ifs[0]);
- bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1, &ifs[1]);
+ bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
+ bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
+
return 0;
}
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c3069970a01..8e1f6bd3388 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -311,15 +311,37 @@ static void __devinit palm_bk3710_chipinit(void __iomem *base)
palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
}
+
+static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
+{
+ return ATA_CBL_PATA80;
+}
+
+static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif)
+{
+ hwif->set_pio_mode = palm_bk3710_set_pio_mode;
+ hwif->set_dma_mode = palm_bk3710_set_dma_mode;
+
+ hwif->cable_detect = palm_bk3710_cable_detect;
+}
+
+static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
+ .init_hwif = palm_bk3710_init_hwif,
+ .host_flags = IDE_HFLAG_NO_DMA, /* hack (no PCI) */
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
+ .mwdma_mask = ATA_MWDMA2,
+};
+
static int __devinit palm_bk3710_probe(struct platform_device *pdev)
{
- hw_regs_t ide_ctlr_info;
- int index = 0;
- int pribase;
struct clk *clkp;
struct resource *mem, *irq;
ide_hwif_t *hwif;
void __iomem *base;
+ int pribase, i;
+ hw_regs_t hw;
+ u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
clkp = clk_get(NULL, "IDECLK");
if (IS_ERR(clkp))
@@ -330,7 +352,7 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
ide_palm_clk = clk_get_rate(ideclkp)/100000;
ide_palm_clk = (10000/ide_palm_clk) + 1;
/* Register the IDE interface with Linux ATA Interface */
- memset(&ide_ctlr_info, 0, sizeof(ide_ctlr_info));
+ memset(&hw, 0, sizeof(hw));
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
@@ -349,32 +371,42 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
palm_bk3710_chipinit(base);
pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
- for (index = 0; index < IDE_NR_PORTS - 2; index++)
- ide_ctlr_info.io_ports[index] = pribase + index;
- ide_ctlr_info.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+ for (i = 0; i < IDE_NR_PORTS - 2; i++)
+ hw.io_ports[i] = pribase + i;
+ hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
IDE_PALM_ATA_PRI_CTL_OFFSET;
- ide_ctlr_info.irq = irq->start;
- ide_ctlr_info.chipset = ide_palm3710;
+ hw.irq = irq->start;
+ hw.chipset = ide_palm3710;
- if (ide_register_hw(&ide_ctlr_info, NULL, &hwif) < 0) {
- printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
- return -ENODEV;
- }
+ hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ if (hwif == NULL)
+ goto out;
+
+ i = hwif->index;
+
+ if (hwif->present)
+ ide_unregister(i, 0, 0);
+ else if (!hwif->hold)
+ ide_init_port_data(hwif, i);
+
+ ide_init_port_hw(hwif, &hw);
- hwif->set_pio_mode = &palm_bk3710_set_pio_mode;
- hwif->set_dma_mode = &palm_bk3710_set_dma_mode;
hwif->mmio = 1;
default_hwif_mmiops(hwif);
- hwif->cbl = ATA_CBL_PATA80;
- hwif->ultra_mask = 0x1f; /* Ultra DMA Mode 4 Max
- (input clk 99MHz) */
- hwif->mwdma_mask = 0x7;
- hwif->drives[0].autotune = 1;
- hwif->drives[1].autotune = 1;
ide_setup_dma(hwif, mem->start);
+ idx[0] = i;
+
+ ide_device_add(idx, &palm_bk3710_port_info);
+
+ if (!hwif->present)
+ goto out;
+
return 0;
+out:
+ printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
+ return -ENODEV;
}
static struct platform_driver platform_bk_driver = {
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 5e42c19a03e..354c91d06a6 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1555,7 +1555,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (stat)
return stat;
- toc->hdr.toc_length = ntohs (toc->hdr.toc_length);
+ toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3c69822507e..aed8b31ca56 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -590,20 +590,24 @@ static ide_proc_entry_t idedisk_proc[] = {
static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
- ide_task_t task;
+ ide_task_t *task = kmalloc(sizeof(*task), GFP_ATOMIC);
- memset(&task, 0, sizeof(task));
+ /* FIXME: map struct ide_taskfile on rq->cmd[] */
+ BUG_ON(task == NULL);
+
+ memset(task, 0, sizeof(*task));
if (ide_id_has_flush_cache_ext(drive->id) &&
(drive->capacity64 >= (1UL << 28)))
- task.tf.command = WIN_FLUSH_CACHE_EXT;
+ task->tf.command = WIN_FLUSH_CACHE_EXT;
else
- task.tf.command = WIN_FLUSH_CACHE;
- task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
- task.data_phase = TASKFILE_NO_DATA;
+ task->tf.command = WIN_FLUSH_CACHE;
+ task->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
+ IDE_TFLAG_DYN;
+ task->data_phase = TASKFILE_NO_DATA;
rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
rq->cmd_flags |= REQ_SOFTBARRIER;
- rq->special = &task;
+ rq->special = task;
}
/*
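The flush taskfile used to live on idedisk_prepare_flush()'s stack, but the request it hangs off completes long after that frame is gone, so the patch allocates it with kmalloc(GFP_ATOMIC), tags it IDE_TFLAG_DYN, and lets ide_end_drive_cmd() free it (see the ide-io.c hunk below). The general shape of the pattern, as a hedged sketch with hypothetical names:

    struct payload *p = kmalloc(sizeof(*p), GFP_ATOMIC);   /* must outlive this function */
    if (!p)
            return;                 /* or fall back to a synchronous path */
    p->flags |= PAYLOAD_DYN;        /* dynamically allocated: free on completion */
    rq->special = p;

    /* ... later, in the completion handler ... */
    if (p->flags & PAYLOAD_DYN)
            kfree(p);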
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a4bb32883c6..d0e7b537353 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -198,7 +198,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
EXPORT_SYMBOL_GPL(ide_build_sglist);
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* ide_build_dmatable - build IDE DMA table
*
@@ -316,7 +316,7 @@ void ide_destroy_dmatable (ide_drive_t *drive)
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* config_drive_for_dma - attempt to activate IDE DMA
* @drive: the drive to place in DMA mode
@@ -424,7 +424,7 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
/**
* ide_dma_off_quietly - Generic DMA kill
@@ -474,7 +474,7 @@ void ide_dma_on(ide_drive_t *drive)
drive->hwif->dma_host_set(drive, 1);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* ide_dma_setup - begin a DMA phase
* @drive: target device
@@ -591,7 +591,7 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
}
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
int __ide_dma_bad_drive (ide_drive_t *drive)
{
@@ -840,7 +840,7 @@ void ide_check_dma_crc(ide_drive_t *drive)
ide_dma_on(drive);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq (ide_drive_t *drive)
{
printk("%s: DMA interrupt recovery\n", drive->name);
@@ -1002,4 +1002,4 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
}
EXPORT_SYMBOL_GPL(ide_setup_dma);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 3addbe478d2..715379605a7 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -361,17 +361,21 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
spin_unlock_irqrestore(&ide_lock, flags);
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- ide_task_t *args = (ide_task_t *) rq->special;
+ ide_task_t *task = (ide_task_t *)rq->special;
+
if (rq->errors == 0)
- rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
-
- if (args) {
- struct ide_taskfile *tf = &args->tf;
+ rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);
+
+ if (task) {
+ struct ide_taskfile *tf = &task->tf;
tf->error = err;
tf->status = stat;
- ide_tf_read(drive, args);
+ ide_tf_read(drive, task);
+
+ if (task->tf_flags & IDE_TFLAG_DYN)
+ kfree(task);
}
} else if (blk_pm_request(rq)) {
struct request_pm_state *pm = rq->data;
@@ -388,7 +392,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
spin_lock_irqsave(&ide_lock, flags);
HWGROUP(drive)->rq = NULL;
rq->errors = err;
- if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
+ if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
+ blk_rq_bytes(rq))))
BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c32e759df20..c419266234a 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -786,15 +786,11 @@ static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- if (hwgroup->handler != NULL) {
- printk(KERN_CRIT "%s: ide_set_handler: handler not null; "
- "old=%p, new=%p\n",
- drive->name, hwgroup->handler, handler);
- }
+ BUG_ON(hwgroup->handler);
hwgroup->handler = handler;
hwgroup->expiry = expiry;
hwgroup->timer.expires = jiffies + timeout;
- hwgroup->req_gen_timer = hwgroup->req_gen;
+ hwgroup->req_gen_timer = hwgroup->req_gen;
add_timer(&hwgroup->timer);
}
@@ -827,11 +823,9 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
unsigned timeout, ide_expiry_t *expiry)
{
unsigned long flags;
- ide_hwgroup_t *hwgroup = HWGROUP(drive);
ide_hwif_t *hwif = HWIF(drive);
spin_lock_irqsave(&ide_lock, flags);
- BUG_ON(hwgroup->handler);
__ide_set_handler(drive, handler, timeout, expiry);
hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
/*
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 1ff676cc647..29e2c9719c3 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -21,15 +21,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-/*
- * IDE library routines. These are plug in code that most
- * drivers can use but occasionally may be weird enough
- * to want to do their own thing with
- *
- * Add common non I/O op stuff here. Make sure it has proper
- * kernel-doc function headers or your patch will be rejected
- */
-
static const char *udma_str[] =
{ "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6daea896c5d..4a2cb286822 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1051,7 +1051,7 @@ static int init_irq (ide_hwif_t *hwif)
int sa = 0;
#if defined(__mc68000__)
sa = IRQF_SHARED;
-#endif /* __mc68000__ || CONFIG_APUS */
+#endif /* __mc68000__ */
if (IDE_CHIPSET_IS_PCI(hwif->chipset))
sa = IRQF_SHARED;
@@ -1355,7 +1355,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
hwif->ultra_mask = d->udma_mask;
/* reset DMA masks only for SFF-style DMA controllers */
- if ((d->host_flags && IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0)
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0)
hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0;
if (d->host_flags & IDE_HFLAG_RQSIZE_256)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 49dd2e7bae7..0598ecfd5f3 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -466,9 +466,6 @@ static void ide_tape_put(struct ide_tape_obj *tape)
/* 0 = no tape is loaded, so we don't rewind after ejecting */
#define IDETAPE_MEDIUM_PRESENT 9
-/* A define for the READ BUFFER command */
-#define IDETAPE_RETRIEVE_FAULTY_BLOCK 6
-
/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK 1
#define IDETAPE_SPACE_TO_EOD 3
@@ -490,7 +487,6 @@ enum {
REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
REQ_IDETAPE_READ = (1 << 2),
REQ_IDETAPE_WRITE = (1 << 3),
- REQ_IDETAPE_READ_BUFFER = (1 << 4),
};
/* Error codes returned in rq->errors to the higher part of the driver. */
@@ -1523,29 +1519,6 @@ static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-static void idetape_create_read_buffer_cmd(idetape_tape_t *tape,
- idetape_pc_t *pc, struct idetape_bh *bh)
-{
- int size = 32768;
- struct idetape_bh *p = bh;
-
- idetape_init_pc(pc);
- pc->c[0] = READ_BUFFER;
- pc->c[1] = IDETAPE_RETRIEVE_FAULTY_BLOCK;
- pc->c[7] = size >> 8;
- pc->c[8] = size & 0xff;
- pc->callback = &idetape_pc_callback;
- pc->bh = bh;
- atomic_set(&bh->b_count, 0);
- pc->buffer = NULL;
- while (p) {
- atomic_set(&p->b_count, 0);
- p = p->b_reqnext;
- }
- pc->request_transfer = size;
- pc->buffer_size = size;
-}
-
static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
unsigned int length, struct idetape_bh *bh)
{
@@ -1655,13 +1628,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
(struct idetape_bh *)rq->special);
goto out;
}
- if (rq->cmd[0] & REQ_IDETAPE_READ_BUFFER) {
- tape->postpone_cnt = 0;
- pc = idetape_next_pc_storage(drive);
- idetape_create_read_buffer_cmd(tape, pc,
- (struct idetape_bh *)rq->special);
- goto out;
- }
if (rq->cmd[0] & REQ_IDETAPE_PC1) {
pc = (idetape_pc_t *) rq->buffer;
rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ad0e9955f73..4a8952a6c3d 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -44,8 +44,6 @@
* inspiration from lots of linux users, esp. hamish@zot.apana.org.au
*/
-#define REVISION "Revision: 7.00alpha2"
-
#define _IDE_C /* Tell ide.h it's really us */
#include <linux/module.h>
@@ -1618,7 +1616,7 @@ static int __init ide_init(void)
{
int ret;
- printk(KERN_INFO "Uniform Multi-Platform E-IDE driver " REVISION "\n");
+ printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
system_bus_speed = ide_system_bus_speed();
printk(KERN_INFO "ide: Assuming %dMHz system bus speed "
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 9d3851d2767..b7d81090d5d 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -94,7 +94,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
unsigned long ctl, unsigned long irq_port,
- ide_ack_intr_t *ack_intr);
+ ide_ack_intr_t *ack_intr)
{
int i;
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 0be1a824102..1c163e4ef03 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -147,11 +147,6 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
/* We must not grab the entire device, it has 'ISA' space in its
* BARS too and we will freak out other bits of the kernel
- *
- * pci_enable_device_bars() is going away. I replaced it with
- * IO only enable for now but I'll need confirmation this is
- * allright for that device. If not, it will need some kind of
- * quirk. --BenH.
*/
if (pci_enable_device_io(dev)) {
printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index da432979038..150422ec3cf 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -3,26 +3,6 @@
* Copyright (C) 2006-2007 MontaVista Software, Inc.
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
- * Promise Ultra33 cards with BIOS v1.20 through 1.28 will need this
- * compiled into the kernel if you have more than one card installed.
- * Note that BIOS v1.29 is reported to fix the problem. Since this is
- * safe chipset tuning, including this support is harmless
- *
- * Promise Ultra66 cards with BIOS v1.11 this
- * compiled into the kernel if you have more than one card installed.
- *
- * Promise Ultra100 cards.
- *
- * The latest chipset code will support the following ::
- * Three Ultra33 controllers and 12 drives.
- * 8 are UDMA supported and 4 are limited to DMA mode 2 multi-word.
- * The 8/4 ratio is a BIOS code limit by promise.
- *
- * UNLESS you enable "CONFIG_PDC202XX_BURST"
- *
- */
-
-/*
* Portions Copyright (C) 1999 Promise Technology, Inc.
* Author: Frank Tiernan (frankt@promise.com)
* Released under terms of General Public License
@@ -344,7 +324,6 @@ static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
(primary_mode & 1) ? "MASTER" : "PCI",
(secondary_mode & 1) ? "MASTER" : "PCI" );
-#ifdef CONFIG_PDC202XX_BURST
if (!(udma_speed_flag & 1)) {
printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
hwif->cds->name, udma_speed_flag,
@@ -352,7 +331,6 @@ static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
outb(udma_speed_flag | 1, dmabase | 0x1f);
printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
}
-#endif /* CONFIG_PDC202XX_BURST */
ide_setup_dma(hwif, dmabase);
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 638b727d42e..b10ade92efe 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3587,8 +3587,6 @@ static void cm_release_port_obj(struct kobject *obj)
{
struct cm_port *cm_port;
- printk(KERN_ERR "free cm port\n");
-
cm_port = container_of(obj, struct cm_port, port_obj);
kfree(cm_port);
}
@@ -3601,8 +3599,6 @@ static void cm_release_dev_obj(struct kobject *obj)
{
struct cm_device *cm_dev;
- printk(KERN_ERR "free cm dev\n");
-
cm_dev = container_of(obj, struct cm_device, dev_obj);
kfree(cm_dev);
}
@@ -3616,18 +3612,12 @@ struct class cm_class = {
};
EXPORT_SYMBOL(cm_class);
-static void cm_remove_fs_obj(struct kobject *obj)
-{
- kobject_put(obj->parent);
- kobject_put(obj);
-}
-
static int cm_create_port_fs(struct cm_port *port)
{
int i, ret;
ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
- kobject_get(&port->cm_dev->dev_obj),
+ &port->cm_dev->dev_obj,
"%d", port->port_num);
if (ret) {
kfree(port);
@@ -3637,7 +3627,7 @@ static int cm_create_port_fs(struct cm_port *port)
for (i = 0; i < CM_COUNTER_GROUPS; i++) {
ret = kobject_init_and_add(&port->counter_group[i].obj,
&cm_counter_obj_type,
- kobject_get(&port->port_obj),
+ &port->port_obj,
"%s", counter_group_names[i]);
if (ret)
goto error;
@@ -3647,8 +3637,8 @@ static int cm_create_port_fs(struct cm_port *port)
error:
while (i--)
- cm_remove_fs_obj(&port->counter_group[i].obj);
- cm_remove_fs_obj(&port->port_obj);
+ kobject_put(&port->counter_group[i].obj);
+ kobject_put(&port->port_obj);
return ret;
}
@@ -3658,9 +3648,9 @@ static void cm_remove_port_fs(struct cm_port *port)
int i;
for (i = 0; i < CM_COUNTER_GROUPS; i++)
- cm_remove_fs_obj(&port->counter_group[i].obj);
+ kobject_put(&port->counter_group[i].obj);
- cm_remove_fs_obj(&port->port_obj);
+ kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *device)
@@ -3744,7 +3734,7 @@ error1:
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
- cm_remove_fs_obj(&cm_dev->dev_obj);
+ kobject_put(&cm_dev->dev_obj);
}
static void cm_remove_one(struct ib_device *device)
@@ -3771,7 +3761,7 @@ static void cm_remove_one(struct ib_device *device)
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
- cm_remove_fs_obj(&cm_dev->dev_obj);
+ kobject_put(&cm_dev->dev_obj);
}
static int __init ib_cm_init(void)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1eff1b2c0e0..34507daaf9b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1107,7 +1107,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.param.ud.private_data_len =
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
} else {
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
conn_id = cma_new_conn_id(&listen_id->id, ib_event);
cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
ib_event->private_data, offset);
@@ -1130,6 +1129,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (!ret) {
+ /*
+ * Acquire mutex to prevent user executing rdma_destroy_id()
+ * while we're accessing the cm_id.
+ */
+ mutex_lock(&lock);
+ if (cma_comp(conn_id, CMA_CONNECT) &&
+ !cma_is_ud_ps(conn_id->id.ps))
+ ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ mutex_unlock(&lock);
cma_enable_remove(conn_id);
goto out;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index e9a08fa3dff..320f2b6ddee 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -35,6 +35,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
+#include <linux/inetdevice.h>
#include <net/neighbour.h>
#include <net/netevent.h>
@@ -1784,6 +1785,17 @@ err:
return err;
}
+static int is_loopback_dst(struct iw_cm_id *cm_id)
+{
+ struct net_device *dev;
+
+ dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
+ if (!dev)
+ return 0;
+ dev_put(dev);
+ return 1;
+}
+
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
int err = 0;
@@ -1791,6 +1803,11 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_ep *ep;
struct rtable *rt;
+ if (is_loopback_dst(cm_id)) {
+ err = -ENOSYS;
+ goto out;
+ }
+
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 7dc91a3e712..fe2c2e94a5f 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -199,7 +199,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
if (err)
goto err_free;
- err = mlx4_mr_enable(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
+ err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
if (err)
goto err_mr;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 6bd9f139334..1e1e336d3ef 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -473,7 +473,7 @@ static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
return;
- cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
+ be16_add_cpu(&cqe->db_cnt, -dbd);
cqe->wqe = new_wqe;
cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
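
be16_add_cpu() folds the big-endian read-modify-write into a single helper call. A short sketch of the equivalence (standalone example, not mthca code):

    __be16 db_cnt;          /* a big-endian, device-visible counter */
    int dbd = 1;            /* example decrement */

    /* Open-coded form removed by the hunk: */
    db_cnt = cpu_to_be16(be16_to_cpu(db_cnt) - dbd);

    /* Equivalent helper form added by the hunk: */
    be16_add_cpu(&db_cnt, -dbd);
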
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1f4d27d7c16..252db0822f6 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -542,6 +542,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
for (i = 0; i < npages; ++i) {
db_tab->page[i].refcount = 0;
db_tab->page[i].uvirt = 0;
+ sg_init_table(&db_tab->page[i].mem, 1);
}
return db_tab;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f9b7caa5414..054fab8e27a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -209,7 +209,6 @@ struct ipoib_cm_tx {
unsigned tx_tail;
unsigned long flags;
u32 mtu;
- struct ib_wc ibwc[IPOIB_NUM_WC];
};
struct ipoib_cm_rx_buf {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 9d3e778dc56..08c4396cf41 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -780,6 +780,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_stop(dev, 0);
ipoib_pkey_dev_delay_open(dev);
return;
}
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 936788272a5..51a112815f4 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -416,7 +416,6 @@ static void poll_media_bay(struct media_bay_info* bay)
}
}
-#ifdef CONFIG_MAC_FLOPPY
int check_media_bay(struct device_node *which_bay, int what)
{
int i;
@@ -431,7 +430,6 @@ int check_media_bay(struct device_node *which_bay, int what)
return -ENODEV;
}
EXPORT_SYMBOL(check_media_bay);
-#endif /* CONFIG_MAC_FLOPPY */
#ifdef CONFIG_BLK_DEV_IDE_PMAC
int check_media_bay_by_base(unsigned long base, int what)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a0585fb6da9..7aeceedcf7d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -206,16 +206,10 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
/* copy the pathname of a file to a buffer */
char *file_path(struct file *file, char *buf, int count)
{
- struct dentry *d;
- struct vfsmount *v;
-
if (!buf)
return NULL;
- d = file->f_path.dentry;
- v = file->f_path.mnt;
-
- buf = d_path(d, v, buf, count);
+ buf = d_path(&file->f_path, buf, count);
return IS_ERR(buf) ? NULL : buf;
}
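
The md change above is part of the tree-wide switch of d_path() from separate (dentry, vfsmount) arguments to a single struct path. A hedged before/after sketch of the calling convention, with minimal buffer handling:

    char *name;

    /* Old calling convention: */
    name = d_path(file->f_path.dentry, file->f_path.mnt, buf, count);

    /* New calling convention, as used in file_path() above: */
    name = d_path(&file->f_path, buf, count);
    if (IS_ERR(name))
            name = NULL;    /* d_path() returns ERR_PTR() on failure, e.g. buffer too small */
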
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index edc057f5cdc..2928ef22810 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -124,7 +124,7 @@ enum dm_raid1_error {
struct mirror {
struct mirror_set *ms;
atomic_t error_count;
- uint32_t error_type;
+ unsigned long error_type;
struct dm_dev *dev;
sector_t offset;
};
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f1606298238..e75b1437b58 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -361,7 +361,7 @@ static int lookup_device(const char *path, dev_t *dev)
if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
return r;
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
if (!inode) {
r = -ENOENT;
goto out;
@@ -375,7 +375,7 @@ static int lookup_device(const char *path, dev_t *dev)
*dev = inode->i_rdev;
out:
- path_release(&nd);
+ path_put(&nd.path);
return r;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5fc326d3970..7da6ec244e1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5197,8 +5197,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
- seq_path(seq, bitmap->file->f_path.mnt,
- bitmap->file->f_path.dentry," \t\n");
+ seq_path(seq, &bitmap->file->f_path, " \t\n");
}
seq_printf(seq, "\n");
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index f55b71a4337..4fb24215bd9 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -282,7 +282,7 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
writel(TIFM_MS_SYS_LATCH
| readl(sock->addr + SOCK_MS_SYSTEM),
- sock + SOCK_MS_SYSTEM);
+ sock->addr + SOCK_MS_SYSTEM);
writel(0, sock->addr + SOCK_MS_DATA);
dev_dbg(&sock->dev, "writing %x\n", 0);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 425f60c21fd..bfda731696f 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1470,9 +1470,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (mpt_debug_level)
printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level);
- if (pci_enable_device(pdev))
- return r;
-
ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
if (ioc == NULL) {
printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
@@ -1482,6 +1479,20 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->id = mpt_ids++;
sprintf(ioc->name, "ioc%d", ioc->id);
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
+ "failed\n", ioc->name);
+ kfree(ioc);
+ return r;
+ }
+ if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
+ printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
+ "MEM failed\n", ioc->name);
+ kfree(ioc);
+ return r;
+ }
+
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
@@ -1658,6 +1669,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->active = 0;
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ /* Set IOC ptr in the pcidev's driver data. */
+ pci_set_drvdata(ioc->pcidev, ioc);
+
/* Set lookup ptr. */
list_add_tail(&ioc->list, &ioc_list);
@@ -1791,6 +1805,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
pci_disable_device(pdev);
+ pci_release_selected_regions(pdev, ioc->bars);
pci_set_power_state(pdev, device_state);
return 0;
@@ -1807,7 +1822,6 @@ mpt_resume(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
u32 device_state = pdev->current_state;
int recovery_state;
- int err;
printk(MYIOC_s_INFO_FMT
"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
@@ -1815,9 +1829,18 @@ mpt_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err)
- return err;
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) {
+ ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
+ IORESOURCE_IO);
+ if (pci_enable_device(pdev))
+ return 0;
+ } else {
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev))
+ return 0;
+ }
+ if (pci_request_selected_regions(pdev, ioc->bars, "mpt"))
+ return 0;
/* enable interrupts */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
@@ -1878,6 +1901,7 @@ mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
* -2 if READY but IOCFacts Failed
* -3 if READY but PrimeIOCFifos Failed
* -4 if READY but IOCInit Failed
+ * -5 if failed to enable_device and/or request_selected_regions
*/
static int
mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
@@ -1976,6 +2000,18 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
}
+ if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
+ (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
+ pci_release_selected_regions(ioc->pcidev, ioc->bars);
+ ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
+ IORESOURCE_IO);
+ if (pci_enable_device(ioc->pcidev))
+ return -5;
+ if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
+ "mpt"))
+ return -5;
+ }
+
/*
* Device is reset now. It must have de-asserted the interrupt line
* (if it was asserted) and it should be safe to register for the
@@ -1999,7 +2035,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* ?? */
- pci_set_drvdata(ioc->pcidev, ioc);
dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt "
"%d\n", ioc->name, ioc->pcidev->irq));
}
@@ -2381,6 +2416,9 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
ioc->memmap = NULL;
}
+ pci_disable_device(ioc->pcidev);
+ pci_release_selected_regions(ioc->pcidev, ioc->bars);
+
#if defined(CONFIG_MTRR) && 0
if (ioc->mtrr_reg > 0) {
mtrr_del(ioc->mtrr_reg, 0, 0);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b49b706c002..d83ea96fe13 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -629,6 +629,7 @@ typedef struct _MPT_ADAPTER
dma_addr_t HostPageBuffer_dma;
int mtrr_reg;
struct pci_dev *pcidev; /* struct pci_dev pointer */
+ int bars; /* bitmask of BAR's that must be configured */
u8 __iomem *memmap; /* mmap address */
struct Scsi_Host *sh; /* Scsi Host pointer */
SpiCfgData spi_data; /* Scsi config. data */
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 7ba1acad540..e2c7edd206a 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -1689,7 +1689,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
static struct device_attribute dev_attr_hotkey_wakeup_reason =
__ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
-void hotkey_wakeup_reason_notify_change(void)
+static void hotkey_wakeup_reason_notify_change(void)
{
if (tp_features.hotkey_mask)
sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
@@ -1708,7 +1708,7 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
__ATTR(wakeup_hotunplug_complete, S_IRUGO,
hotkey_wakeup_hotunplug_complete_show, NULL);
-void hotkey_wakeup_hotunplug_complete_notify_change(void)
+static void hotkey_wakeup_hotunplug_complete_notify_change(void)
{
if (tp_features.hotkey_mask)
sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 9b430f20b64..28cc6787a80 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -184,26 +184,26 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n",
- ret, nd.dentry ? nd.dentry->d_inode : NULL);
+ ret, nd.path.dentry ? nd.path.dentry->d_inode : NULL);
if (ret)
return ret;
ret = -EINVAL;
- if (!S_ISBLK(nd.dentry->d_inode->i_mode))
+ if (!S_ISBLK(nd.path.dentry->d_inode->i_mode))
goto out;
- if (nd.mnt->mnt_flags & MNT_NODEV) {
+ if (nd.path.mnt->mnt_flags & MNT_NODEV) {
ret = -EACCES;
goto out;
}
- if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR)
+ if (imajor(nd.path.dentry->d_inode) != MTD_BLOCK_MAJOR)
goto not_an_MTD_device;
- mtdnr = iminor(nd.dentry->d_inode);
- path_release(&nd);
+ mtdnr = iminor(nd.path.dentry->d_inode);
+ path_put(&nd.path);
return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
mnt);
@@ -214,7 +214,7 @@ not_an_MTD_device:
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
out:
- path_release(&nd);
+ path_put(&nd.path);
return ret;
}
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 521dc0322ee..75ef9d0d974 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
#include "mlx4.h"
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 679dfdb6807..79b317b88c8 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -578,13 +578,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
goto err_free;
}
- fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
- key_to_hw_index(fmr->mr.key), NULL);
- if (!fmr->mpt) {
- err = -ENOMEM;
- goto err_free;
- }
-
return 0;
err_free:
@@ -595,7 +588,19 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
- return mlx4_mr_enable(dev, &fmr->mr);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err;
+
+ err = mlx4_mr_enable(dev, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
+ key_to_hw_index(fmr->mr.key), NULL);
+ if (!fmr->mpt)
+ return -ENOMEM;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8134c7e198a..b07ba2a1411 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -187,23 +187,22 @@ void sync_stop(void)
end_sync();
}
-
+
/* Optimisation. We can manage without taking the dcookie sem
* because we cannot reach this code without at least one
* dcookie user still being registered (namely, the reader
* of the event buffer). */
-static inline unsigned long fast_get_dcookie(struct dentry * dentry,
- struct vfsmount * vfsmnt)
+static inline unsigned long fast_get_dcookie(struct path *path)
{
unsigned long cookie;
-
- if (dentry->d_cookie)
- return (unsigned long)dentry;
- get_dcookie(dentry, vfsmnt, &cookie);
+
+ if (path->dentry->d_cookie)
+ return (unsigned long)path->dentry;
+ get_dcookie(path, &cookie);
return cookie;
}
-
+
/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
* which corresponds loosely to "application name". This is
* not strictly necessary but allows oprofile to associate
@@ -222,8 +221,7 @@ static unsigned long get_exec_dcookie(struct mm_struct * mm)
continue;
if (!(vma->vm_flags & VM_EXECUTABLE))
continue;
- cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
- vma->vm_file->f_path.mnt);
+ cookie = fast_get_dcookie(&vma->vm_file->f_path);
break;
}
@@ -248,8 +246,7 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
continue;
if (vma->vm_file) {
- cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
- vma->vm_file->f_path.mnt);
+ cookie = fast_get_dcookie(&vma->vm_file->f_path);
*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
vma->vm_start;
} else {
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 238628d3a85..d76d37bcb9c 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1768,7 +1768,7 @@ static int parport_PS2_supported(struct parport *pb)
}
#ifdef CONFIG_PARPORT_PC_FIFO
-static int __devinit parport_ECP_supported(struct parport *pb)
+static int parport_ECP_supported(struct parport *pb)
{
int i;
int config, configb;
@@ -1992,7 +1992,7 @@ static int parport_ECPEPP_supported(struct parport *pb)
/* Don't bother probing for modes we know we won't use. */
static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
#ifdef CONFIG_PARPORT_PC_FIFO
-static int __devinit parport_ECP_supported(struct parport *pb) { return 0; }
+static int parport_ECP_supported(struct parport *pb) { return 0; }
#endif
static int __devinit parport_EPP_supported(struct parport *pb) { return 0; }
static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;}
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 662b4c279cf..c283a9a70d8 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -36,7 +36,7 @@ static int num = 0;
* have irqs (PIC, Timer) because we call acpi_register_gsi.
* Finally, only devices that have a CRS method need to be in this list.
*/
-static struct __initdata acpi_device_id excluded_id_list[] = {
+static struct acpi_device_id excluded_id_list[] __initdata = {
{"PNP0C09", 0}, /* EC */
{"PNP0C0F", 0}, /* Link device */
{"PNP0000", 0}, /* PIC */
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index f7e67197a56..a8a51500e1e 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -105,8 +105,6 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
char *argv[3], **envp, *buf, *scratch;
int i = 0, value;
- if (!current->fs->root)
- return -EAGAIN;
if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
return -ENOMEM;
if (!(buf = kzalloc(256, GFP_KERNEL))) {
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 4c066545d17..6c9592ce499 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -76,7 +76,6 @@
*
* @pm_control: Shadow of the processor's pm_control register.
* @pm_start_stop: Shadow of the processor's pm_start_stop register.
- * @pm_interval: Shadow of the processor's pm_interval register.
* @group_control: Shadow of the processor's group_control register.
* @debug_bus_control: Shadow of the processor's debug_bus_control register.
*
@@ -91,7 +90,6 @@
struct ps3_lpm_shadow_regs {
u64 pm_control;
u64 pm_start_stop;
- u64 pm_interval;
u64 group_control;
u64 debug_bus_control;
};
@@ -181,9 +179,9 @@ void ps3_set_bookmark(u64 bookmark)
* includes cycles before the call.
*/
- asm volatile("or 29, 29, 29;"); /* db10cyc */
+ asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
mtspr(SPRN_BKMK, bookmark);
- asm volatile("or 29, 29, 29;"); /* db10cyc */
+ asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
}
EXPORT_SYMBOL_GPL(ps3_set_bookmark);
@@ -408,7 +406,14 @@ u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
case pm_start_stop:
return lpm_priv->shadow.pm_start_stop;
case pm_interval:
- return lpm_priv->shadow.pm_interval;
+ result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
+ if (result) {
+ val = 0;
+ dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
+ "reg %u, %s\n", __func__, __LINE__, reg,
+ ps3_result(result));
+ }
+ return (u32)val;
case group_control:
return lpm_priv->shadow.group_control;
case debug_bus_control:
@@ -475,10 +480,8 @@ void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
lpm_priv->shadow.pm_control = val;
break;
case pm_interval:
- if (val != lpm_priv->shadow.pm_interval)
- result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
- PS3_WRITE_PM_MASK, &dummy);
- lpm_priv->shadow.pm_interval = val;
+ result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
+ PS3_WRITE_PM_MASK, &dummy);
break;
case pm_start_stop:
if (val != lpm_priv->shadow.pm_start_stop)
@@ -1140,7 +1143,6 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
- lpm_priv->shadow.pm_interval = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index c3c3aba3ffc..d4f6f960dd1 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -28,10 +28,6 @@
#include "vuart.h"
-MODULE_AUTHOR("Sony Corporation");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("PS3 System Manager");
-
/**
* ps3_sys_manager - PS3 system manager driver.
*
@@ -142,9 +138,11 @@ enum ps3_sys_manager_attr {
/**
* enum ps3_sys_manager_event - External event type, reported by system manager.
- * @PS3_SM_EVENT_POWER_PRESSED: payload.value not used.
+ * @PS3_SM_EVENT_POWER_PRESSED: payload.value =
+ * enum ps3_sys_manager_button_event.
* @PS3_SM_EVENT_POWER_RELEASED: payload.value = time pressed in millisec.
- * @PS3_SM_EVENT_RESET_PRESSED: payload.value not used.
+ * @PS3_SM_EVENT_RESET_PRESSED: payload.value =
+ * enum ps3_sys_manager_button_event.
* @PS3_SM_EVENT_RESET_RELEASED: payload.value = time pressed in millisec.
* @PS3_SM_EVENT_THERMAL_ALERT: payload.value = thermal zone id.
* @PS3_SM_EVENT_THERMAL_CLEARED: payload.value = thermal zone id.
@@ -162,6 +160,17 @@ enum ps3_sys_manager_event {
};
/**
+ * enum ps3_sys_manager_button_event - Button event payload values.
+ * @PS3_SM_BUTTON_EVENT_HARD: Hardware generated event.
+ * @PS3_SM_BUTTON_EVENT_SOFT: Software generated event.
+ */
+
+enum ps3_sys_manager_button_event {
+ PS3_SM_BUTTON_EVENT_HARD = 0,
+ PS3_SM_BUTTON_EVENT_SOFT = 1,
+};
+
+/**
* enum ps3_sys_manager_next_op - Operation to perform after lpar is destroyed.
*/
@@ -181,7 +190,9 @@ enum ps3_sys_manager_next_op {
* @PS3_SM_WAKE_P_O_R: Power on reset.
*
* Additional wakeup sources when specifying PS3_SM_NEXT_OP_SYS_SHUTDOWN.
- * System will always wake from the PS3_SM_WAKE_DEFAULT sources.
+ * The system will always wake from the PS3_SM_WAKE_DEFAULT sources.
+ * Sources listed here are the only ones available to guests in the
+ * other-os lpar.
*/
enum ps3_sys_manager_wake_source {
@@ -189,7 +200,7 @@ enum ps3_sys_manager_wake_source {
PS3_SM_WAKE_DEFAULT = 0,
PS3_SM_WAKE_RTC = 0x00000040,
PS3_SM_WAKE_RTC_ERROR = 0x00000080,
- PS3_SM_WAKE_P_O_R = 0x10000000,
+ PS3_SM_WAKE_P_O_R = 0x80000000,
};
/**
@@ -418,8 +429,10 @@ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
switch (event.type) {
case PS3_SM_EVENT_POWER_PRESSED:
- dev_dbg(&dev->core, "%s:%d: POWER_PRESSED\n",
- __func__, __LINE__);
+ dev_dbg(&dev->core, "%s:%d: POWER_PRESSED (%s)\n",
+ __func__, __LINE__,
+ (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
+ : "hard"));
ps3_sm_force_power_off = 1;
/*
* A memory barrier is used here to sync memory since
@@ -434,8 +447,10 @@ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
__func__, __LINE__, event.value);
break;
case PS3_SM_EVENT_RESET_PRESSED:
- dev_dbg(&dev->core, "%s:%d: RESET_PRESSED\n",
- __func__, __LINE__);
+ dev_dbg(&dev->core, "%s:%d: RESET_PRESSED (%s)\n",
+ __func__, __LINE__,
+ (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
+ : "hard"));
ps3_sm_force_power_off = 0;
/*
* A memory barrier is used here to sync memory since
@@ -622,7 +637,7 @@ static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
ps3_vuart_cancel_async(dev);
ps3_sys_manager_send_attr(dev, 0);
- ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT,
+ ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_REBOOT,
PS3_SM_WAKE_DEFAULT);
ps3_sys_manager_send_request_shutdown(dev);
@@ -699,4 +714,7 @@ static int __init ps3_sys_manager_init(void)
module_init(ps3_sys_manager_init);
/* Module remove not supported. */
+MODULE_AUTHOR("Sony Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PS3 System Manager");
MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a5f0aaaf0dd..a7a0813b24c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -722,7 +722,7 @@ config SCSI_FD_MCS
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY
+ depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
---help---
Formerly called GDT SCSI Disk Array Controller Support.
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bfd0e64964a..c05092fd3a9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -144,51 +144,77 @@ static char *aac_get_status_string(u32 status);
*/
static int nondasd = -1;
-static int aac_cache = 0;
+static int aac_cache;
static int dacmode = -1;
-
+int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
module_param(nondasd, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
+ " 0=off, 1=on");
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache");
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
+ "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
+ "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
+ "\tbit 2 - Disable only if Battery not protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
+MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
+ " 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
+MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
+ " adapter for foreign arrays.\n"
+ "This is typically needed in systems that do not have a BIOS."
+ " 0=off, 1=on");
+module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(msi, "IRQ handling."
+ " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS.");
+MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
+ " adapter to have it's kernel up and\n"
+ "running. This is typically adjusted for large systems that do not"
+ " have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems.");
+MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
+ " applications to pick up AIFs before\n"
+ "deregistering them. This is typically adjusted for heavily burdened"
+ " systems.");
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware.");
+MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
+ " blocks (FIB) allocated. Valid values are 512 and down. Default is"
+ " to use suggestion from Firmware.");
int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
+MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
+ " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
+ " suggestion from Firmware.");
int update_interval = 30 * 60;
module_param(update_interval, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter.");
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
+ " updates issued to adapter.");
int check_interval = 24 * 60 * 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks.");
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
+ " checks.");
int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it.");
+MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the"
+ " adapter. a value of -1 forces the reset to adapters programmed to"
+ " ignore it.");
int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
+ " -1=protect 0=off, 1=on");
-int aac_reset_devices = 0;
+int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
@@ -1315,7 +1341,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
dev->supplement_adapter_info.VpdInfo.Tsid);
}
- if (!aac_check_reset || ((aac_check_reset != 1) &&
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
(dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_IGNORE_RESET))) {
printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
@@ -1353,13 +1379,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (nondasd != -1)
dev->nondasd_support = (nondasd!=0);
- if(dev->nondasd_support != 0) {
+ if (dev->nondasd_support && !dev->in_reset)
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
- }
dev->dac_support = 0;
if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
- printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
+ if (!dev->in_reset)
+ printk(KERN_INFO "%s%d: 64bit support enabled.\n",
+ dev->name, dev->id);
dev->dac_support = 1;
}
@@ -1369,8 +1396,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
if(dev->dac_support != 0) {
if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
- printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
- dev->name, dev->id);
+ if (!dev->in_reset)
+ printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
+ dev->name, dev->id);
} else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) {
printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3195d29f217..ace0b751c13 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1026,6 +1026,7 @@ struct aac_dev
u8 raw_io_64;
u8 printf_enabled;
u8 in_reset;
+ u8 msi;
};
#define aac_adapter_interrupt(dev) \
@@ -1881,6 +1882,7 @@ extern int startup_timeout;
extern int aif_timeout;
extern int expose_physicals;
extern int aac_reset_devices;
+extern int aac_msi;
extern int aac_commit;
extern int update_interval;
extern int check_interval;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 81b36923e0e..47434499e82 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1458,7 +1458,7 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
- if (!aac_check_reset || ((aac_check_reset != 1) &&
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
(aac->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_IGNORE_RESET)))
goto out;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e80d2a0c46a..ae5f74fb62d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -275,9 +275,9 @@ static const char *aac_info(struct Scsi_Host *shost)
/**
* aac_get_driver_ident
- * @devtype: index into lookup table
+ * @devtype: index into lookup table
*
- * Returns a pointer to the entry in the driver lookup table.
+ * Returns a pointer to the entry in the driver lookup table.
*/
struct aac_driver_ident* aac_get_driver_ident(int devtype)
@@ -494,13 +494,14 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct scsi_device * sdev = to_scsi_device(dev);
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
if (sdev_channel(sdev) != CONTAINER_CHANNEL)
return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
- ? "Hidden\n" : "JBOD");
+ ? "Hidden\n" :
+ ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
return snprintf(buf, PAGE_SIZE, "%s\n",
- get_container_type(((struct aac_dev *)(sdev->host->hostdata))
- ->fsa_dev[sdev_id(sdev)].type));
+ get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
}
static struct device_attribute aac_raid_level_attr = {
@@ -641,7 +642,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
AAC_OPTION_MU_RESET) &&
aac_check_reset &&
((aac_check_reset != 1) ||
- (aac->supplement_adapter_info.SupportedOptions2 &
+ !(aac->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_IGNORE_RESET)))
aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
@@ -860,8 +861,8 @@ ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf)
le32_to_cpu(dev->adapter_info.serial[0]));
if (len &&
!memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
- sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len],
- buf, len))
+ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
+ buf, len-1))
len = snprintf(buf, PAGE_SIZE, "%.*s\n",
(int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
dev->supplement_adapter_info.MfgPcbaSerialNo);
@@ -1004,32 +1005,32 @@ static const struct file_operations aac_cfg_fops = {
static struct scsi_host_template aac_driver_template = {
.module = THIS_MODULE,
- .name = "AAC",
+ .name = "AAC",
.proc_name = AAC_DRIVERNAME,
- .info = aac_info,
- .ioctl = aac_ioctl,
+ .info = aac_info,
+ .ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = aac_compat_ioctl,
#endif
- .queuecommand = aac_queuecommand,
- .bios_param = aac_biosparm,
+ .queuecommand = aac_queuecommand,
+ .bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.slave_configure = aac_slave_configure,
.change_queue_depth = aac_change_queue_depth,
.sdev_attrs = aac_dev_attrs,
.eh_abort_handler = aac_eh_abort,
.eh_host_reset_handler = aac_eh_reset,
- .can_queue = AAC_NUM_IO_FIB,
- .this_id = MAXIMUM_NUM_CONTAINERS,
- .sg_tablesize = 16,
- .max_sectors = 128,
+ .can_queue = AAC_NUM_IO_FIB,
+ .this_id = MAXIMUM_NUM_CONTAINERS,
+ .sg_tablesize = 16,
+ .max_sectors = 128,
#if (AAC_NUM_IO_FIB > 256)
.cmd_per_lun = 256,
#else
- .cmd_per_lun = AAC_NUM_IO_FIB,
+ .cmd_per_lun = AAC_NUM_IO_FIB,
#endif
.use_clustering = ENABLE_CLUSTERING,
- .emulated = 1,
+ .emulated = 1,
};
static void __aac_shutdown(struct aac_dev * aac)
@@ -1039,6 +1040,8 @@ static void __aac_shutdown(struct aac_dev * aac)
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
free_irq(aac->pdev->irq, aac);
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
}
static int __devinit aac_probe_one(struct pci_dev *pdev,
@@ -1254,7 +1257,7 @@ static struct pci_driver aac_pci_driver = {
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
.remove = __devexit_p(aac_remove_one),
- .shutdown = aac_shutdown,
+ .shutdown = aac_shutdown,
};
static int __init aac_init(void)
@@ -1271,7 +1274,7 @@ static int __init aac_init(void)
aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
if (aac_cfg_major < 0) {
printk(KERN_WARNING
- "aacraid: unable to register \"aac\" device.\n");
+ "aacraid: unable to register \"aac\" device.\n");
}
return 0;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index a08bbf1fd76..1f18b83e1e0 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -625,8 +625,11 @@ int _aac_rx_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
aac_adapter_comm(dev, dev->comm_interface);
- if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
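
The rx.c hunk wires the new msi module parameter into interrupt setup. A simplified sketch of the enable-with-fallback pattern it uses (handler here stands in for dev->a_ops.adapter_intr): pci_enable_msi() rewrites pdev->irq to the allocated MSI vector, which is why request_irq() now takes dev->pdev->irq rather than the SCSI host's saved irq, and why a failed request_irq() must also undo the MSI enable.

    /* Try MSI only if asked for; fall back to legacy INTx otherwise. */
    dev->msi = aac_msi && !pci_enable_msi(dev->pdev);   /* 0 return means enabled */
    if (request_irq(dev->pdev->irq, handler,
                    IRQF_SHARED | IRQF_DISABLED, "aacraid", dev) < 0) {
            if (dev->msi)
                    pci_disable_msi(dev->pdev);  /* drop back to INTx state */
            /* ... error path ... */
    }
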
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 85b91bc578c..cfc3410ec07 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
@@ -385,7 +386,7 @@ int aac_sa_init(struct aac_dev *dev)
if(aac_init_adapter(dev) == NULL)
goto error_irq;
- if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED,
"aacraid", (void *)dev ) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
@@ -403,7 +404,7 @@ int aac_sa_init(struct aac_dev *dev)
error_irq:
aac_sa_disable_interrupt(dev);
- free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+ free_irq(dev->pdev->irq, (void *)dev);
error_iounmap:
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ccef891d642..3c2d6888bb8 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -566,7 +566,7 @@ typedef struct asc_dvc_var {
ASC_SCSI_BIT_ID_TYPE unit_not_ready;
ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
ASC_SCSI_BIT_ID_TYPE start_motor;
- uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8);
+ uchar *overrun_buf;
dma_addr_t overrun_dma;
uchar scsi_reset_wait;
uchar chip_no;
@@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
*/
if (ASC_NARROW_BOARD(boardp)) {
ASC_DBG(2, "AscInitAsc1000Driver()\n");
+
+ asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
+ if (!asc_dvc_varp->overrun_buf) {
+ ret = -ENOMEM;
+ goto err_free_wide_mem;
+ }
warn_code = AscInitAsc1000Driver(asc_dvc_varp);
if (warn_code || asc_dvc_varp->err_code) {
@@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
"warn 0x%x, error 0x%x\n",
asc_dvc_varp->init_state, warn_code,
asc_dvc_varp->err_code);
- if (asc_dvc_varp->err_code)
+ if (asc_dvc_varp->err_code) {
ret = -ENODEV;
+ kfree(asc_dvc_varp->overrun_buf);
+ }
}
} else {
if (advansys_wide_init_chip(shost))
@@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost)
dma_unmap_single(board->dev,
board->dvc_var.asc_dvc_var.overrun_dma,
ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
+ kfree(board->dvc_var.asc_dvc_var.overrun_buf);
} else {
iounmap(board->ioremap_addr);
advansys_wide_free_mem(board);
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index fa7c5290257..912e6b755f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -292,7 +292,7 @@ struct scb_header {
#define INITIATE_SSP_TASK 0x00
#define INITIATE_LONG_SSP_TASK 0x01
#define INITIATE_BIDIR_SSP_TASK 0x02
-#define ABORT_TASK 0x03
+#define SCB_ABORT_TASK 0x03
#define INITIATE_SSP_TMF 0x04
#define SSP_TARG_GET_DATA 0x05
#define SSP_TARG_GET_DATA_GOOD 0x06
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 87b2f6e6adf..b52124f3d3a 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -369,7 +369,7 @@ int asd_abort_task(struct sas_task *task)
return -ENOMEM;
scb = ascb->scb;
- scb->header.opcode = ABORT_TASK;
+ scb->header.opcode = SCB_ABORT_TASK;
switch (task->task_proto) {
case SAS_PROTOCOL_SATA:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index fb5f2028438..a715632e19d 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2018,6 +2018,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* the upper layers to process. This would have been set
* correctly by fas216_std_done.
*/
+ scsi_eh_restore_cmnd(SCpnt, &info->ses);
SCpnt->scsi_done(SCpnt);
}
@@ -2103,23 +2104,12 @@ request_sense:
if (SCpnt->cmnd[0] == REQUEST_SENSE)
goto done;
+ scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0);
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"requesting sense");
- memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd));
- SCpnt->cmnd[0] = REQUEST_SENSE;
- SCpnt->cmnd[1] = SCpnt->device->lun << 5;
- SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer);
- SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
- SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer;
- SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
- SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer);
+ init_SCp(SCpnt);
SCpnt->SCp.Message = 0;
SCpnt->SCp.Status = 0;
- SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
- SCpnt->sc_data_direction = DMA_FROM_DEVICE;
- SCpnt->use_sg = 0;
SCpnt->tag = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
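
The open-coded REQUEST SENSE setup is replaced by the scsi_eh helpers, which save the live command, rewrite it as a sense request, and later restore it. A rough sketch of the pairing (simplified; passing a NULL CDB with a non-zero sense_bytes asks the helper to build the REQUEST SENSE itself):

    struct scsi_eh_save ses;

    /* Save SCpnt's state and turn it into a REQUEST SENSE for its device. */
    scsi_eh_prep_cmnd(SCpnt, &ses, NULL, 0, ~0);

    /* ... issue SCpnt and wait for the sense data to come back ... */

    /* Put the original CDB, buffers and status back before completing. */
    scsi_eh_restore_cmnd(SCpnt, &ses);
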
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 00e5f055afd..3e73e264972 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -16,6 +16,8 @@
#define NO_IRQ 255
#endif
+#include <scsi/scsi_eh.h>
+
#include "queue.h"
#include "msgqueue.h"
@@ -311,6 +313,7 @@ typedef struct {
/* miscellaneous */
int internal_done; /* flag to indicate request done */
+ struct scsi_eh_save ses; /* holds request sense restore info */
unsigned long magic_end;
} FAS216_Info;
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c82523908c2..6d67f5c0eb8 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -642,12 +642,15 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
*cnt, vendor, device));
pdev = NULL;
- while ((pdev = pci_find_device(vendor, device, pdev))
+ while ((pdev = pci_get_device(vendor, device, pdev))
!= NULL) {
if (pci_enable_device(pdev))
continue;
- if (*cnt >= MAXHA)
+ if (*cnt >= MAXHA) {
+ pci_dev_put(pdev);
return;
+ }
+
/* GDT PCI controller found, resources are already in pdev */
pcistr[*cnt].pdev = pdev;
pcistr[*cnt].irq = pdev->irq;
@@ -4836,6 +4839,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
if (error)
goto out_free_coal_stat;
list_add_tail(&ha->list, &gdth_instances);
+
+ scsi_scan_host(shp);
+
return 0;
out_free_coal_stat:
@@ -4963,6 +4969,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
if (error)
goto out_free_coal_stat;
list_add_tail(&ha->list, &gdth_instances);
+
+ scsi_scan_host(shp);
+
return 0;
out_free_ccb_phys:
@@ -5100,6 +5109,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr)
if (error)
goto out_free_coal_stat;
list_add_tail(&ha->list, &gdth_instances);
+
+ scsi_scan_host(shp);
+
return 0;
out_free_coal_stat:
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 83567b9755b..2ab2d24dcc1 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -307,6 +307,7 @@ struct lpfc_vport {
uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
struct lpfc_name fc_nodename; /* fc nodename */
struct lpfc_name fc_portname; /* fc portname */
@@ -392,6 +393,13 @@ enum hba_temp_state {
HBA_OVER_TEMP
};
+enum intr_type_t {
+ NONE = 0,
+ INTx,
+ MSI,
+ MSIX,
+};
+
struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
@@ -409,7 +417,7 @@ struct lpfc_hba {
/* This flag is set while issuing */
/* INIT_LINK mailbox command */
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
-#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
+#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
struct lpfc_sli2_slim *slim2p;
struct lpfc_dmabuf hbqslimp;
@@ -487,6 +495,8 @@ struct lpfc_hba {
wait_queue_head_t *work_wait;
struct task_struct *worker_thread;
+ uint32_t hbq_in_use; /* HBQs in use flag */
+ struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
@@ -555,7 +565,8 @@ struct lpfc_hba {
mempool_t *nlp_mem_pool;
struct fc_host_statistics link_stats;
- uint8_t using_msi;
+ enum intr_type_t intr_type;
+ struct msix_entry msix_entries[1];
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@@ -595,6 +606,8 @@ struct lpfc_hba {
unsigned long last_completion_time;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
+ /* ndlp reference management */
+ spinlock_t ndlp_lock;
/*
* Following bit will be set for all buffer tags which are not
* associated with any HBQ.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4bae4a2ed2f..b12a841703c 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1191,7 +1191,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
- if (ndlp->rport)
+ if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
spin_unlock_irq(shost->host_lock);
}
@@ -1592,9 +1592,11 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
# support this feature
# 0 = MSI disabled (default)
# 1 = MSI enabled
-# Value range is [0,1]. Default value is 0.
+# 2 = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
*/
-LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+ "MSI-X (2), if possible");
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -1946,11 +1948,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
}
/* If HBA encountered an error attention, allow only DUMP
- * mailbox command until the HBA is restarted.
+ * or RESTART mailbox commands until the HBA is restarted.
*/
if ((phba->pport->stopped) &&
- (phba->sysfs_mbox.mbox->mb.mbxCommand
- != MBX_DUMP_MEMORY)) {
+ (phba->sysfs_mbox.mbox->mb.mbxCommand !=
+ MBX_DUMP_MEMORY &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand !=
+ MBX_RESTART)) {
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EPERM;
@@ -2384,7 +2388,8 @@ lpfc_get_node_by_target(struct scsi_target *starget)
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+ if (NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
return ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 50fcb7c930b..848d97744b4 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -53,7 +53,11 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
+ struct lpfc_nodelist *, int);
void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_set_disctmo(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 92441ce610e..3d0ccd9b341 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -294,7 +294,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
- geniocb->context_un.ndlp = ndlp;
+ geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -489,8 +489,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
*/
ndlp = lpfc_findnode_did(vport,
Did);
- if (ndlp && (ndlp->nlp_type &
- NLP_FCP_TARGET))
+ if (ndlp &&
+ NLP_CHK_NODE_ACT(ndlp)
+ && (ndlp->nlp_type &
+ NLP_FCP_TARGET))
lpfc_setup_disc_node
(vport, Did);
else if (lpfc_ns_cmd(vport,
@@ -773,7 +775,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
did, irsp->ulpStatus, irsp->un.ulpWord[4],
- vport->fc_flag, vport->fc_rscn_id_cnt)
+ vport->fc_flag, vport->fc_rscn_id_cnt);
}
/* This is a target port, unregistered port, or the GFF_ID failed */
@@ -1064,7 +1066,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
int rc = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
+ || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
rc=1;
goto ns_cmd_exit;
}
@@ -1213,8 +1216,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
- lpfc_nlp_get(ndlp);
-
+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ * to hold ndlp reference for the corresponding callback function.
+ */
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
/* On success, The cmpl function will free the buffers */
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
@@ -1222,9 +1226,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmdcode, ndlp->nlp_DID, 0);
return 0;
}
-
rc=6;
+
+ /* Decrement ndlp reference count to release ndlp reference held
+ * for the failed command's callback function.
+ */
lpfc_nlp_put(ndlp);
+
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);
@@ -1271,6 +1279,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto fail_out;
+
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1294,6 +1305,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
break;
}
+
+fail_out:
lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
@@ -1650,12 +1663,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
bpl->tus.w = le32_to_cpu(bpl->tus.w);
cmpl = lpfc_cmpl_ct_cmd_fdmi;
- lpfc_nlp_get(ndlp);
+ /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
+ * to hold ndlp reference for the corresponding callback function.
+ */
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
return 0;
+ /* Decrement ndlp reference count to release ndlp reference held
+ * for the failed command's callback function.
+ */
lpfc_nlp_put(ndlp);
+
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
kfree(bmp);
@@ -1698,7 +1717,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (ndlp) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
if (init_utsname()->nodename[0] != '\0')
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
else
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index cfe81c50529..2db0b74b6fa 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -73,6 +73,12 @@ struct lpfc_nodelist {
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+ uint16_t nlp_usg_map; /* ndlp management usage bitmap */
+#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
+#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
+#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
+#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
+
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct fc_rport *rport; /* Corresponding FC transport
port structure */
@@ -85,25 +91,51 @@ struct lpfc_nodelist {
};
/* Defines for nlp_flag (uint32) */
-#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
-#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
-#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
-#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
-#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
-#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
-#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */
-#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
-#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
-#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
-#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */
-#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */
-#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful
+#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
+#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
+#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
+#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */
+#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
+#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
+#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
+#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
+#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
ACC */
-#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
+#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
NPR list */
-#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */
-#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
+#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */
+#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
+#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
+
+/* ndlp usage management macros */
+#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
+ & NLP_USG_NODE_ACT_BIT) \
+ && \
+ !((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_ACK_BIT))
+#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_NODE_ACT_BIT)
+#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ = NLP_USG_NODE_ACT_BIT)
+#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
+ &= ~NLP_USG_NODE_ACT_BIT)
+#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_IACT_REQ_BIT)
+#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_IACT_REQ_BIT)
+#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_REQ_BIT)
+#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_FREE_REQ_BIT)
+#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
+ & NLP_USG_FREE_ACK_BIT)
+#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
+ |= NLP_USG_FREE_ACK_BIT)
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c6b739dc6bc..cbb68a94225 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -113,6 +113,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (elsiocb == NULL)
return NULL;
+
icmd = &elsiocb->iocb;
/* fill in BDEs for command */
@@ -134,9 +135,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (!prsp || !prsp->virt)
goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
- } else {
+ } else
prsp = NULL;
- }
/* Allocate buffer for Buffer ptr list */
pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -246,7 +246,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
sp = &phba->fc_fabparam;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp) {
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
err = 1;
goto fail;
}
@@ -282,6 +282,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
mbox->vport = vport;
+ /* increment the reference count on ndlp to hold reference
+ * for the callback routine.
+ */
mbox->context2 = lpfc_nlp_get(ndlp);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -293,6 +296,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
return 0;
fail_issue_reg_login:
+ /* decrement the reference count on ndlp just incremented
+ * for the failed mbox command.
+ */
lpfc_nlp_put(ndlp);
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -381,6 +387,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(np))
+ continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
continue;
@@ -456,6 +464,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
}
+ /* Decrement ndlp reference count indicating that ndlp can be
+ * safely released when other references to it are done.
+ */
lpfc_nlp_put(ndlp);
ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
@@ -467,22 +478,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
goto fail;
-
lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto fail;
}
memcpy(&ndlp->nlp_portname, &sp->portName,
sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName,
sizeof(struct lpfc_name));
+ /* Set state will put ndlp onto node list if not already done */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else {
- /* This side will wait for the PLOGI */
+ } else
+ /* This side will wait for the PLOGI, decrement ndlp reference
+ * count indicating that ndlp can be released when other
+ * references to it are done.
+ */
lpfc_nlp_put(ndlp);
- }
/* If we are pt2pt with another NPort, force NPIV off! */
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -728,16 +746,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
if (!ndlp)
return 0;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
- } else {
- lpfc_dequeue_node(vport, ndlp);
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
}
- if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
+ if (lpfc_issue_els_flogi(vport, ndlp, 0))
/* This decrement of reference count to node shall kick off
* the release of the node.
*/
lpfc_nlp_put(ndlp);
- }
+
return 1;
}
@@ -755,9 +778,15 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
if (!ndlp)
return 0;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
- } else {
- lpfc_dequeue_node(vport, ndlp);
+ /* Put ndlp onto node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 0;
}
+
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
/* decrement node reference count to trigger the release of
* the node.
@@ -816,7 +845,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
*/
new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
- if (new_ndlp == ndlp)
+ if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
return ndlp;
if (!new_ndlp) {
@@ -827,8 +856,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
if (!new_ndlp)
return ndlp;
-
lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
+ } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+ new_ndlp = lpfc_enable_node(vport, new_ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!new_ndlp)
+ return ndlp;
}
lpfc_unreg_rpi(vport, new_ndlp);
@@ -839,6 +872,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ /* Set state will put new_ndlp on to node list if not already done */
lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
/* Move this back to NPR state */
@@ -912,7 +946,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->un.elsreq64.remoteID);
ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
- if (!ndlp) {
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
@@ -962,12 +996,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* PLOGI failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(irsp))
rc = NLP_STE_FREED_NODE;
- } else {
+ else
rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
- }
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
@@ -1015,8 +1048,10 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
ndlp = lpfc_findnode_did(vport, did);
- /* If ndlp if not NULL, we will bump the reference count on it */
+ if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = NULL;
+ /* If ndlp is not NULL, we will bump the reference count on it */
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_PLOGI);
@@ -1097,18 +1132,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* PRLI failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(irsp))
goto out;
- } else {
+ else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
- }
- } else {
+ } else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
- }
-
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
@@ -1275,15 +1307,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* ADISC failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp)) {
+ if (!lpfc_error_lost_link(irsp))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- }
- } else {
+ } else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- }
if (disc && vport->num_disc_nodes) {
/* Check to see if there are more ADISCs to be sent */
@@ -1443,14 +1473,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
- } else {
+ } else
/* Good status, call state machine.
* This will unregister the rpi if needed.
*/
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_LOGO);
- }
-
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
@@ -1556,11 +1584,19 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(SCR));
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
- if (!ndlp)
- return 1;
- lpfc_nlp_init(vport, ndlp, nportid);
+ ndlp = lpfc_findnode_did(vport, nportid);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 1;
+ lpfc_nlp_init(vport, ndlp, nportid);
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 1;
+ }
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
@@ -1623,11 +1659,19 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
cmdsize = (sizeof(uint32_t) + sizeof(FARP));
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
- if (!ndlp)
- return 1;
- lpfc_nlp_init(vport, ndlp, nportid);
+ ndlp = lpfc_findnode_did(vport, nportid);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 1;
+ lpfc_nlp_init(vport, ndlp, nportid);
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ return 1;
+ }
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_RNID);
@@ -1657,7 +1701,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
ondlp = lpfc_findnode_did(vport, nportid);
- if (ondlp) {
+ if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -1690,6 +1734,7 @@ void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_work_evt *evtp;
spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
@@ -1697,8 +1742,12 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
- if (!list_empty(&nlp->els_retry_evt.evt_listp))
+ if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
list_del_init(&nlp->els_retry_evt.evt_listp);
+ /* Decrement nlp reference count held for the delayed retry */
+ evtp = &nlp->els_retry_evt;
+ lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+ }
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
spin_lock_irq(shost->host_lock);
@@ -1842,13 +1891,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *elscmd++;
}
- if (ndlp)
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
did = irsp->un.elsreq64.remoteID;
ndlp = lpfc_findnode_did(vport, did);
- if (!ndlp && (cmd != ELS_CMD_PLOGI))
+ if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ && (cmd != ELS_CMD_PLOGI))
return 1;
}
@@ -1870,18 +1920,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOERR_ILLEGAL_COMMAND:
- if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
- (cmd == ELS_CMD_FDISC)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0124 FDISC failed (3/6) "
- "retrying...\n");
- lpfc_mbx_unreg_vpi(vport);
- retry = 1;
- /* FDISC retry policy */
- maxretry = 48;
- if (cmdiocb->retry >= 32)
- delay = 1000;
- }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0124 Retry illegal cmd x%x "
+ "retry:x%x delay:x%x\n",
+ cmd, cmdiocb->retry, delay);
+ retry = 1;
+ /* All command's retry policy */
+ maxretry = 8;
+ if (cmdiocb->retry > 2)
+ delay = 1000;
break;
case IOERR_NO_RESOURCES:
@@ -1967,6 +2014,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case LSRJT_LOGICAL_ERR:
+ /* There are some cases where switches return this
+ * error when they are not ready and should be returning
+ * Logical Busy. We should delay every time.
+ */
+ if (cmd == ELS_CMD_FDISC &&
+ stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
+ maxretry = 3;
+ delay = 1000;
+ retry = 1;
+ break;
+ }
case LSRJT_PROTOCOL_ERR:
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
@@ -1996,7 +2054,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
if ((cmd == ELS_CMD_FLOGI) &&
- (phba->fc_topology != TOPOLOGY_LOOP)) {
+ (phba->fc_topology != TOPOLOGY_LOOP) &&
+ !lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
retry = 1;
maxretry = 48;
@@ -2322,6 +2381,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
lpfc_unreg_rpi(vport, ndlp);
+ /* Increment reference count to ndlp to hold the
+ * reference to ndlp for the callback function.
+ */
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
@@ -2335,9 +2397,13 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_STE_REG_LOGIN_ISSUE);
}
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
- != MBX_NOT_FINISHED) {
+ != MBX_NOT_FINISHED)
goto out;
- }
+ else
+ /* Decrement the ndlp reference count we
+ * set for this failed mailbox command.
+ */
+ lpfc_nlp_put(ndlp);
/* ELS rsp: Cannot issue reg_login for <NPortid> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -2796,6 +2862,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
@@ -2833,6 +2901,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS PLOGIs */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
@@ -2869,6 +2939,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
int i;
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ /* Indicate we are walking lpfc_els_flush_rscn on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
+
for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
vport->fc_rscn_id_list[i] = NULL;
@@ -2878,6 +2958,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
+ /* Indicate we are done walking this fc_rscn_id_list */
+ vport->fc_rscn_flush = 0;
}
int
@@ -2887,6 +2969,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
D_ID rscn_did;
uint32_t *lp;
uint32_t payload_len, i;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ns_did.un.word = did;
@@ -2898,6 +2981,15 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
if (vport->fc_flag & FC_RSCN_DISCOVERY)
return did;
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+ /* Indicate we are walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
lp = vport->fc_rscn_id_list[i]->virt;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -2908,16 +3000,16 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
switch (rscn_did.un.b.resv) {
case 0: /* Single N_Port ID affected */
if (ns_did.un.word == rscn_did.un.word)
- return did;
+ goto return_did_out;
break;
case 1: /* Whole N_Port Area affected */
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
- return did;
+ goto return_did_out;
break;
case 2: /* Whole N_Port Domain affected */
if (ns_did.un.b.domain == rscn_did.un.b.domain)
- return did;
+ goto return_did_out;
break;
default:
/* Unknown Identifier in RSCN node */
@@ -2926,11 +3018,17 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
"RSCN payload Data: x%x\n",
rscn_did.un.word);
case 3: /* Whole Fabric affected */
- return did;
+ goto return_did_out;
}
}
}
+ /* Indicate we are done with walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
return 0;
+return_did_out:
+ /* Indicate we are done with walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
+ return did;
}
static int
@@ -2943,7 +3041,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
*/
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
+ if (!NLP_CHK_NODE_ACT(ndlp) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
continue;
@@ -2971,7 +3070,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *lp, *datap;
IOCB_t *icmd;
uint32_t payload_len, length, nportid, *cmd;
- int rscn_cnt = vport->fc_rscn_id_cnt;
+ int rscn_cnt;
int rscn_id = 0, hba_id = 0;
int i;
@@ -2984,7 +3083,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* RSCN received */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0214 RSCN received Data: x%x x%x x%x x%x\n",
- vport->fc_flag, payload_len, *lp, rscn_cnt);
+ vport->fc_flag, payload_len, *lp,
+ vport->fc_rscn_id_cnt);
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
@@ -3022,7 +3122,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"0214 Ignore RSCN "
"Data: x%x x%x x%x x%x\n",
vport->fc_flag, payload_len,
- *lp, rscn_cnt);
+ *lp, vport->fc_rscn_id_cnt);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state,
@@ -3034,6 +3134,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
}
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_rscn_flush) {
+ /* Another thread is walking fc_rscn_id_list on this vport */
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_DISCOVERY;
+ return 0;
+ }
+ /* Indicate we are walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 1;
+ spin_unlock_irq(shost->host_lock);
+ /* Get the array count after successfully acquiring the token */
+ rscn_cnt = vport->fc_rscn_id_cnt;
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->context2 to process later.
*/
@@ -3055,7 +3167,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((rscn_cnt) &&
(payload_len + length <= LPFC_BPL_SIZE)) {
*cmd &= ELS_CMD_MASK;
- *cmd |= be32_to_cpu(payload_len + length);
+ *cmd |= cpu_to_be32(payload_len + length);
memcpy(((uint8_t *)cmd) + length, lp,
payload_len);
} else {
@@ -3066,7 +3178,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*/
cmdiocb->context2 = NULL;
}
-
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0235 Deferred RSCN "
@@ -3083,9 +3194,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_rscn_id_cnt, vport->fc_flag,
vport->port_state);
}
+ /* Indicate we are done walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
-
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
spin_lock_irq(shost->host_lock);
@@ -3093,7 +3205,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(shost->host_lock);
return 0;
}
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RSCN: did:x%x/ste:x%x flg:x%x",
ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
@@ -3102,20 +3213,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_flag |= FC_RSCN_MODE;
spin_unlock_irq(shost->host_lock);
vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
+ /* Indicate we are done walking fc_rscn_id_list on this vport */
+ vport->fc_rscn_flush = 0;
/*
* If we zero, cmdiocb->context2, the calling routine will
* not try to free it.
*/
cmdiocb->context2 = NULL;
-
lpfc_set_disctmo(vport);
-
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
-
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
-
return lpfc_els_handle_rscn(vport);
}
@@ -3145,7 +3254,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
vport->num_disc_nodes = 0;
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
/* Wait for NameServer query cmpl before we can
@@ -3155,25 +3265,35 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* If login to NameServer does not exist, issue one */
/* Good status, issue PLOGI to NameServer */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp)
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
/* Wait for NameServer login cmpl before we can
continue */
return 1;
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
- if (!ndlp) {
- lpfc_els_flush_rscn(vport);
- return 0;
+ if (ndlp) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_PLOGI_ISSUE);
+ if (!ndlp) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
+ ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
} else {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
lpfc_nlp_init(vport, ndlp, NameServer_DID);
- ndlp->nlp_type |= NLP_FABRIC;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(vport, NameServer_DID, 0);
- /* Wait for NameServer login cmpl before we can
- continue */
- return 1;
}
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_issue_els_plogi(vport, NameServer_DID, 0);
+ /* Wait for NameServer login cmpl before we can
+ * continue
+ */
+ return 1;
}
lpfc_els_flush_rscn(vport);
@@ -3672,6 +3792,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3697,6 +3819,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
@@ -3936,7 +4060,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t cmd, did, newnode, rjt_err = 0;
IOCB_t *icmd = &elsiocb->iocb;
- if (vport == NULL || elsiocb->context2 == NULL)
+ if (!vport || !(elsiocb->context2))
goto dropit;
newnode = 0;
@@ -3971,14 +4095,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
- if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
ndlp->nlp_type |= NLP_FABRIC;
+ } else {
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto dropit;
}
- }
- else {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
/* This is similar to the new node path */
- lpfc_nlp_get(ndlp);
+ ndlp = lpfc_nlp_get(ndlp);
+ if (!ndlp)
+ goto dropit;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
}
@@ -3987,6 +4117,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvFrame++;
if (elsiocb->context1)
lpfc_nlp_put(elsiocb->context1);
+
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->vport = vport;
@@ -4007,8 +4138,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
if (vport->port_state < LPFC_DISC_AUTH) {
- rjt_err = LSRJT_UNABLE_TPC;
- break;
+ if (!(phba->pport->fc_flag & FC_PT2PT) ||
+ (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+ }
+ /* We get here, and drop thru, if we are PT2PT with
+ * another NPort and the other side has initiated
+ * the PLOGI before responding to our FLOGI.
+ */
}
shost = lpfc_shost_from_vport(vport);
@@ -4251,15 +4389,15 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
vport = lpfc_find_vport_by_vpid(phba, vpi);
}
}
- /* If there are no BDEs associated
- * with this IOCB, there is nothing to do.
- */
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
+ */
if (icmd->ulpBdeCount == 0)
return;
- /* type of ELS cmd is first 32bit word
- * in packet
- */
+ /* type of ELS cmd is first 32bit word
+ * in packet
+ */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
elsiocb->context2 = bdeBuf1;
} else {
@@ -4314,6 +4452,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0348 NameServer login: node freed\n");
+ return;
+ }
}
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -4360,6 +4510,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
switch (mb->mbxStatus) {
case 0x11: /* unsupported feature */
case 0x9603: /* max_vpi exceeded */
+ case 0x9602: /* Link event since CLEAR_LA */
/* giving up on vport registration */
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
spin_lock_irq(shost->host_lock);
@@ -4373,7 +4524,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- lpfc_initial_fdisc(vport);
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_initial_flogi(vport);
+ else
+ lpfc_initial_fdisc(vport);
break;
}
@@ -4471,7 +4625,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-
lpfc_nlp_put(ndlp);
/* giving up on FDISC. Cancel discovery timer */
lpfc_can_disctmo(vport);
@@ -4492,8 +4645,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (np->nlp_state != NLP_STE_NPR_NODE
- || !(np->nlp_flag & NLP_NPR_ADISC))
+ if (!NLP_CHK_NODE_ACT(np) ||
+ (np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
continue;
spin_lock_irq(shost->host_lock);
np->nlp_flag &= ~NLP_NPR_ADISC;
@@ -4599,6 +4753,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
+ struct lpfc_nodelist *ndlp;
+ ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -4607,6 +4763,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_free_iocb(phba, cmdiocb);
vport->unreg_vpi_cmpl = VPORT_ERROR;
+
+ /* Trigger the release of the ndlp after logo */
+ lpfc_nlp_put(ndlp);
}
int
@@ -4686,11 +4845,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
repeat:
iocb = NULL;
spin_lock_irqsave(&phba->hbalock, iflags);
- /* Post any pending iocb to the SLI layer */
+ /* Post any pending iocb to the SLI layer */
if (atomic_read(&phba->fabric_iocb_count) == 0) {
list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
list);
if (iocb)
+ /* Increment fabric iocb count to hold the position */
atomic_inc(&phba->fabric_iocb_count);
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -4737,9 +4897,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
int blocked;
blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
- /* Start a timer to unblock fabric
- * iocbs after 100ms
- */
+ /* Start a timer to unblock fabric iocbs after 100ms */
if (!blocked)
mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
@@ -4787,8 +4945,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
atomic_dec(&phba->fabric_iocb_count);
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
- /* Post any pending iocbs to HBA */
- lpfc_resume_fabric_iocbs(phba);
+ /* Post any pending iocbs to HBA */
+ lpfc_resume_fabric_iocbs(phba);
}
}
@@ -4807,6 +4965,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ if (ready)
+ /* Increment fabric iocb count to hold the position */
+ atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
@@ -4817,7 +4978,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
"Fabric sched2: ste:x%x",
iocb->vport->port_state, 0, 0);
- atomic_inc(&phba->fabric_iocb_count);
ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
if (ret == IOCB_ERROR) {
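[Editor's note] Several hunks above introduce vport->fc_rscn_flush as a single-owner token: a thread claims it under the host lock before walking fc_rscn_id_list, concurrent walkers back off, and the owner clears it when the walk is finished. A minimal pthread sketch of that claim/walk/release shape follows; the names (rscn_flush, walk_rscn_list) and the back-off behaviour are illustrative assumptions, not the driver's API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int rscn_flush; /* 1 while some thread owns the list walk */

/* Returns 0 if another thread already owns the walk, 1 after a full walk. */
static int walk_rscn_list(void)
{
    pthread_mutex_lock(&lock);
    if (rscn_flush) { /* someone else is walking: back off */
        pthread_mutex_unlock(&lock);
        return 0;
    }
    rscn_flush = 1; /* claim the token */
    pthread_mutex_unlock(&lock);

    /* ... walk the list without holding the lock ... */

    rscn_flush = 0; /* release the token when done */
    return 1;
}

int main(void)
{
    printf("first walk: %d\n", walk_rscn_list()); /* 1 */
    return 0;
}
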
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dc042bd97ba..bd572d6b60a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
+ (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
- }
}
@@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
int rc;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
-
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
(ndlp->nlp_DID == NameServer_DID)))
@@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mb;
int i;
- if (phba->link_state == LPFC_LINK_DOWN) {
+ if (phba->link_state == LPFC_LINK_DOWN)
return 0;
- }
spin_lock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
@@ -684,20 +683,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
-
if (ndlp->nlp_type & NLP_FABRIC) {
- /* On Linkup its safe to clean up the ndlp
- * from Fabric connections.
- */
+ /* On Linkup its safe to clean up the ndlp
+ * from Fabric connections.
+ */
if (ndlp->nlp_DID != Fabric_DID)
lpfc_unreg_rpi(vport, ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
- /* Fail outstanding IO now since device is
- * marked for PLOGI.
- */
+ /* Fail outstanding IO now since device is
+ * marked for PLOGI.
+ */
lpfc_unreg_rpi(vport, ndlp);
}
}
@@ -799,21 +799,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irq(&phba->hbalock);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
- vport->num_disc_nodes = 0;
- /* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
- lpfc_els_disc_plogi(vport);
-
- if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- }
-
- vport->port_state = LPFC_VPORT_READY;
-
out:
/* Device Discovery completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1133,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (la->attType == AT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
- lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
@@ -1150,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbx_process_link_up(phba, la);
} else {
phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ if (phba->link_flag & LS_LOOPBACK_MODE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1308 Link Down Event in loop back mode "
+ "x%x received "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ }
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
+ }
lpfc_mbx_issue_link_down(phba);
}
@@ -1305,7 +1303,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_nlp_put(ndlp);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
@@ -1313,6 +1310,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery */
lpfc_disc_start(vport);
+ /* Decrement the reference count to ndlp after the
+ * references to the ndlp are done.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1320,6 +1321,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
+ /* Decrement the reference count to ndlp after the references
+ * to the ndlp are done.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1327,8 +1332,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
-
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -1356,6 +1359,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Drop the reference count from the mbox at the end after
+ * all the current references to the ndlp are done.
+ */
+ lpfc_nlp_put(ndlp);
return;
}
@@ -1463,9 +1471,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* registered the port.
*/
if (ndlp->rport && ndlp->rport->dd_data &&
- ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
+ ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
lpfc_nlp_put(ndlp);
- }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport add: did:x%x flg:x%x type x%x",
@@ -1660,6 +1667,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (list_empty(&ndlp->nlp_listp)) {
+ spin_lock_irq(shost->host_lock);
+ list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+ spin_unlock_irq(shost->host_lock);
+ }
+}
+
+void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -1672,7 +1691,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->nlp_listp);
spin_unlock_irq(shost->host_lock);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
- NLP_STE_UNUSED_NODE);
+ NLP_STE_UNUSED_NODE);
+}
+
+void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
+}
+
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int state)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t did;
+ unsigned long flags;
+
+ if (!ndlp)
+ return NULL;
+
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ /* The ndlp should not be in memory free mode */
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0277 lpfc_enable_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ }
+ /* The ndlp should not already be in active mode */
+ if (NLP_CHK_NODE_ACT(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0278 lpfc_enable_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ }
+
+ /* Keep the original DID */
+ did = ndlp->nlp_DID;
+
+ /* re-initialize ndlp except for the ndlp linked list pointer */
+ memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+ sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->vport = vport;
+ ndlp->nlp_sid = NLP_NO_SID;
+ /* ndlp management re-initialize */
+ kref_init(&ndlp->kref);
+ NLP_INT_NODE_ACT(ndlp);
+
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+ if (state != NLP_STE_UNUSED_NODE)
+ lpfc_nlp_set_state(vport, ndlp, state);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node enable: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+ return ndlp;
}
void
@@ -1972,7 +2064,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- lpfc_dequeue_node(vport, ndlp);
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0280 lpfc_cleanup_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ lpfc_dequeue_node(vport, ndlp);
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0281 lpfc_cleanup_node: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ lpfc_disable_node(vport, ndlp);
+ }
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
@@ -1994,12 +2100,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
- lpfc_nlp_put(ndlp);
+ /* We shall not invoke the lpfc_nlp_put to decrement
+ * the ndlp reference count as we are in the process
+ * of lpfc_nlp_release.
+ */
}
}
spin_unlock_irq(&phba->hbalock);
- lpfc_els_abort(phba,ndlp);
+ lpfc_els_abort(phba, ndlp);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@@ -2057,7 +2167,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
}
}
-
lpfc_cleanup_node(vport, ndlp);
/*
@@ -2182,7 +2291,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
return ndlp;
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+ if (!ndlp)
+ return NULL;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
}
+
if (vport->fc_flag & FC_RSCN_MODE) {
if (lpfc_rscn_payload_check(vport, did)) {
/* If we've already received a PLOGI from this NPort
@@ -2363,6 +2481,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
* continue discovery.
*/
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE)) {
lpfc_issue_reg_vpi(phba, vport);
return;
@@ -2485,6 +2604,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
@@ -2572,6 +2693,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -2618,7 +2741,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp)
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
lpfc_els_abort(phba, ndlp);
/* ReStart discovery */
@@ -2897,6 +3020,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_sid = NLP_NO_SID;
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
+ NLP_INT_NODE_ACT(ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
@@ -2911,6 +3035,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static void
lpfc_nlp_release(struct kref *kref)
{
+ struct lpfc_hba *phba;
+ unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
@@ -2918,8 +3044,24 @@ lpfc_nlp_release(struct kref *kref)
"node release: did:x%x flg:x%x type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0279 lpfc_nlp_release: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+
+ /* remove ndlp from action. */
lpfc_nlp_remove(ndlp->vport, ndlp);
- mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+
+ /* clear the ndlp active flag for all release cases */
+ phba = ndlp->vport->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ NLP_CLR_NODE_ACT(ndlp);
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+ /* free ndlp memory for final ndlp release */
+ if (NLP_CHK_FREE_REQ(ndlp))
+ mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
@@ -2929,37 +3071,108 @@ lpfc_nlp_release(struct kref *kref)
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba;
+ unsigned long flags;
+
if (ndlp) {
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node get: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount));
- kref_get(&ndlp->kref);
+ /* Check ndlp usage to prevent incrementing the reference
+ * count of an ndlp that is in the process of being
+ * released.
+ */
+ phba = ndlp->vport->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0276 lpfc_nlp_get: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return NULL;
+ } else
+ kref_get(&ndlp->kref);
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
}
return ndlp;
}
-
/* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be freed.
+ * count goes to 0, this indicates that the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released; on the
+ * other hand, returning 0 indicates the ndlp resource has not been released
+ * yet.
*/
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
- if (ndlp) {
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
+ struct lpfc_hba *phba;
+ unsigned long flags;
+
+ if (!ndlp)
+ return 1;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ atomic_read(&ndlp->kref.refcount));
+ phba = ndlp->vport->phba;
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ /* Check the ndlp memory free acknowledge flag to avoid the
+ * possible race condition that kref_put gets invoked again
+ * after a previous put has already freed the ndlp memory.
+ */
+ if (NLP_CHK_FREE_ACK(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0274 lpfc_nlp_put: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return 1;
}
- return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
+ /* Check the ndlp inactivate request flag to avoid the possible
+ * race condition that kref_put gets invoked again after the
+ * ndlp is already in the inactivating state.
+ */
+ if (NLP_CHK_IACT_REQ(ndlp)) {
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+ "0275 lpfc_nlp_put: ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ (void *)ndlp, ndlp->nlp_usg_map,
+ atomic_read(&ndlp->kref.refcount));
+ return 1;
+ }
+ /* For the last put, mark the ndlp usage flags to make sure no
+ * other kref_get or kref_put on the same ndlp can get in
+ * while the final kref_put is being processed on this ndlp.
+ */
+ if (atomic_read(&ndlp->kref.refcount) == 1) {
+ /* Indicate ndlp is put to inactive state. */
+ NLP_SET_IACT_REQ(ndlp);
+ /* Acknowledge ndlp memory free has been seen. */
+ if (NLP_CHK_FREE_REQ(ndlp))
+ NLP_SET_FREE_ACK(ndlp);
+ }
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ /* Note: kref_put returns 1 when it is called on a reference
+ * count of 1; it invokes the release callback but leaves the
+ * count itself at 1 (the final decrement is not performed).
+ * Otherwise it decrements the reference count and returns 0.
+ */
+ return kref_put(&ndlp->kref, lpfc_nlp_release);
}
/* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the ndlp
- * is not being used by anyone and has been freed. A return value of
- * 0 indicates it is being used by another discovery thread and the
- * refcount is left unchanged.
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp has
+ * not yet been released.
*/
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
@@ -2968,11 +3181,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
atomic_read(&ndlp->kref.refcount));
-
- if (atomic_read(&ndlp->kref.refcount) == 1) {
- lpfc_nlp_put(ndlp);
- return 1;
- }
+ if (atomic_read(&ndlp->kref.refcount) == 1)
+ if (lpfc_nlp_put(ndlp))
+ return 1;
return 0;
}
-
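[Editor's note] The reworked lpfc_nlp_get/lpfc_nlp_put above layer usage-map checks on top of an ordinary kref. As a reference point, here is a hedged user-space sketch of the underlying get/put discipline (get bumps the count, put drops it and runs the release callback at zero); the struct and function names are made up for the example, and the plain int counter stands in for the kernel's atomic kref.

#include <stdio.h>
#include <stdlib.h>

struct node {
    int refcount;
    void (*release)(struct node *);
};

static void node_get(struct node *n)
{
    n->refcount++;
}

/* Returns 1 when the last reference was dropped and release() ran. */
static int node_put(struct node *n)
{
    if (--n->refcount == 0) {
        n->release(n);
        return 1;
    }
    return 0;
}

static void node_release(struct node *n)
{
    printf("releasing node\n");
    free(n);
}

int main(void)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        return 1;
    n->refcount = 1;
    n->release = node_release;
    node_get(n); /* second reference, e.g. held for a mailbox callback */
    printf("put: %d\n", node_put(n)); /* 0: still referenced */
    printf("put: %d\n", node_put(n)); /* 1: released */
    return 0;
}
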
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 041f83e7634..7773b949aa7 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -581,6 +581,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_INVALID_O_SID 0x15
#define LSEXP_INVALID_OX_RX 0x17
#define LSEXP_CMD_IN_PROGRESS 0x19
+#define LSEXP_PORT_LOGIN_REQ 0x1E
#define LSEXP_INVALID_NPORT_ID 0x1F
#define LSEXP_INVALID_SEQ_ID 0x21
#define LSEXP_INVALID_XCHG 0x23
@@ -1376,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */
#define CMD_QUE_XRI64_CX 0xB3
#define CMD_IOCB_RCV_SEQ64_CX 0xB5
#define CMD_IOCB_RCV_ELS64_CX 0xB7
+#define CMD_IOCB_RET_XRI64_CX 0xB9
#define CMD_IOCB_RCV_CONT64_CX 0xBB
#define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3
+/* Unhandled SLI-3 Commands */
+#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0
+#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1
+#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1
+#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD
+#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6
+#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA
+#define CMD_IOCB_RET_HBQE64_CN 0xCA
+#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC
+#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD
+#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF
+#define CMD_IOCB_LOGENTRY_CN 0x94
+#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
+
#define CMD_MAX_IOCB_CMD 0xE6
#define CMD_IOCB_MASK 0xff
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6cfeba7454d..22843751c2c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -461,11 +461,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
+ struct lpfc_vport **vports;
+ int i;
/* Disable interrupts */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- lpfc_cleanup_discovery_resources(phba->pport);
+ if (phba->pport->load_flag & FC_UNLOADING)
+ lpfc_cleanup_discovery_resources(phba->pport);
+ else {
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+ lpfc_cleanup_discovery_resources(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
return 0;
}
@@ -1422,9 +1432,32 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ continue;
+ spin_lock_irq(&phba->ndlp_lock);
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+ /* Trigger the release of the ndlp memory */
+ lpfc_nlp_put(ndlp);
+ continue;
+ }
+ spin_lock_irq(&phba->ndlp_lock);
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ /* The ndlp should not be in memory free mode already */
+ spin_unlock_irq(&phba->ndlp_lock);
+ continue;
+ } else
+ /* Indicate request for freeing ndlp memory */
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+
if (ndlp->nlp_type & NLP_FABRIC)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
+
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
@@ -1438,6 +1471,17 @@ lpfc_cleanup(struct lpfc_vport *vport)
if (i++ > 3000) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0233 Nodelist not empty\n");
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vport->fc_nodes, nlp_listp) {
+ lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+ LOG_NODE,
+ "0282: did:x%x ndlp:x%p "
+ "usgmap:x%x refcnt:%d\n",
+ ndlp->nlp_DID, (void *)ndlp,
+ ndlp->nlp_usg_map,
+ atomic_read(
+ &ndlp->kref.refcount));
+ }
break;
}
@@ -1586,6 +1630,8 @@ lpfc_offline_prep(struct lpfc_hba * phba)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -1695,9 +1741,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport = (struct lpfc_vport *) shost->hostdata;
vport->phba = phba;
-
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_rscn_flush = 0;
lpfc_get_vport_cfgparam(vport);
shost->unique_id = instance;
@@ -1879,6 +1925,42 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+static int
+lpfc_enable_msix(struct lpfc_hba *phba)
+{
+ int error;
+
+ phba->msix_entries[0].entry = 0;
+ phba->msix_entries[0].vector = 0;
+
+ error = pci_enable_msix(phba->pcidev, phba->msix_entries,
+ ARRAY_SIZE(phba->msix_entries));
+ if (error) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0420 Enable MSI-X failed (%d), continuing "
+ "with MSI\n", error);
+ pci_disable_msix(phba->pcidev);
+ return error;
+ }
+
+ error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0,
+ LPFC_DRIVER_NAME, phba);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0421 MSI-X request_irq failed (%d), "
+ "continuing with MSI\n", error);
+ pci_disable_msix(phba->pcidev);
+ }
+ return error;
+}
+
+static void
+lpfc_disable_msix(struct lpfc_hba *phba)
+{
+ free_irq(phba->msix_entries[0].vector, phba);
+ pci_disable_msix(phba->pcidev);
+}
+
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
@@ -1905,6 +1987,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
spin_lock_init(&phba->hbalock);
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);
+
phba->pcidev = pdev;
/* Assign an unused board number */
@@ -2002,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+ INIT_LIST_HEAD(&phba->hbqbuf_in_list);
+
/* Initialize the SLI Layer to run with lpfc HBAs. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
@@ -2077,24 +2164,36 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_debugfs_initialize(vport);
pci_set_drvdata(pdev, shost);
+ phba->intr_type = NONE;
- if (phba->cfg_use_msi) {
+ if (phba->cfg_use_msi == 2) {
+ error = lpfc_enable_msix(phba);
+ if (!error)
+ phba->intr_type = MSIX;
+ }
+
+ /* Fallback to MSI if MSI-X initialization failed */
+ if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
retval = pci_enable_msi(phba->pcidev);
if (!retval)
- phba->using_msi = 1;
+ phba->intr_type = MSI;
else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 Enable MSI failed, continuing "
"with IRQ\n");
}
- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
- LPFC_DRIVER_NAME, phba);
- if (retval) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0451 Enable interrupt handler failed\n");
- error = retval;
- goto out_disable_msi;
+ /* MSI-X is the only case that doesn't need to call request_irq */
+ if (phba->intr_type != MSIX) {
+ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (retval) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
+ "interrupt handler failed\n");
+ error = retval;
+ goto out_disable_msi;
+ } else if (phba->intr_type != MSI)
+ phba->intr_type = INTx;
}
phba->MBslimaddr = phba->slim_memmap_p;
@@ -2139,9 +2238,14 @@ out_remove_device:
out_free_irq:
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
- free_irq(phba->pcidev->irq, phba);
+
+ if (phba->intr_type == MSIX)
+ lpfc_disable_msix(phba);
+ else
+ free_irq(phba->pcidev->irq, phba);
+
out_disable_msi:
- if (phba->using_msi)
+ if (phba->intr_type == MSI)
pci_disable_msi(phba->pcidev);
destroy_port(vport);
out_kthread_stop:
@@ -2214,10 +2318,13 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
- /* Release the irq reservation */
- free_irq(phba->pcidev->irq, phba);
- if (phba->using_msi)
- pci_disable_msi(phba->pcidev);
+ if (phba->intr_type == MSIX)
+ lpfc_disable_msix(phba);
+ else {
+ free_irq(phba->pcidev->irq, phba);
+ if (phba->intr_type == MSI)
+ pci_disable_msi(phba->pcidev);
+ }
pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
@@ -2276,10 +2383,13 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
- /* Release the irq reservation */
- free_irq(phba->pcidev->irq, phba);
- if (phba->using_msi)
- pci_disable_msi(phba->pcidev);
+ if (phba->intr_type == MSIX)
+ lpfc_disable_msix(phba);
+ else {
+ free_irq(phba->pcidev->irq, phba);
+ if (phba->intr_type == MSI)
+ pci_disable_msi(phba->pcidev);
+ }
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
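
The hunks above convert lpfc's interrupt setup into an MSI-X first, MSI second, INTx last ladder, tracked through phba->intr_type. Below is a minimal stand-alone sketch of that ladder with a single vector; struct my_hba, my_isr and the other names are hypothetical, not the driver's code.

#include <linux/pci.h>
#include <linux/interrupt.h>

enum my_intr_type { MY_NONE, MY_INTX, MY_MSI, MY_MSIX };

struct my_hba {
	struct pci_dev *pdev;
	struct msix_entry msix_entries[1];
	enum my_intr_type intr_type;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct my_hba *hba)
{
	int rc;

	hba->intr_type = MY_NONE;

	/* 1. Try MSI-X first (a single vector in this sketch). */
	hba->msix_entries[0].entry = 0;
	rc = pci_enable_msix(hba->pdev, hba->msix_entries, 1);
	if (!rc && !request_irq(hba->msix_entries[0].vector, my_isr, 0,
				"my_hba", hba)) {
		hba->intr_type = MY_MSIX;
		return 0;
	}
	if (!rc)
		pci_disable_msix(hba->pdev);	/* got vectors but no handler */

	/* 2. Fall back to MSI. */
	if (!pci_enable_msi(hba->pdev))
		hba->intr_type = MY_MSI;

	/* 3. MSI and INTx both take the handler on pdev->irq. */
	rc = request_irq(hba->pdev->irq, my_isr, IRQF_SHARED, "my_hba", hba);
	if (rc) {
		if (hba->intr_type == MY_MSI)
			pci_disable_msi(hba->pdev);
		hba->intr_type = MY_NONE;
		return rc;
	}
	if (hba->intr_type != MY_MSI)
		hba->intr_type = MY_INTX;
	return 0;
}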
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index c5841d7565f..39fd2b843be 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -35,11 +35,15 @@
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
+ do { \
{ if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
- fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
+ } while (0)
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+ do { \
{ if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
- fmt, phba->brd_no, ##arg); }
+ fmt, phba->brd_no, ##arg); } \
+ } while (0)
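
The lpfc_logmsg.h hunk wraps both logging macros in do { ... } while (0). A small user-space sketch (hypothetical LOG macros, not the driver's) of why a multi-statement macro needs that wrapper to behave like a single statement after if/else:

#include <stdio.h>

/* Hypothetical logging macros, only to show the difference. */
#define LOG_BRACES(msg)	 { printf("log: "); printf("%s\n", (msg)); }
#define LOG_DOWHILE(msg) do { printf("log: "); printf("%s\n", (msg)); } while (0)

int main(void)
{
	int verbose = 0;

	/*
	 * With LOG_BRACES the following would not compile:
	 *
	 *	if (verbose)
	 *		LOG_BRACES("hello");	<- the ';' after '}' ends the if
	 *	else				<- 'else' has no matching 'if'
	 *		...
	 *
	 * The do { } while (0) form swallows the trailing semicolon, so the
	 * macro behaves like an ordinary single statement.
	 */
	if (verbose)
		LOG_DOWHILE("hello");
	else
		LOG_DOWHILE("quiet");

	return 0;
}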
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 6dc5ab8d671..3c0cebc7180 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -264,19 +264,30 @@ void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
struct hbq_dmabuf *hbq_entry;
+ unsigned long flags;
+
+ if (!mp)
+ return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ /* Check whether HBQ is still in use */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!phba->hbq_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_entry->dbuf.list);
if (hbq_entry->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_entry);
} else {
lpfc_sli_free_hbq(phba, hbq_entry);
}
+ spin_unlock_irqrestore(&phba->hbalock, flags);
} else {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
return;
}
-
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4a0e3406e37..d513813f669 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -249,6 +249,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
+ struct lpfc_work_evt *evtp;
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
@@ -435,8 +436,14 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
del_timer_sync(&ndlp->nlp_delayfunc);
ndlp->nlp_last_elscmd = 0;
- if (!list_empty(&ndlp->els_retry_evt.evt_listp))
+ if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
list_del_init(&ndlp->els_retry_evt.evt_listp);
+ /* Decrement ndlp reference count held for the
+ * delayed retry
+ */
+ evtp = &ndlp->els_retry_evt;
+ lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+ }
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
spin_lock_irq(shost->host_lock);
@@ -638,13 +645,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
- /* Check config parameter use-adisc or FCP-2 */
- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
- ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
- return 1;
+ if (!(vport->fc_flag & FC_PT2PT)) {
+ /* Check config parameter use-adisc or FCP-2 */
+ if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ return 1;
+ }
}
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
lpfc_unreg_rpi(vport, ndlp);
@@ -656,7 +665,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0253 Illegal State Transition: node x%x "
+ "0271 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
@@ -674,7 +683,7 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0253 Illegal State Transition: node x%x "
+ "0272 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
@@ -2144,8 +2153,11 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t cur_state, rc;
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
+ uint32_t got_ndlp = 0;
+
+ if (lpfc_nlp_get(ndlp))
+ got_ndlp = 1;
- lpfc_nlp_get(ndlp);
cur_state = ndlp->nlp_state;
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
@@ -2162,15 +2174,24 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rc = (func) (vport, ndlp, arg, evt);
/* DSM out state <rc> on NPort <nlp_DID> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ if (got_ndlp) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x Data: x%x\n",
rc, ndlp->nlp_DID, ndlp->nlp_flag);
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
- "DSM out: ste:%d did:x%x flg:x%x",
- rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ /* Decrement the ndlp reference count held for this function */
+ lpfc_nlp_put(ndlp);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0212 DSM out state %d on NPort free\n", rc);
- lpfc_nlp_put(ndlp);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, 0, 0);
+ }
return rc;
}
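
The lpfc_disc_state_machine() hunk above takes a node reference before running the event handler and only logs and drops that reference afterwards if the get succeeded. A sketch of the same discipline with a generic kref-based node; the types and helpers are hypothetical, not lpfc's:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_node {
	struct kref kref;
	int active;
};

static void my_node_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_node, kref));
}

/* Returns non-zero only if a reference was actually taken. */
static int my_node_get(struct my_node *n)
{
	if (!n || !n->active)
		return 0;
	kref_get(&n->kref);
	return 1;
}

static int my_run_event(struct my_node *n, int (*handler)(struct my_node *))
{
	int got = my_node_get(n);
	int rc = handler(n);	/* the handler may drop the last reference */

	if (got) {
		/* our reference keeps n alive, so it is safe to log it here */
		kref_put(&n->kref, my_node_release);
	}
	return rc;
}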
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fc5c3a42b05..70255c11d3a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1283,6 +1283,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
match = 0;
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
i == ndlp->nlp_sid &&
ndlp->rport) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fdd01e384e3..f53206411cd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_IOCB_RCV_SEQ64_CX:
case CMD_IOCB_RCV_ELS64_CX:
case CMD_IOCB_RCV_CONT64_CX:
+ case CMD_IOCB_RET_XRI64_CX:
type = LPFC_UNSOL_IOCB;
break;
+ case CMD_IOCB_XMIT_MSEQ64_CR:
+ case CMD_IOCB_XMIT_MSEQ64_CX:
+ case CMD_IOCB_RCV_SEQ_LIST64_CX:
+ case CMD_IOCB_RCV_ELS_LIST64_CX:
+ case CMD_IOCB_CLOSE_EXTENDED_CN:
+ case CMD_IOCB_ABORT_EXTENDED_CN:
+ case CMD_IOCB_RET_HBQE64_CN:
+ case CMD_IOCB_FCP_IBIDIR64_CR:
+ case CMD_IOCB_FCP_IBIDIR64_CX:
+ case CMD_IOCB_FCP_ITASKMGT64_CX:
+ case CMD_IOCB_LOGENTRY_CN:
+ case CMD_IOCB_LOGENTRY_ASYNC_CN:
+ printk("%s - Unhandled SLI-3 Command x%x\n",
+ __FUNCTION__, iocb_cmnd);
+ type = LPFC_UNKNOWN_IOCB;
+ break;
default:
type = LPFC_UNKNOWN_IOCB;
break;
@@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf;
+ unsigned long flags;
int i, hbq_count;
+ uint32_t hbqno;
hbq_count = lpfc_sli_hbq_count();
/* Return all memory used by all HBQs */
+ spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < hbq_count; ++i) {
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbqs[i].hbq_buffer_list, list) {
@@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
}
phba->hbqs[i].buffer_count = 0;
}
+ /* Return all in-flight HBQ buffers */
+ list_for_each_entry_safe(dmabuf, next_dmabuf,
+ &phba->hbqbuf_in_list, list) {
+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_buf->dbuf.list);
+ if (hbq_buf->tag == -1) {
+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+ (phba, hbq_buf);
+ } else {
+ hbqno = hbq_buf->tag >> 16;
+ if (hbqno >= LPFC_MAX_HBQS)
+ (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+ (phba, hbq_buf);
+ else
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba,
+ hbq_buf);
+ }
+ }
+
+ /* Mark the HBQs not in use */
+ phba->hbq_in_use = 0;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
}
static struct lpfc_hbq_entry *
@@ -603,6 +645,7 @@ static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
uint32_t i, start, end;
+ unsigned long flags;
struct hbq_dmabuf *hbq_buffer;
if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
@@ -615,6 +658,13 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
end = lpfc_hbq_defs[hbqno]->entry_count;
}
+ /* Check whether HBQ is still in use */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!phba->hbq_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return 0;
+ }
+
/* Populate HBQ entries */
for (i = start; i < end; i++) {
hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
@@ -626,6 +676,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0;
}
@@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
uint32_t hbqno;
void *virt; /* virtual address ptr */
dma_addr_t phys; /* mapped address */
+ unsigned long flags;
+
+ /* Check whether HBQ is still in use */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!phba->hbq_in_use) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+ }
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
- if (hbq_entry == NULL)
+ if (hbq_entry == NULL) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
+ }
list_del(&hbq_entry->dbuf.list);
hbqno = tag >> 16;
new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
- if (new_hbq_entry == NULL)
+ if (new_hbq_entry == NULL) {
+ list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return &hbq_entry->dbuf;
+ }
new_hbq_entry->tag = -1;
phys = new_hbq_entry->dbuf.phys;
virt = new_hbq_entry->dbuf.virt;
@@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
hbq_entry->dbuf.phys = phys;
hbq_entry->dbuf.virt = virt;
lpfc_sli_free_hbq(phba, hbq_entry);
+ list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
return &new_hbq_entry->dbuf;
}
@@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t Rctl, Type;
uint32_t match, i;
struct lpfc_iocbq *iocbq;
+ struct lpfc_dmabuf *dmzbuf;
match = 0;
irsp = &(saveq->iocb);
@@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 1;
}
+ if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ if (irsp->ulpBdeCount > 0) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->un.ulpWord[3]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ if (irsp->ulpBdeCount > 1) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[3]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ if (irsp->ulpBdeCount > 2) {
+ dmzbuf = lpfc_sli_get_buff(phba, pring,
+ irsp->unsli3.sli3Words[7]);
+ lpfc_in_buf_free(phba, dmzbuf);
+ }
+
+ return 1;
+ }
+
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
saveq->context2 = lpfc_sli_get_buff(phba, pring,
@@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
/* Initialize the struct lpfc_sli_hbq structure for each hbq */
phba->link_state = LPFC_INIT_MBX_CMDS;
+ phba->hbq_in_use = 1;
hbq_entry_index = 0;
for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
@@ -2404,9 +2497,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
(!pmb->mb.un.varCfgPort.cMA)) {
rc = -ENXIO;
- goto do_prep_failed;
}
- return rc;
do_prep_failed:
mempool_free(pmb, phba->mbox_mem_pool);
@@ -2625,14 +2716,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
return MBX_NOT_FINISHED;
}
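
Several lpfc_sli.c hunks above guard the HBQ buffer lists with phba->hbalock and an hbq_in_use flag, so teardown can mark the queues dead and later callers bail out instead of touching freed buffers. A minimal sketch of that check-under-lock pattern with hypothetical structures:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_ctrl {
	spinlock_t lock;
	int in_use;			/* cleared once the queue is torn down */
	struct list_head buffers;
};

struct my_buf {
	struct list_head list;
};

static void my_buf_free(struct my_ctrl *ctrl, struct my_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (!ctrl->in_use) {		/* queue already gone, do nothing */
		spin_unlock_irqrestore(&ctrl->lock, flags);
		return;
	}
	list_del(&buf->list);		/* still protected by the lock */
	spin_unlock_irqrestore(&ctrl->lock, flags);
	kfree(buf);
}

static void my_teardown(struct my_ctrl *ctrl)
{
	struct my_buf *buf, *next;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->lock, flags);
	list_for_each_entry_safe(buf, next, &ctrl->buffers, list) {
		list_del(&buf->list);
		kfree(buf);
	}
	ctrl->in_use = 0;		/* later callers see the flag and bail */
	spin_unlock_irqrestore(&ctrl->lock, flags);
}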
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4b633d39a82..ca540d1d041 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.4"
+#define LPFC_DRIVER_VERSION "8.2.5"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9fad7663c11..86d05beb00b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -327,7 +327,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
@@ -358,7 +359,8 @@ disable_vport(struct fc_vport *fc_vport)
long timeout;
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && phba->link_state >= LPFC_LINK_UP) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && phba->link_state >= LPFC_LINK_UP) {
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -372,6 +374,8 @@ disable_vport(struct fc_vport *fc_vport)
* calling lpfc_cleanup_rpis(vport, 1)
*/
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -414,7 +418,8 @@ enable_vport(struct fc_vport *fc_vport)
* up and ready to FDISC.
*/
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+ && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
lpfc_set_disctmo(vport);
lpfc_initial_fdisc(vport);
@@ -498,7 +503,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
scsi_remove_host(lpfc_shost_from_vport(vport));
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+
+ /* In case of driver unload, do not perform the fabric logo: the
+ * worker thread has already stopped at this stage, so the logo can
+ * safely be skipped.
+ */
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP) {
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto skip_logo;
+ else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto skip_logo;
+ }
+ /* Remove ndlp from vport ndlp list */
+ lpfc_dequeue_node(vport, ndlp);
+
+ /* Indicate that the node memory should be freed on release */
+ spin_lock_irq(&phba->ndlp_lock);
+ NLP_SET_FREE_REQ(ndlp);
+ spin_unlock_irq(&phba->ndlp_lock);
+ /* Kick off ndlp release when it can safely be done */
+ lpfc_nlp_put(ndlp);
+ }
+ goto skip_logo;
+ }
+
+ /* Otherwise, we will perform fabric logo as needed */
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
phba->link_state >= LPFC_LINK_UP) {
if (vport->cfg_enable_da_id) {
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
@@ -519,8 +558,27 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
if (!ndlp)
goto skip_logo;
lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ /* Indicate that the node memory should be freed on release */
+ NLP_SET_FREE_REQ(ndlp);
} else {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ ndlp = lpfc_enable_node(vport, ndlp,
+ NLP_STE_UNUSED_NODE);
+ if (!ndlp)
+ goto skip_logo;
+
+ /* Remove ndlp from vport ndlp list */
lpfc_dequeue_node(vport, ndlp);
+ spin_lock_irq(&phba->ndlp_lock);
+ if (!NLP_CHK_FREE_REQ(ndlp))
+ /* Indicate that the node memory should be freed on release */
+ NLP_SET_FREE_REQ(ndlp);
+ else {
+ /* Skip this if ndlp is already in free mode */
+ spin_unlock_irq(&phba->ndlp_lock);
+ goto skip_logo;
+ }
+ spin_unlock_irq(&phba->ndlp_lock);
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
@@ -534,9 +592,9 @@ skip_logo:
lpfc_sli_host_down(vport);
lpfc_stop_vport_timers(vport);
- lpfc_unreg_all_rpis(vport);
if (!(phba->pport->load_flag & FC_UNLOADING)) {
+ lpfc_unreg_all_rpis(vport);
lpfc_unreg_default_rpis(vport);
/*
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index b6587a6d848..0ad215e27b8 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -59,7 +59,6 @@ EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
-static int majorno;
static uint32_t drvr_ver = 0x02200207;
static int adapters_count_g;
@@ -76,6 +75,12 @@ static const struct file_operations lsi_fops = {
.owner = THIS_MODULE,
};
+static struct miscdevice megaraid_mm_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "megadev0",
+ .fops = &lsi_fops,
+};
+
/**
* mraid_mm_open - open routine for char node interface
* @inode : unused
@@ -1184,15 +1189,16 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
static int __init
mraid_mm_init(void)
{
+ int err;
+
// Announce the driver version
con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
- majorno = register_chrdev(0, "megadev", &lsi_fops);
-
- if (majorno < 0) {
- con_log(CL_ANN, ("megaraid cmm: cannot get major\n"));
- return majorno;
+ err = misc_register(&megaraid_mm_dev);
+ if (err < 0) {
+ con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
+ return err;
}
init_waitqueue_head(&wait_q);
@@ -1230,7 +1236,7 @@ mraid_mm_exit(void)
{
con_log(CL_DLEVEL1 , ("exiting common mod\n"));
- unregister_chrdev(majorno, "megadev");
+ misc_deregister(&megaraid_mm_dev);
}
module_init(mraid_mm_init);
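
The megaraid_mm change replaces a manually managed character-device major (register_chrdev) with a misc device on a dynamic minor. A stripped-down sketch of that pattern, using hypothetical names rather than the driver's:

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice my_misc_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* kernel picks a free minor */
	.name  = "mydev0",		/* appears as /dev/mydev0 */
	.fops  = &my_fops,
};

static int __init my_init(void)
{
	return misc_register(&my_misc_dev);
}

static void __exit my_exit(void)
{
	misc_deregister(&my_misc_dev);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");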
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index c8762b2b8ed..55b425c0a65 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -22,6 +22,7 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/list.h>
+#include <linux/miscdevice.h>
#include "mbox_defs.h"
#include "megaraid_ioctl.h"
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 2a6e4f472ea..a57fed47b39 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -416,11 +416,11 @@ static int ses_intf_add(struct class_device *cdev,
int i, j, types, len, components = 0;
int err = -ENOMEM;
struct enclosure_device *edev;
- struct ses_component *scomp;
+ struct ses_component *scomp = NULL;
if (!scsi_device_enclosure(sdev)) {
/* not an enclosure, but might be in one */
- edev = enclosure_find(&sdev->host->shost_gendev);
+ edev = enclosure_find(&sdev->host->shost_gendev);
if (edev) {
ses_match_to_enclosure(edev, sdev);
class_device_put(&edev->cdev);
@@ -456,9 +456,6 @@ static int ses_intf_add(struct class_device *cdev,
if (!buf)
goto err_free;
- ses_dev->page1 = buf;
- ses_dev->page1_len = len;
-
result = ses_recv_diag(sdev, 1, buf, len);
if (result)
goto recv_failed;
@@ -473,6 +470,9 @@ static int ses_intf_add(struct class_device *cdev,
type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
components += type_ptr[1];
}
+ ses_dev->page1 = buf;
+ ses_dev->page1_len = len;
+ buf = NULL;
result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
if (result)
@@ -489,6 +489,7 @@ static int ses_intf_add(struct class_device *cdev,
goto recv_failed;
ses_dev->page2 = buf;
ses_dev->page2_len = len;
+ buf = NULL;
/* The additional information page --- allows us
* to match up the devices */
@@ -506,11 +507,12 @@ static int ses_intf_add(struct class_device *cdev,
goto recv_failed;
ses_dev->page10 = buf;
ses_dev->page10_len = len;
+ buf = NULL;
no_page10:
- scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
+ scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
if (!scomp)
- goto err_free;
+ goto err_free;
edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
components, &ses_enclosure_callbacks);
@@ -521,10 +523,9 @@ static int ses_intf_add(struct class_device *cdev,
edev->scratch = ses_dev;
for (i = 0; i < components; i++)
- edev->component[i].scratch = scomp++;
+ edev->component[i].scratch = scomp + i;
/* Page 7 for the descriptors is optional */
- buf = NULL;
result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
if (result)
goto simple_populate;
@@ -532,6 +533,8 @@ static int ses_intf_add(struct class_device *cdev,
len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
/* add 1 for trailing '\0' we'll use */
buf = kzalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ goto simple_populate;
result = ses_recv_diag(sdev, 7, buf, len);
if (result) {
simple_populate:
@@ -598,6 +601,7 @@ static int ses_intf_add(struct class_device *cdev,
err = -ENODEV;
err_free:
kfree(buf);
+ kfree(scomp);
kfree(ses_dev->page10);
kfree(ses_dev->page2);
kfree(ses_dev->page1);
@@ -630,6 +634,7 @@ static void ses_intf_remove(struct class_device *cdev,
ses_dev = edev->scratch;
edev->scratch = NULL;
+ kfree(ses_dev->page10);
kfree(ses_dev->page1);
kfree(ses_dev->page2);
kfree(ses_dev);
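
The ses hunks switch the per-component scratch array to kzalloc and index it with scomp + i rather than advancing the pointer, so the base pointer is still valid for kfree() on the error path. A tiny illustration with hypothetical types only:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_component { void *scratch; };
struct my_scratch { int data; };

static int my_attach(struct my_component *comp, int components)
{
	int i;
	struct my_scratch *scomp;

	scomp = kzalloc(sizeof(*scomp) * components, GFP_KERNEL);
	if (!scomp)
		return -ENOMEM;

	for (i = 0; i < components; i++)
		comp[i].scratch = scomp + i;	/* index, never advance scomp */

	/*
	 * If something fails later, kfree(scomp) still frees the whole
	 * array; with "scratch = scomp++" the base pointer would be lost.
	 */
	return 0;
}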
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 6325901e509..f7d279542fa 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -187,10 +187,10 @@
#define sym53c416_base_2 sym53c416_2
#define sym53c416_base_3 sym53c416_3
-static unsigned int sym53c416_base[2] = {0,0};
-static unsigned int sym53c416_base_1[2] = {0,0};
-static unsigned int sym53c416_base_2[2] = {0,0};
-static unsigned int sym53c416_base_3[2] = {0,0};
+static unsigned int sym53c416_base[2];
+static unsigned int sym53c416_base_1[2];
+static unsigned int sym53c416_base_2[2];
+static unsigned int sym53c416_base_3[2];
#endif
@@ -621,25 +621,25 @@ int __init sym53c416_detect(struct scsi_host_template *tpnt)
int ints[3];
ints[0] = 2;
- if(sym53c416_base)
+ if(sym53c416_base[0])
{
ints[1] = sym53c416_base[0];
ints[2] = sym53c416_base[1];
sym53c416_setup(NULL, ints);
}
- if(sym53c416_base_1)
+ if(sym53c416_base_1[0])
{
ints[1] = sym53c416_base_1[0];
ints[2] = sym53c416_base_1[1];
sym53c416_setup(NULL, ints);
}
- if(sym53c416_base_2)
+ if(sym53c416_base_2[0])
{
ints[1] = sym53c416_base_2[0];
ints[2] = sym53c416_base_2[1];
sym53c416_setup(NULL, ints);
}
- if(sym53c416_base_3)
+ if(sym53c416_base_3[0])
{
ints[1] = sym53c416_base_3[0];
ints[2] = sym53c416_base_3[1];
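
The sym53c416 fix exists because testing an array name such as if (sym53c416_base) is always true: the array decays to a non-NULL pointer. The patch tests the first element instead. A stand-alone illustration:

#include <stdio.h>

static unsigned int base[2];	/* zero-initialised, like the driver's arrays */

int main(void)
{
	if (base)		/* always true: &base[0] is never NULL */
		printf("always printed\n");

	if (base[0])		/* true only once a real I/O port was stored */
		printf("configured\n");

	return 0;
}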
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ddf63914453..9ce12cb2ceb 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -393,7 +393,7 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
if (cflag & CRTSCTS) {
fcr_val |= SCFCR_MCE;
} else {
-#ifdef CONFIG_CPU_SUBTYPE_SH7343
+#if defined(CONFIG_CPU_SUBTYPE_SH7343) || defined(CONFIG_CPU_SUBTYPE_SH7366)
/* Nothing */
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index f5764ebcfe0..01a9dd715f5 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -97,13 +97,18 @@
# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
# define SCIF_ONLY
# define PORT_PSCR 0xA405011E
+#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
+# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
+# define SCSPTR0 SCPDR0
+# define SCIF_ORER 0x0001 /* overrun error bit */
+# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# include <asm/hardware.h>
# define SCIF_BASE_ADDR 0x01030000
# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
# define SCIF_PTR2_OFFS 0x0000020
@@ -577,7 +582,7 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index e52a6296ca4..9cfcfd8dad5 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -31,6 +31,7 @@
#include <asm/mach/dma.h>
#include <asm/mach/sysasic.h>
#include <asm/mach/maple.h>
+#include <linux/delay.h>
MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
@@ -53,12 +54,12 @@ static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
-static int started, scanning, liststatus;
+static int started, scanning, liststatus, realscan;
static struct kmem_cache *maple_queue_cache;
struct maple_device_specify {
- int port;
- int unit;
+ int port;
+ int unit;
};
/**
@@ -68,22 +69,22 @@ struct maple_device_specify {
*/
int maple_driver_register(struct device_driver *drv)
{
- if (!drv)
- return -EINVAL;
- drv->bus = &maple_bus_type;
- return driver_register(drv);
+ if (!drv)
+ return -EINVAL;
+ drv->bus = &maple_bus_type;
+ return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
- ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
- /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
- ctrl_outl(1, MAPLE_TRIGTYPE);
- ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
- ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
- ctrl_outl(1, MAPLE_ENABLE);
+ ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
+ /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
+ ctrl_outl(1, MAPLE_TRIGTYPE);
+ ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
+ ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
+ ctrl_outl(1, MAPLE_ENABLE);
}
/**
@@ -94,27 +95,36 @@ static void maplebus_dma_reset(void)
* @function: the function code for the device
*/
void maple_getcond_callback(struct maple_device *dev,
- void (*callback) (struct mapleq * mq),
- unsigned long interval, unsigned long function)
+ void (*callback) (struct mapleq *mq),
+ unsigned long interval, unsigned long function)
{
- dev->callback = callback;
- dev->interval = interval;
- dev->function = cpu_to_be32(function);
- dev->when = jiffies;
+ dev->callback = callback;
+ dev->interval = interval;
+ dev->function = cpu_to_be32(function);
+ dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
static int maple_dma_done(void)
{
- return (ctrl_inl(MAPLE_STATE) & 1) == 0;
+ return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}
static void maple_release_device(struct device *dev)
{
- if (dev->type) {
- kfree(dev->type->name);
- kfree(dev->type);
- }
+ struct maple_device *mdev;
+ struct mapleq *mq;
+ if (!dev)
+ return;
+ mdev = to_maple_dev(dev);
+ mq = mdev->mq;
+ if (mq) {
+ if (mq->recvbufdcsp)
+ kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
+ kfree(mq);
+ mq = NULL;
+ }
+ kfree(mdev);
}
/**
@@ -123,60 +133,64 @@ static void maple_release_device(struct device *dev)
*/
void maple_add_packet(struct mapleq *mq)
{
- mutex_lock(&maple_list_lock);
- list_add(&mq->list, &maple_waitq);
- mutex_unlock(&maple_list_lock);
+ mutex_lock(&maple_list_lock);
+ list_add(&mq->list, &maple_waitq);
+ mutex_unlock(&maple_list_lock);
}
EXPORT_SYMBOL_GPL(maple_add_packet);
-static struct mapleq *maple_allocq(struct maple_device *dev)
+static struct mapleq *maple_allocq(struct maple_device *mdev)
{
- struct mapleq *mq;
+ struct mapleq *mq;
- mq = kmalloc(sizeof(*mq), GFP_KERNEL);
- if (!mq)
- return NULL;
+ mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return NULL;
- mq->dev = dev;
- mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
- mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
- if (!mq->recvbuf) {
- kfree(mq);
- return NULL;
- }
+ mq->dev = mdev;
+ mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
+ mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
+ if (!mq->recvbuf) {
+ kfree(mq);
+ return NULL;
+ }
- return mq;
+ return mq;
}
static struct maple_device *maple_alloc_dev(int port, int unit)
{
- struct maple_device *dev;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
- dev->port = port;
- dev->unit = unit;
- dev->mq = maple_allocq(dev);
-
- if (!dev->mq) {
- kfree(dev);
- return NULL;
- }
-
- return dev;
+ struct maple_device *mdev;
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return NULL;
+
+ mdev->port = port;
+ mdev->unit = unit;
+ mdev->mq = maple_allocq(mdev);
+
+ if (!mdev->mq) {
+ kfree(mdev);
+ return NULL;
+ }
+ mdev->dev.bus = &maple_bus_type;
+ mdev->dev.parent = &maple_bus;
+ mdev->function = 0;
+ return mdev;
}
static void maple_free_dev(struct maple_device *mdev)
{
- if (!mdev)
- return;
- if (mdev->mq) {
- kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp);
- kfree(mdev->mq);
- }
- kfree(mdev);
+ if (!mdev)
+ return;
+ if (mdev->mq) {
+ if (mdev->mq->recvbufdcsp)
+ kmem_cache_free(maple_queue_cache,
+ mdev->mq->recvbufdcsp);
+ kfree(mdev->mq);
+ }
+ kfree(mdev);
}
/* process the command queue into a maple command block
@@ -184,153 +198,162 @@ static void maple_free_dev(struct maple_device *mdev)
*/
static void maple_build_block(struct mapleq *mq)
{
- int port, unit, from, to, len;
- unsigned long *lsendbuf = mq->sendbuf;
+ int port, unit, from, to, len;
+ unsigned long *lsendbuf = mq->sendbuf;
- port = mq->dev->port & 3;
- unit = mq->dev->unit;
- len = mq->length;
- from = port << 6;
- to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);
+ port = mq->dev->port & 3;
+ unit = mq->dev->unit;
+ len = mq->length;
+ from = port << 6;
+ to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);
- *maple_lastptr &= 0x7fffffff;
- maple_lastptr = maple_sendptr;
+ *maple_lastptr &= 0x7fffffff;
+ maple_lastptr = maple_sendptr;
- *maple_sendptr++ = (port << 16) | len | 0x80000000;
- *maple_sendptr++ = PHYSADDR(mq->recvbuf);
- *maple_sendptr++ =
- mq->command | (to << 8) | (from << 16) | (len << 24);
+ *maple_sendptr++ = (port << 16) | len | 0x80000000;
+ *maple_sendptr++ = PHYSADDR(mq->recvbuf);
+ *maple_sendptr++ =
+ mq->command | (to << 8) | (from << 16) | (len << 24);
- while (len-- > 0)
- *maple_sendptr++ = *lsendbuf++;
+ while (len-- > 0)
+ *maple_sendptr++ = *lsendbuf++;
}
/* build up command queue */
static void maple_send(void)
{
- int i;
- int maple_packets;
- struct mapleq *mq, *nmq;
-
- if (!list_empty(&maple_sentq))
- return;
- if (list_empty(&maple_waitq) || !maple_dma_done())
- return;
- maple_packets = 0;
- maple_sendptr = maple_lastptr = maple_sendbuf;
- list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
- maple_build_block(mq);
- list_move(&mq->list, &maple_sentq);
- if (maple_packets++ > MAPLE_MAXPACKETS)
- break;
- }
- if (maple_packets > 0) {
- for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
- dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
+ int i;
+ int maple_packets;
+ struct mapleq *mq, *nmq;
+
+ if (!list_empty(&maple_sentq))
+ return;
+ if (list_empty(&maple_waitq) || !maple_dma_done())
+ return;
+ maple_packets = 0;
+ maple_sendptr = maple_lastptr = maple_sendbuf;
+ list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
+ maple_build_block(mq);
+ list_move(&mq->list, &maple_sentq);
+ if (maple_packets++ > MAPLE_MAXPACKETS)
+ break;
+ }
+ if (maple_packets > 0) {
+ for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
+ dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
}
static int attach_matching_maple_driver(struct device_driver *driver,
- void *devptr)
+ void *devptr)
{
- struct maple_driver *maple_drv;
- struct maple_device *mdev;
-
- mdev = devptr;
- maple_drv = to_maple_driver(driver);
- if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
- if (maple_drv->connect(mdev) == 0) {
- mdev->driver = maple_drv;
- return 1;
- }
- }
- return 0;
+ struct maple_driver *maple_drv;
+ struct maple_device *mdev;
+
+ mdev = devptr;
+ maple_drv = to_maple_driver(driver);
+ if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
+ if (maple_drv->connect(mdev) == 0) {
+ mdev->driver = maple_drv;
+ return 1;
+ }
+ }
+ return 0;
}
static void maple_detach_driver(struct maple_device *mdev)
{
- if (!mdev)
- return;
- if (mdev->driver) {
- if (mdev->driver->disconnect)
- mdev->driver->disconnect(mdev);
- }
- mdev->driver = NULL;
- if (mdev->registered) {
- maple_release_device(&mdev->dev);
- device_unregister(&mdev->dev);
- }
- mdev->registered = 0;
- maple_free_dev(mdev);
+ if (!mdev)
+ return;
+ if (mdev->driver) {
+ if (mdev->driver->disconnect)
+ mdev->driver->disconnect(mdev);
+ }
+ mdev->driver = NULL;
+ device_unregister(&mdev->dev);
+ mdev = NULL;
}
/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
-static void maple_attach_driver(struct maple_device *dev)
+static void maple_attach_driver(struct maple_device *mdev)
{
- char *p;
-
- char *recvbuf;
- unsigned long function;
- int matched, retval;
-
- recvbuf = dev->mq->recvbuf;
- memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo));
- memcpy(dev->product_name, dev->devinfo.product_name, 30);
- memcpy(dev->product_licence, dev->devinfo.product_licence, 60);
- dev->product_name[30] = '\0';
- dev->product_licence[60] = '\0';
-
- for (p = dev->product_name + 29; dev->product_name <= p; p--)
- if (*p == ' ')
- *p = '\0';
- else
- break;
-
- for (p = dev->product_licence + 59; dev->product_licence <= p; p--)
- if (*p == ' ')
- *p = '\0';
- else
- break;
-
- function = be32_to_cpu(dev->devinfo.function);
-
- if (function > 0x200) {
- /* Do this silently - as not a real device */
- function = 0;
- dev->driver = &maple_dummy_driver;
- sprintf(dev->dev.bus_id, "%d:0.port", dev->port);
- } else {
- printk(KERN_INFO
- "Maple bus at (%d, %d): Connected function 0x%lX\n",
- dev->port, dev->unit, function);
-
- matched =
- bus_for_each_drv(&maple_bus_type, NULL, dev,
- attach_matching_maple_driver);
-
- if (matched == 0) {
- /* Driver does not exist yet */
- printk(KERN_INFO
- "No maple driver found for this device\n");
- dev->driver = &maple_dummy_driver;
- }
-
- sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port,
- dev->unit, function);
- }
- dev->function = function;
- dev->dev.bus = &maple_bus_type;
- dev->dev.parent = &maple_bus;
- dev->dev.release = &maple_release_device;
- retval = device_register(&dev->dev);
- if (retval) {
- printk(KERN_INFO
- "Maple bus: Attempt to register device (%x, %x) failed.\n",
- dev->port, dev->unit);
- maple_free_dev(dev);
- }
- dev->registered = 1;
+ char *p, *recvbuf;
+ unsigned long function;
+ int matched, retval;
+
+ recvbuf = mdev->mq->recvbuf;
+ /* copy the data as individual elements in
+ * case of memory optimisation */
+ memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
+ memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
+ memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
+ memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
+ memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
+ memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
+ memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
+ memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
+ memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
+ mdev->product_name[30] = '\0';
+ memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
+ mdev->product_licence[60] = '\0';
+
+ for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
+ if (*p == ' ')
+ *p = '\0';
+ else
+ break;
+ for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
+ if (*p == ' ')
+ *p = '\0';
+ else
+ break;
+
+ if (realscan) {
+ printk(KERN_INFO "Maple device detected: %s\n",
+ mdev->product_name);
+ printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
+ }
+
+ function = be32_to_cpu(mdev->devinfo.function);
+
+ if (function > 0x200) {
+ /* Do this silently - as not a real device */
+ function = 0;
+ mdev->driver = &maple_dummy_driver;
+ sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
+ } else {
+ if (realscan)
+ printk(KERN_INFO
+ "Maple bus at (%d, %d): Function 0x%lX\n",
+ mdev->port, mdev->unit, function);
+
+ matched =
+ bus_for_each_drv(&maple_bus_type, NULL, mdev,
+ attach_matching_maple_driver);
+
+ if (matched == 0) {
+ /* Driver does not exist yet */
+ if (realscan)
+ printk(KERN_INFO
+ "No maple driver found.\n");
+ mdev->driver = &maple_dummy_driver;
+ }
+ sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
+ mdev->unit, function);
+ }
+ mdev->function = function;
+ mdev->dev.release = &maple_release_device;
+ retval = device_register(&mdev->dev);
+ if (retval) {
+ printk(KERN_INFO
+ "Maple bus: Attempt to register device"
+ " (%x, %x) failed.\n",
+ mdev->port, mdev->unit);
+ maple_free_dev(mdev);
+ mdev = NULL;
+ return;
+ }
}
/*
@@ -340,270 +363,262 @@ static void maple_attach_driver(struct maple_device *dev)
*/
static int detach_maple_device(struct device *device, void *portptr)
{
- struct maple_device_specify *ds;
- struct maple_device *mdev;
-
- ds = portptr;
- mdev = to_maple_dev(device);
- if (mdev->port == ds->port && mdev->unit == ds->unit)
- return 1;
- return 0;
+ struct maple_device_specify *ds;
+ struct maple_device *mdev;
+
+ ds = portptr;
+ mdev = to_maple_dev(device);
+ if (mdev->port == ds->port && mdev->unit == ds->unit)
+ return 1;
+ return 0;
}
static int setup_maple_commands(struct device *device, void *ignored)
{
- struct maple_device *maple_dev = to_maple_dev(device);
-
- if ((maple_dev->interval > 0)
- && time_after(jiffies, maple_dev->when)) {
- maple_dev->when = jiffies + maple_dev->interval;
- maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
- maple_dev->mq->sendbuf = &maple_dev->function;
- maple_dev->mq->length = 1;
- maple_add_packet(maple_dev->mq);
- liststatus++;
- } else {
- if (time_after(jiffies, maple_pnp_time)) {
- maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
- maple_dev->mq->length = 0;
- maple_add_packet(maple_dev->mq);
- liststatus++;
- }
- }
-
- return 0;
+ struct maple_device *maple_dev = to_maple_dev(device);
+
+ if ((maple_dev->interval > 0)
+ && time_after(jiffies, maple_dev->when)) {
+ maple_dev->when = jiffies + maple_dev->interval;
+ maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
+ maple_dev->mq->sendbuf = &maple_dev->function;
+ maple_dev->mq->length = 1;
+ maple_add_packet(maple_dev->mq);
+ liststatus++;
+ } else {
+ if (time_after(jiffies, maple_pnp_time)) {
+ maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
+ maple_dev->mq->length = 0;
+ maple_add_packet(maple_dev->mq);
+ liststatus++;
+ }
+ }
+
+ return 0;
}
/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
- if (!maple_dma_done())
- return;
- if (!list_empty(&maple_sentq))
- return;
- ctrl_outl(0, MAPLE_ENABLE);
- liststatus = 0;
- bus_for_each_dev(&maple_bus_type, NULL, NULL,
- setup_maple_commands);
- if (time_after(jiffies, maple_pnp_time))
- maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
- if (liststatus && list_empty(&maple_sentq)) {
- INIT_LIST_HEAD(&maple_sentq);
- maple_send();
- }
- maplebus_dma_reset();
+ if (!maple_dma_done())
+ return;
+ if (!list_empty(&maple_sentq))
+ return;
+ ctrl_outl(0, MAPLE_ENABLE);
+ liststatus = 0;
+ bus_for_each_dev(&maple_bus_type, NULL, NULL,
+ setup_maple_commands);
+ if (time_after(jiffies, maple_pnp_time))
+ maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
+ if (liststatus && list_empty(&maple_sentq)) {
+ INIT_LIST_HEAD(&maple_sentq);
+ maple_send();
+ }
+ maplebus_dma_reset();
}
/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
- int retval, k, devcheck;
- struct maple_device *mdev_add;
- struct maple_device_specify ds;
-
- for (k = 0; k < 5; k++) {
- ds.port = mdev->port;
- ds.unit = k + 1;
- retval =
- bus_for_each_dev(&maple_bus_type, NULL, &ds,
- detach_maple_device);
- if (retval) {
- submask = submask >> 1;
- continue;
- }
- devcheck = submask & 0x01;
- if (devcheck) {
- mdev_add = maple_alloc_dev(mdev->port, k + 1);
- if (!mdev_add)
- return;
- mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
- mdev_add->mq->length = 0;
- maple_add_packet(mdev_add->mq);
- scanning = 1;
- }
- submask = submask >> 1;
- }
+ int retval, k, devcheck;
+ struct maple_device *mdev_add;
+ struct maple_device_specify ds;
+
+ for (k = 0; k < 5; k++) {
+ ds.port = mdev->port;
+ ds.unit = k + 1;
+ retval =
+ bus_for_each_dev(&maple_bus_type, NULL, &ds,
+ detach_maple_device);
+ if (retval) {
+ submask = submask >> 1;
+ continue;
+ }
+ devcheck = submask & 0x01;
+ if (devcheck) {
+ mdev_add = maple_alloc_dev(mdev->port, k + 1);
+ if (!mdev_add)
+ return;
+ mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
+ mdev_add->mq->length = 0;
+ maple_add_packet(mdev_add->mq);
+ scanning = 1;
+ }
+ submask = submask >> 1;
+ }
}
/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
- int killbit;
+ int killbit;
- killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
- killbit = ~killbit;
- killbit &= 0xFF;
- subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
+ killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
+ killbit = ~killbit;
+ killbit &= 0xFF;
+ subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
- struct mapleq *mq)
+ struct mapleq *mq)
{
- if (mdev->unit != 0) {
- list_del(&mq->list);
- maple_clean_submap(mdev);
- printk(KERN_INFO
- "Maple bus device detaching at (%d, %d)\n",
- mdev->port, mdev->unit);
- maple_detach_driver(mdev);
- return;
- }
- if (!started) {
- printk(KERN_INFO "No maple devices attached to port %d\n",
- mdev->port);
- return;
- }
- maple_clean_submap(mdev);
+ if (mdev->unit != 0) {
+ list_del(&mq->list);
+ maple_clean_submap(mdev);
+ printk(KERN_INFO
+ "Maple bus device detaching at (%d, %d)\n",
+ mdev->port, mdev->unit);
+ maple_detach_driver(mdev);
+ return;
+ }
+ if (!started) {
+ printk(KERN_INFO "No maple devices attached to port %d\n",
+ mdev->port);
+ return;
+ }
+ maple_clean_submap(mdev);
}
/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
- char *recvbuf)
+ char *recvbuf)
{
- char submask;
- if ((!started) || (scanning == 2)) {
- maple_attach_driver(mdev);
- return;
- }
- if (mdev->unit == 0) {
- submask = recvbuf[2] & 0x1F;
- if (submask ^ subdevice_map[mdev->port]) {
- maple_map_subunits(mdev, submask);
- subdevice_map[mdev->port] = submask;
- }
- }
+ char submask;
+ if ((!started) || (scanning == 2)) {
+ maple_attach_driver(mdev);
+ return;
+ }
+ if (mdev->unit == 0) {
+ submask = recvbuf[2] & 0x1F;
+ if (submask ^ subdevice_map[mdev->port]) {
+ maple_map_subunits(mdev, submask);
+ subdevice_map[mdev->port] = submask;
+ }
+ }
}
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
- struct mapleq *mq, *nmq;
- struct maple_device *dev;
- char *recvbuf;
- enum maple_code code;
-
- if (!maple_dma_done())
- return;
- ctrl_outl(0, MAPLE_ENABLE);
- if (!list_empty(&maple_sentq)) {
- list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
- recvbuf = mq->recvbuf;
- code = recvbuf[0];
- dev = mq->dev;
- switch (code) {
- case MAPLE_RESPONSE_NONE:
- maple_response_none(dev, mq);
- break;
-
- case MAPLE_RESPONSE_DEVINFO:
- maple_response_devinfo(dev, recvbuf);
- break;
-
- case MAPLE_RESPONSE_DATATRF:
- if (dev->callback)
- dev->callback(mq);
- break;
-
- case MAPLE_RESPONSE_FILEERR:
- case MAPLE_RESPONSE_AGAIN:
- case MAPLE_RESPONSE_BADCMD:
- case MAPLE_RESPONSE_BADFUNC:
- printk(KERN_DEBUG
- "Maple non-fatal error 0x%X\n",
- code);
- break;
-
- case MAPLE_RESPONSE_ALLINFO:
- printk(KERN_DEBUG
- "Maple - extended device information not supported\n");
- break;
-
- case MAPLE_RESPONSE_OK:
- break;
-
- default:
- break;
- }
- }
- INIT_LIST_HEAD(&maple_sentq);
- if (scanning == 1) {
- maple_send();
- scanning = 2;
- } else
- scanning = 0;
-
- if (started == 0)
- started = 1;
- }
- maplebus_dma_reset();
+ struct mapleq *mq, *nmq;
+ struct maple_device *dev;
+ char *recvbuf;
+ enum maple_code code;
+
+ if (!maple_dma_done())
+ return;
+ ctrl_outl(0, MAPLE_ENABLE);
+ if (!list_empty(&maple_sentq)) {
+ list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
+ recvbuf = mq->recvbuf;
+ code = recvbuf[0];
+ dev = mq->dev;
+ switch (code) {
+ case MAPLE_RESPONSE_NONE:
+ maple_response_none(dev, mq);
+ break;
+
+ case MAPLE_RESPONSE_DEVINFO:
+ maple_response_devinfo(dev, recvbuf);
+ break;
+
+ case MAPLE_RESPONSE_DATATRF:
+ if (dev->callback)
+ dev->callback(mq);
+ break;
+
+ case MAPLE_RESPONSE_FILEERR:
+ case MAPLE_RESPONSE_AGAIN:
+ case MAPLE_RESPONSE_BADCMD:
+ case MAPLE_RESPONSE_BADFUNC:
+ printk(KERN_DEBUG
+ "Maple non-fatal error 0x%X\n",
+ code);
+ break;
+
+ case MAPLE_RESPONSE_ALLINFO:
+ printk(KERN_DEBUG
+ "Maple - extended device information"
+ " not supported\n");
+ break;
+
+ case MAPLE_RESPONSE_OK:
+ break;
+
+ default:
+ break;
+ }
+ }
+ INIT_LIST_HEAD(&maple_sentq);
+ if (scanning == 1) {
+ maple_send();
+ scanning = 2;
+ } else
+ scanning = 0;
+
+ if (started == 0)
+ started = 1;
+ }
+ maplebus_dma_reset();
}
static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
- /* Load everything into the bottom half */
- schedule_work(&maple_dma_process);
- return IRQ_HANDLED;
+ /* Load everything into the bottom half */
+ schedule_work(&maple_dma_process);
+ return IRQ_HANDLED;
}
static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
- schedule_work(&maple_vblank_process);
- return IRQ_HANDLED;
+ schedule_work(&maple_vblank_process);
+ return IRQ_HANDLED;
}
-static struct irqaction maple_dma_irq = {
- .name = "maple bus DMA handler",
- .handler = maplebus_dma_interrupt,
- .flags = IRQF_SHARED,
-};
-
-static struct irqaction maple_vblank_irq = {
- .name = "maple bus VBLANK handler",
- .handler = maplebus_vblank_interrupt,
- .flags = IRQF_SHARED,
-};
-
static int maple_set_dma_interrupt_handler(void)
{
- return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq);
+ return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
+ IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}
static int maple_set_vblank_interrupt_handler(void)
{
- return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq);
+ return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
+ IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}
static int maple_get_dma_buffer(void)
{
- maple_sendbuf =
- (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- MAPLE_DMA_PAGES);
- if (!maple_sendbuf)
- return -ENOMEM;
- return 0;
+ maple_sendbuf =
+ (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ MAPLE_DMA_PAGES);
+ if (!maple_sendbuf)
+ return -ENOMEM;
+ return 0;
}
static int match_maple_bus_driver(struct device *devptr,
- struct device_driver *drvptr)
+ struct device_driver *drvptr)
{
- struct maple_driver *maple_drv;
- struct maple_device *maple_dev;
-
- maple_drv = container_of(drvptr, struct maple_driver, drv);
- maple_dev = container_of(devptr, struct maple_device, dev);
- /* Trap empty port case */
- if (maple_dev->devinfo.function == 0xFFFFFFFF)
- return 0;
- else if (maple_dev->devinfo.function &
- be32_to_cpu(maple_drv->function))
- return 1;
- return 0;
+ struct maple_driver *maple_drv;
+ struct maple_device *maple_dev;
+
+ maple_drv = container_of(drvptr, struct maple_driver, drv);
+ maple_dev = container_of(devptr, struct maple_device, dev);
+ /* Trap empty port case */
+ if (maple_dev->devinfo.function == 0xFFFFFFFF)
+ return 0;
+ else if (maple_dev->devinfo.function &
+ be32_to_cpu(maple_drv->function))
+ return 1;
+ return 0;
}
-static int maple_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int maple_bus_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
{
- return 0;
+ return 0;
}
static void maple_bus_release(struct device *dev)
@@ -611,124 +626,122 @@ static void maple_bus_release(struct device *dev)
}
static struct maple_driver maple_dummy_driver = {
- .drv = {
- .name = "maple_dummy_driver",
- .bus = &maple_bus_type,
- },
+ .drv = {
+ .name = "maple_dummy_driver",
+ .bus = &maple_bus_type,
+ },
};
struct bus_type maple_bus_type = {
- .name = "maple",
- .match = match_maple_bus_driver,
- .uevent = maple_bus_uevent,
+ .name = "maple",
+ .match = match_maple_bus_driver,
+ .uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);
static struct device maple_bus = {
- .bus_id = "maple",
- .release = maple_bus_release,
+ .bus_id = "maple",
+ .release = maple_bus_release,
};
static int __init maple_bus_init(void)
{
- int retval, i;
- struct maple_device *mdev[MAPLE_PORTS];
- ctrl_outl(0, MAPLE_STATE);
-
- retval = device_register(&maple_bus);
- if (retval)
- goto cleanup;
-
- retval = bus_register(&maple_bus_type);
- if (retval)
- goto cleanup_device;
-
- retval = driver_register(&maple_dummy_driver.drv);
-
- if (retval)
- goto cleanup_bus;
-
- /* allocate memory for maple bus dma */
- retval = maple_get_dma_buffer();
- if (retval) {
- printk(KERN_INFO
- "Maple bus: Failed to allocate Maple DMA buffers\n");
- goto cleanup_basic;
- }
-
- /* set up DMA interrupt handler */
- retval = maple_set_dma_interrupt_handler();
- if (retval) {
- printk(KERN_INFO
- "Maple bus: Failed to grab maple DMA IRQ\n");
- goto cleanup_dma;
- }
-
- /* set up VBLANK interrupt handler */
- retval = maple_set_vblank_interrupt_handler();
- if (retval) {
- printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
- goto cleanup_irq;
- }
-
- maple_queue_cache =
- kmem_cache_create("maple_queue_cache", 0x400, 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!maple_queue_cache)
- goto cleanup_bothirqs;
-
- /* setup maple ports */
- for (i = 0; i < MAPLE_PORTS; i++) {
- mdev[i] = maple_alloc_dev(i, 0);
- if (!mdev[i]) {
- while (i-- > 0)
- maple_free_dev(mdev[i]);
- goto cleanup_cache;
- }
- mdev[i]->registered = 0;
- mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
- mdev[i]->mq->length = 0;
- maple_attach_driver(mdev[i]);
- maple_add_packet(mdev[i]->mq);
- subdevice_map[i] = 0;
- }
-
- /* setup maplebus hardware */
- maplebus_dma_reset();
-
- /* initial detection */
- maple_send();
-
- maple_pnp_time = jiffies;
-
- printk(KERN_INFO "Maple bus core now registered.\n");
-
- return 0;
+ int retval, i;
+ struct maple_device *mdev[MAPLE_PORTS];
+ ctrl_outl(0, MAPLE_STATE);
+
+ retval = device_register(&maple_bus);
+ if (retval)
+ goto cleanup;
+
+ retval = bus_register(&maple_bus_type);
+ if (retval)
+ goto cleanup_device;
+
+ retval = driver_register(&maple_dummy_driver.drv);
+ if (retval)
+ goto cleanup_bus;
+
+ /* allocate memory for maple bus dma */
+ retval = maple_get_dma_buffer();
+ if (retval) {
+ printk(KERN_INFO
+ "Maple bus: Failed to allocate Maple DMA buffers\n");
+ goto cleanup_basic;
+ }
+
+ /* set up DMA interrupt handler */
+ retval = maple_set_dma_interrupt_handler();
+ if (retval) {
+ printk(KERN_INFO
+ "Maple bus: Failed to grab maple DMA IRQ\n");
+ goto cleanup_dma;
+ }
+
+ /* set up VBLANK interrupt handler */
+ retval = maple_set_vblank_interrupt_handler();
+ if (retval) {
+ printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
+ goto cleanup_irq;
+ }
+
+ maple_queue_cache =
+ kmem_cache_create("maple_queue_cache", 0x400, 0,
+ SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);
+
+ if (!maple_queue_cache)
+ goto cleanup_bothirqs;
+
+ /* setup maple ports */
+ for (i = 0; i < MAPLE_PORTS; i++) {
+ mdev[i] = maple_alloc_dev(i, 0);
+ if (!mdev[i]) {
+ while (i-- > 0)
+ maple_free_dev(mdev[i]);
+ goto cleanup_cache;
+ }
+ mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
+ mdev[i]->mq->length = 0;
+ maple_add_packet(mdev[i]->mq);
+ /* delay aids hardware detection */
+ mdelay(5);
+ subdevice_map[i] = 0;
+ }
+
+ realscan = 1;
+ /* setup maplebus hardware */
+ maplebus_dma_reset();
+ /* initial detection */
+ maple_send();
+ maple_pnp_time = jiffies;
+ printk(KERN_INFO "Maple bus core now registered.\n");
+
+ return 0;
cleanup_cache:
- kmem_cache_destroy(maple_queue_cache);
+ kmem_cache_destroy(maple_queue_cache);
cleanup_bothirqs:
- free_irq(HW_EVENT_VSYNC, 0);
+ free_irq(HW_EVENT_VSYNC, 0);
cleanup_irq:
- free_irq(HW_EVENT_MAPLE_DMA, 0);
+ free_irq(HW_EVENT_MAPLE_DMA, 0);
cleanup_dma:
- free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
+ free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
cleanup_basic:
- driver_unregister(&maple_dummy_driver.drv);
+ driver_unregister(&maple_dummy_driver.drv);
cleanup_bus:
- bus_unregister(&maple_bus_type);
+ bus_unregister(&maple_bus_type);
cleanup_device:
- device_unregister(&maple_bus);
+ device_unregister(&maple_bus);
cleanup:
- printk(KERN_INFO "Maple bus registration failed\n");
- return retval;
+ printk(KERN_INFO "Maple bus registration failed\n");
+ return retval;
}
-subsys_initcall(maple_bus_init);
+/* Push init to later to ensure hardware gets detected */
+fs_initcall(maple_bus_init);
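The switch from subsys_initcall() to fs_initcall() above relies on the fixed ordering of
built-in initcall levels: everything registered at one level runs before the next level
starts, so the maple bus now probes after the rest of the subsystem bring-up. A minimal
sketch of that ordering follows; the demo function and its message are made up for
illustration and are not part of the patch.

#include <linux/init.h>
#include <linux/printk.h>

/*
 * Built-in initcalls run strictly by level:
 *   core_initcall      (1)
 *   postcore_initcall  (2)
 *   arch_initcall      (3)
 *   subsys_initcall    (4)   <- where maple_bus_init used to run
 *   fs_initcall        (5)   <- where it runs after this patch
 *   device_initcall    (6)   (module_init() for built-in code)
 *   late_initcall      (7)
 */
static int __init maple_order_demo(void)
{
        pr_info("runs after every subsys_initcall()\n");
        return 0;
}
fs_initcall(maple_order_demo);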
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 3301167d4f2..017a196d041 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3563,8 +3563,7 @@ static ssize_t show_file(struct device *dev, struct device_attribute *attr,
down_read(&fsg->filesem);
if (backing_file_is_open(curlun)) { // Get the complete pathname
- p = d_path(curlun->filp->f_path.dentry,
- curlun->filp->f_path.mnt, buf, PAGE_SIZE - 1);
+ p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
if (IS_ERR(p))
rc = PTR_ERR(p);
else {
@@ -3981,9 +3980,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
if (backing_file_is_open(curlun)) {
p = NULL;
if (pathbuf) {
- p = d_path(curlun->filp->f_path.dentry,
- curlun->filp->f_path.mnt,
- pathbuf, PATH_MAX);
+ p = d_path(&curlun->filp->f_path,
+ pathbuf, PATH_MAX);
if (IS_ERR(p))
p = NULL;
}
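The two hunks above are mechanical fallout from the d_path() interface change made later
in this series: the dentry/vfsmount pair now travels as a single struct path. A short
sketch of a converted caller, assuming only a struct file held by the driver; the wrapper
name is illustrative, not taken from the gadget code.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

static char *show_backing_file(struct file *filp, char *buf, int buflen)
{
        /* old: d_path(filp->f_path.dentry, filp->f_path.mnt, buf, buflen) */
        char *p = d_path(&filp->f_path, buf, buflen);

        return IS_ERR(p) ? NULL : p;
}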
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5ce43b63c60..a3510b8ba3e 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -218,16 +218,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
_enter("%p{%s},{%s:%p{%s},}",
dentry,
dentry->d_name.name,
- nd->mnt->mnt_devname,
+ nd->path.mnt->mnt_devname,
dentry,
- nd->dentry->d_name.name);
+ nd->path.dentry->d_name.name);
- dput(nd->dentry);
- nd->dentry = dget(dentry);
+ dput(nd->path.dentry);
+ nd->path.dentry = dget(dentry);
- newmnt = afs_mntpt_do_automount(nd->dentry);
+ newmnt = afs_mntpt_do_automount(nd->path.dentry);
if (IS_ERR(newmnt)) {
- path_release(nd);
+ path_put(&nd->path);
return (void *)newmnt;
}
@@ -235,17 +235,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
switch (err) {
case 0:
- dput(nd->dentry);
- mntput(nd->mnt);
- nd->mnt = newmnt;
- nd->dentry = dget(newmnt->mnt_root);
+ path_put(&nd->path);
+ nd->path.mnt = newmnt;
+ nd->path.dentry = dget(newmnt->mnt_root);
schedule_delayed_work(&afs_mntpt_expiry_timer,
afs_mntpt_expiry_timeout * HZ);
break;
case -EBUSY:
/* someone else made a mount here whilst we were busy */
- while (d_mountpoint(nd->dentry) &&
- follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
;
err = 0;
default:
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2bbcc8151dc..a54a946a50a 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -368,7 +368,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
* so we don't need to follow the mount.
*/
if (d_mountpoint(dentry)) {
- if (!autofs4_follow_mount(&nd->mnt, &nd->dentry)) {
+ if (!autofs4_follow_mount(&nd->path.mnt,
+ &nd->path.dentry)) {
status = -ENOENT;
goto out_error;
}
@@ -382,7 +383,7 @@ done:
return NULL;
out_error:
- path_release(nd);
+ path_put(&nd->path);
return ERR_PTR(status);
}
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index d8a02f1e08c..0498b181dd5 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -443,12 +443,12 @@ static int load_flat_file(struct linux_binprm * bprm,
if (strncmp(hdr->magic, "bFLT", 4)) {
/*
+ * Previously, here was a printk to tell people
+ * "BINFMT_FLAT: bad header magic".
+ * But for the kernel which also use ELF FD-PIC format, this
+ * error message is confusing.
* because a lot of people do not manage to produce good
- * flat binaries, we leave this printk to help them realise
- * the problem. We only print the error if its not a script file
*/
- if (strncmp(hdr->magic, "#!", 2))
- printk("BINFMT_FLAT: bad header magic\n");
ret = -ENOEXEC;
goto err;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e63067d25cd..67fe72ce6ac 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1397,19 +1397,19 @@ struct block_device *lookup_bdev(const char *path)
if (error)
return ERR_PTR(error);
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
error = -ENOTBLK;
if (!S_ISBLK(inode->i_mode))
goto fail;
error = -EACCES;
- if (nd.mnt->mnt_flags & MNT_NODEV)
+ if (nd.path.mnt->mnt_flags & MNT_NODEV)
goto fail;
error = -ENOMEM;
bdev = bd_acquire(inode);
if (!bdev)
goto fail;
out:
- path_release(&nd);
+ path_put(&nd.path);
return bdev;
fail:
bdev = ERR_PTR(error);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 413ee2349d1..6ad44752996 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -259,18 +259,18 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
int err;
mntget(newmnt);
- err = do_add_mount(newmnt, nd, nd->mnt->mnt_flags, mntlist);
+ err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist);
switch (err) {
case 0:
- dput(nd->dentry);
- mntput(nd->mnt);
- nd->mnt = newmnt;
- nd->dentry = dget(newmnt->mnt_root);
+ dput(nd->path.dentry);
+ mntput(nd->path.mnt);
+ nd->path.mnt = newmnt;
+ nd->path.dentry = dget(newmnt->mnt_root);
break;
case -EBUSY:
/* someone else made a mount here whilst we were busy */
- while (d_mountpoint(nd->dentry) &&
- follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
;
err = 0;
default:
@@ -307,8 +307,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
xid = GetXid();
- dput(nd->dentry);
- nd->dentry = dget(dentry);
+ dput(nd->path.dentry);
+ nd->path.dentry = dget(dentry);
cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
ses = cifs_sb->tcon->ses;
@@ -340,7 +340,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
rc = -EINVAL;
goto out_err;
}
- mnt = cifs_dfs_do_refmount(nd->mnt, nd->dentry,
+ mnt = cifs_dfs_do_refmount(nd->path.mnt,
+ nd->path.dentry,
referrals[i].node_name);
cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p",
__FUNCTION__,
@@ -357,7 +358,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
if (IS_ERR(mnt))
goto out_err;
- nd->mnt->mnt_flags |= MNT_SHRINKABLE;
+ nd->path.mnt->mnt_flags |= MNT_SHRINKABLE;
rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
out:
@@ -367,7 +368,7 @@ out:
cFYI(1, ("leaving %s" , __FUNCTION__));
return ERR_PTR(rc);
out_err:
- path_release(nd);
+ path_put(&nd->path);
goto out;
}
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 2bf3026adc8..c21a1f552a6 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -75,12 +75,12 @@ static int coda_pioctl(struct inode * inode, struct file * filp,
if ( error ) {
return error;
} else {
- target_inode = nd.dentry->d_inode;
+ target_inode = nd.path.dentry->d_inode;
}
/* return if it is not a Coda inode */
if ( target_inode->i_sb != inode->i_sb ) {
- path_release(&nd);
+ path_put(&nd.path);
return -EINVAL;
}
@@ -89,7 +89,7 @@ static int coda_pioctl(struct inode * inode, struct file * filp,
error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data);
- path_release(&nd);
+ path_put(&nd.path);
return error;
}
diff --git a/fs/compat.c b/fs/compat.c
index ee80ff341d3..43ca0165740 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -241,10 +241,10 @@ asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry, &tmp);
+ error = vfs_statfs(nd.path.dentry, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
@@ -309,10 +309,10 @@ asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, s
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry, &tmp);
+ error = vfs_statfs(nd.path.dentry, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index ee32c0eac7c..c6e72aebd16 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -2853,7 +2853,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd,
/* find the name of the device. */
path = (char *)__get_free_page(GFP_KERNEL);
if (path) {
- fn = d_path(filp->f_path.dentry, filp->f_path.mnt, path, PAGE_SIZE);
+ fn = d_path(&filp->f_path, path, PAGE_SIZE);
if (IS_ERR(fn))
fn = "?";
}
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 22700d2857d..78929ea84ff 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -99,11 +99,11 @@ static int get_target(const char *symname, struct nameidata *nd,
ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd);
if (!ret) {
- if (nd->dentry->d_sb == configfs_sb) {
- *target = configfs_get_config_item(nd->dentry);
+ if (nd->path.dentry->d_sb == configfs_sb) {
+ *target = configfs_get_config_item(nd->path.dentry);
if (!*target) {
ret = -ENOENT;
- path_release(nd);
+ path_put(&nd->path);
}
} else
ret = -EPERM;
@@ -141,7 +141,7 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
ret = create_link(parent_item, target_item, dentry);
config_item_put(target_item);
- path_release(&nd);
+ path_put(&nd.path);
out_put:
config_item_put(parent_item);
diff --git a/fs/dcache.c b/fs/dcache.c
index 44f6cf23b70..43455776711 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -95,6 +95,14 @@ static void d_free(struct dentry *dentry)
call_rcu(&dentry->d_u.d_rcu, d_callback);
}
+static void dentry_lru_remove(struct dentry *dentry)
+{
+ if (!list_empty(&dentry->d_lru)) {
+ list_del_init(&dentry->d_lru);
+ dentry_stat.nr_unused--;
+ }
+}
+
/*
* Release the dentry's inode, using the filesystem
* d_iput() operation if defined.
@@ -211,13 +219,7 @@ repeat:
unhash_it:
__d_drop(dentry);
kill_it:
- /* If dentry was on d_lru list
- * delete it from there
- */
- if (!list_empty(&dentry->d_lru)) {
- list_del(&dentry->d_lru);
- dentry_stat.nr_unused--;
- }
+ dentry_lru_remove(dentry);
dentry = d_kill(dentry);
if (dentry)
goto repeat;
@@ -285,10 +287,7 @@ int d_invalidate(struct dentry * dentry)
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
atomic_inc(&dentry->d_count);
- if (!list_empty(&dentry->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&dentry->d_lru);
- }
+ dentry_lru_remove(dentry);
return dentry;
}
@@ -404,10 +403,7 @@ static void prune_one_dentry(struct dentry * dentry)
if (dentry->d_op && dentry->d_op->d_delete)
dentry->d_op->d_delete(dentry);
- if (!list_empty(&dentry->d_lru)) {
- list_del(&dentry->d_lru);
- dentry_stat.nr_unused--;
- }
+ dentry_lru_remove(dentry);
__d_drop(dentry);
dentry = d_kill(dentry);
spin_lock(&dcache_lock);
@@ -596,10 +592,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
/* detach this root from the system */
spin_lock(&dcache_lock);
- if (!list_empty(&dentry->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&dentry->d_lru);
- }
+ dentry_lru_remove(dentry);
__d_drop(dentry);
spin_unlock(&dcache_lock);
@@ -613,11 +606,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
spin_lock(&dcache_lock);
list_for_each_entry(loop, &dentry->d_subdirs,
d_u.d_child) {
- if (!list_empty(&loop->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&loop->d_lru);
- }
-
+ dentry_lru_remove(loop);
__d_drop(loop);
cond_resched_lock(&dcache_lock);
}
@@ -799,10 +788,7 @@ resume:
struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
- if (!list_empty(&dentry->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&dentry->d_lru);
- }
+ dentry_lru_remove(dentry);
/*
* move only zero ref count dentries to the end
* of the unused list for prune_dcache
@@ -1776,9 +1762,8 @@ shouldnt_be_hashed:
*
* "buflen" should be positive. Caller holds the dcache_lock.
*/
-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
- struct dentry *root, struct vfsmount *rootmnt,
- char *buffer, int buflen)
+static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+ struct path *root, char *buffer, int buflen)
{
char * end = buffer+buflen;
char * retval;
@@ -1803,7 +1788,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
for (;;) {
struct dentry * parent;
- if (dentry == root && vfsmnt == rootmnt)
+ if (dentry == root->dentry && vfsmnt == root->mnt)
break;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
/* Global root? */
@@ -1844,13 +1829,23 @@ Elong:
return ERR_PTR(-ENAMETOOLONG);
}
-/* write full pathname into buffer and return start of pathname */
-char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
- char *buf, int buflen)
+/**
+ * d_path - return the path of a dentry
+ * @path: path to report
+ * @buf: buffer to return value in
+ * @buflen: buffer length
+ *
+ * Convert a dentry into an ASCII path name. If the entry has been deleted
+ * the string " (deleted)" is appended. Note that this is ambiguous.
+ *
+ * Returns the buffer or an error code if the path was too long.
+ *
+ * "buflen" should be positive. Caller holds the dcache_lock.
+ */
+char *d_path(struct path *path, char *buf, int buflen)
{
char *res;
- struct vfsmount *rootmnt;
- struct dentry *root;
+ struct path root;
/*
* We have various synthetic filesystems that never get mounted. On
@@ -1859,18 +1854,17 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
* user wants to identify the object in /proc/pid/fd/. The little hack
* below allows us to generate a name for these objects on demand:
*/
- if (dentry->d_op && dentry->d_op->d_dname)
- return dentry->d_op->d_dname(dentry, buf, buflen);
+ if (path->dentry->d_op && path->dentry->d_op->d_dname)
+ return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
read_lock(&current->fs->lock);
- rootmnt = mntget(current->fs->rootmnt);
- root = dget(current->fs->root);
+ root = current->fs->root;
+ path_get(&current->fs->root);
read_unlock(&current->fs->lock);
spin_lock(&dcache_lock);
- res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+ res = __d_path(path->dentry, path->mnt, &root, buf, buflen);
spin_unlock(&dcache_lock);
- dput(root);
- mntput(rootmnt);
+ path_put(&root);
return res;
}
@@ -1916,28 +1910,27 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
int error;
- struct vfsmount *pwdmnt, *rootmnt;
- struct dentry *pwd, *root;
+ struct path pwd, root;
char *page = (char *) __get_free_page(GFP_USER);
if (!page)
return -ENOMEM;
read_lock(&current->fs->lock);
- pwdmnt = mntget(current->fs->pwdmnt);
- pwd = dget(current->fs->pwd);
- rootmnt = mntget(current->fs->rootmnt);
- root = dget(current->fs->root);
+ pwd = current->fs->pwd;
+ path_get(&current->fs->pwd);
+ root = current->fs->root;
+ path_get(&current->fs->root);
read_unlock(&current->fs->lock);
error = -ENOENT;
/* Has the current directory been unlinked? */
spin_lock(&dcache_lock);
- if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
+ if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) {
unsigned long len;
char * cwd;
- cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
+ cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE);
spin_unlock(&dcache_lock);
error = PTR_ERR(cwd);
@@ -1955,10 +1948,8 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
spin_unlock(&dcache_lock);
out:
- dput(pwd);
- mntput(pwdmnt);
- dput(root);
- mntput(rootmnt);
+ path_put(&pwd);
+ path_put(&root);
free_page((unsigned long) page);
return error;
}
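Pulled out of the dcache.c diff above for readability, the sys_getcwd() prologue and
epilogue after conversion look roughly like this (a reassembled sketch of the resulting
code, not a verbatim copy): both the working directory and the root are taken as struct
path references and dropped with path_put().

        struct path pwd, root;

        read_lock(&current->fs->lock);
        pwd = current->fs->pwd;
        path_get(&current->fs->pwd);
        root = current->fs->root;
        path_get(&current->fs->root);
        read_unlock(&current->fs->lock);

        /* ... __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE) ... */

        path_put(&pwd);
        path_put(&root);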
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 792cbf55fa9..855d4b1d619 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -24,6 +24,7 @@
#include <linux/errno.h>
#include <linux/dcookies.h>
#include <linux/mutex.h>
+#include <linux/path.h>
#include <asm/uaccess.h>
/* The dcookies are allocated from a kmem_cache and
@@ -31,8 +32,7 @@
* code here is particularly performance critical
*/
struct dcookie_struct {
- struct dentry * dentry;
- struct vfsmount * vfsmnt;
+ struct path path;
struct list_head hash_list;
};
@@ -51,7 +51,7 @@ static inline int is_live(void)
/* The dentry is locked, its address will do for the cookie */
static inline unsigned long dcookie_value(struct dcookie_struct * dcs)
{
- return (unsigned long)dcs->dentry;
+ return (unsigned long)dcs->path.dentry;
}
@@ -89,19 +89,17 @@ static void hash_dcookie(struct dcookie_struct * dcs)
}
-static struct dcookie_struct * alloc_dcookie(struct dentry * dentry,
- struct vfsmount * vfsmnt)
+static struct dcookie_struct *alloc_dcookie(struct path *path)
{
- struct dcookie_struct * dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL);
+ struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache,
+ GFP_KERNEL);
if (!dcs)
return NULL;
- dentry->d_cookie = dcs;
-
- dcs->dentry = dget(dentry);
- dcs->vfsmnt = mntget(vfsmnt);
+ path->dentry->d_cookie = dcs;
+ dcs->path = *path;
+ path_get(path);
hash_dcookie(dcs);
-
return dcs;
}
@@ -109,8 +107,7 @@ static struct dcookie_struct * alloc_dcookie(struct dentry * dentry,
/* This is the main kernel-side routine that retrieves the cookie
* value for a dentry/vfsmnt pair.
*/
-int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
- unsigned long * cookie)
+int get_dcookie(struct path *path, unsigned long *cookie)
{
int err = 0;
struct dcookie_struct * dcs;
@@ -122,10 +119,10 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
goto out;
}
- dcs = dentry->d_cookie;
+ dcs = path->dentry->d_cookie;
if (!dcs)
- dcs = alloc_dcookie(dentry, vfsmnt);
+ dcs = alloc_dcookie(path);
if (!dcs) {
err = -ENOMEM;
@@ -174,7 +171,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len)
goto out;
/* FIXME: (deleted) ? */
- path = d_path(dcs->dentry, dcs->vfsmnt, kbuf, PAGE_SIZE);
+ path = d_path(&dcs->path, kbuf, PAGE_SIZE);
if (IS_ERR(path)) {
err = PTR_ERR(path);
@@ -254,9 +251,8 @@ out_kmem:
static void free_dcookie(struct dcookie_struct * dcs)
{
- dcs->dentry->d_cookie = NULL;
- dput(dcs->dentry);
- mntput(dcs->vfsmnt);
+ dcs->path.dentry->d_cookie = NULL;
+ path_put(&dcs->path);
kmem_cache_free(dcookie_cache, dcs);
}
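get_dcookie() now takes the dentry and vfsmount as one struct path as well. A hypothetical
caller, shown only to illustrate the narrowed interface; the wrapper name and the use of a
struct file are assumptions, not part of this patch.

#include <linux/dcookies.h>
#include <linux/fs.h>

static int cookie_for_open_file(struct file *file, unsigned long *cookie)
{
        /* old: get_dcookie(file->f_path.dentry, file->f_path.mnt, cookie) */
        return get_dcookie(&file->f_path, cookie);
}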
diff --git a/fs/dquot.c b/fs/dquot.c
index def4e969df7..9c7feb62eed 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -1633,16 +1633,17 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
error = path_lookup(path, LOOKUP_FOLLOW, &nd);
if (error < 0)
return error;
- error = security_quota_on(nd.dentry);
+ error = security_quota_on(nd.path.dentry);
if (error)
goto out_path;
/* Quota file not on the same filesystem? */
- if (nd.mnt->mnt_sb != sb)
+ if (nd.path.mnt->mnt_sb != sb)
error = -EXDEV;
else
- error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id);
+ error = vfs_quota_on_inode(nd.path.dentry->d_inode, type,
+ format_id);
out_path:
- path_release(&nd);
+ path_put(&nd.path);
return error;
}
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index cb20b964419..841a032050a 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -51,13 +51,13 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
goto out;
- dentry_save = nd->dentry;
- vfsmount_save = nd->mnt;
- nd->dentry = lower_dentry;
- nd->mnt = lower_mnt;
+ dentry_save = nd->path.dentry;
+ vfsmount_save = nd->path.mnt;
+ nd->path.dentry = lower_dentry;
+ nd->path.mnt = lower_mnt;
rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
- nd->dentry = dentry_save;
- nd->mnt = vfsmount_save;
+ nd->path.dentry = dentry_save;
+ nd->path.mnt = vfsmount_save;
if (dentry->d_inode) {
struct inode *lower_inode =
ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index edd1e44e9d4..e2386115210 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -77,13 +77,13 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
struct vfsmount *vfsmount_save;
int rc;
- dentry_save = nd->dentry;
- vfsmount_save = nd->mnt;
- nd->dentry = lower_dentry;
- nd->mnt = lower_mnt;
+ dentry_save = nd->path.dentry;
+ vfsmount_save = nd->path.mnt;
+ nd->path.dentry = lower_dentry;
+ nd->path.mnt = lower_mnt;
rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
- nd->dentry = dentry_save;
- nd->mnt = vfsmount_save;
+ nd->path.dentry = dentry_save;
+ nd->path.mnt = vfsmount_save;
return rc;
}
@@ -819,14 +819,14 @@ ecryptfs_permission(struct inode *inode, int mask, struct nameidata *nd)
int rc;
if (nd) {
- struct vfsmount *vfsmnt_save = nd->mnt;
- struct dentry *dentry_save = nd->dentry;
+ struct vfsmount *vfsmnt_save = nd->path.mnt;
+ struct dentry *dentry_save = nd->path.dentry;
- nd->mnt = ecryptfs_dentry_to_lower_mnt(nd->dentry);
- nd->dentry = ecryptfs_dentry_to_lower(nd->dentry);
+ nd->path.mnt = ecryptfs_dentry_to_lower_mnt(nd->path.dentry);
+ nd->path.dentry = ecryptfs_dentry_to_lower(nd->path.dentry);
rc = permission(ecryptfs_inode_to_lower(inode), mask, nd);
- nd->mnt = vfsmnt_save;
- nd->dentry = dentry_save;
+ nd->path.mnt = vfsmnt_save;
+ nd->path.dentry = dentry_save;
} else
rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL);
return rc;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 778c420e4ca..d25ac9500a9 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -513,8 +513,8 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
goto out;
}
- lower_root = nd.dentry;
- lower_mnt = nd.mnt;
+ lower_root = nd.path.dentry;
+ lower_mnt = nd.path.mnt;
ecryptfs_set_superblock_lower(sb, lower_root->d_sb);
sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
sb->s_blocksize = lower_root->d_sb->s_blocksize;
@@ -526,7 +526,7 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
rc = 0;
goto out;
out_free:
- path_release(&nd);
+ path_put(&nd.path);
out:
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
index 9ff6069094d..a44b142fb46 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -112,7 +112,7 @@ asmlinkage long sys_uselib(const char __user * library)
goto out;
error = -EINVAL;
- if (!S_ISREG(nd.dentry->d_inode->i_mode))
+ if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
goto exit;
error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
@@ -148,7 +148,7 @@ out:
return error;
exit:
release_open_intent(&nd);
- path_release(&nd);
+ path_put(&nd.path);
goto out;
}
@@ -652,7 +652,7 @@ struct file *open_exec(const char *name)
file = ERR_PTR(err);
if (!err) {
- struct inode *inode = nd.dentry->d_inode;
+ struct inode *inode = nd.path.dentry->d_inode;
file = ERR_PTR(-EACCES);
if (S_ISREG(inode->i_mode)) {
int err = vfs_permission(&nd, MAY_EXEC);
@@ -672,7 +672,7 @@ out:
}
}
release_open_intent(&nd);
- path_release(&nd);
+ path_put(&nd.path);
}
goto out;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 8e02cbfb112..18769cc3237 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2758,16 +2758,16 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
/* Quotafile not on the same filesystem? */
- if (nd.mnt->mnt_sb != sb) {
- path_release(&nd);
+ if (nd.path.mnt->mnt_sb != sb) {
+ path_put(&nd.path);
return -EXDEV;
}
/* Quotafile not of fs root? */
- if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+ if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
printk(KERN_WARNING
"EXT3-fs: Quota file not on filesystem root. "
"Journalled quota will not work.\n");
- path_release(&nd);
+ path_put(&nd.path);
return vfs_quota_on(sb, type, format_id, path);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0072da75221..13383ba18f1 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3158,16 +3158,16 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
/* Quotafile not on the same filesystem? */
- if (nd.mnt->mnt_sb != sb) {
- path_release(&nd);
+ if (nd.path.mnt->mnt_sb != sb) {
+ path_put(&nd.path);
return -EXDEV;
}
/* Quotafile not of fs root? */
- if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+ if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
printk(KERN_WARNING
"EXT4-fs: Quota file not on filesystem root. "
"Journalled quota will not work.\n");
- path_release(&nd);
+ path_put(&nd.path);
return vfs_quota_on(sb, type, format_id, path);
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 43d511bba52..4bee6aa845e 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -884,12 +884,13 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
dev_name);
goto out;
}
- error = vfs_getattr(nd.mnt, nd.dentry, &stat);
+ error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat);
fstype = get_fs_type("gfs2");
list_for_each_entry(s, &fstype->fs_supers, s_instances) {
if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
- (S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
+ (S_ISDIR(stat.mode) &&
+ s == nd.path.dentry->d_inode->i_sb)) {
sb = s;
goto free_nd;
}
@@ -899,7 +900,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name)
"mount point %s\n", dev_name);
free_nd:
- path_release(&nd);
+ path_put(&nd.path);
out:
return sb;
}
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 3ab09a65c45..7b94a1e3c01 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -41,9 +41,9 @@ static struct kmem_cache *event_cachep __read_mostly;
static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances __read_mostly;
-int inotify_max_user_watches __read_mostly;
-int inotify_max_queued_events __read_mostly;
+static int inotify_max_user_instances __read_mostly;
+static int inotify_max_user_watches __read_mostly;
+static int inotify_max_queued_events __read_mostly;
/*
* Lock ordering:
@@ -367,7 +367,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
/* you can only watch an inode if you have read permissions on it */
error = vfs_permission(nd, MAY_READ);
if (error)
- path_release(nd);
+ path_put(&nd->path);
return error;
}
@@ -667,7 +667,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
goto fput_and_out;
/* inode held in place by reference to nd; dev by fget on fd */
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
dev = filp->private_data;
mutex_lock(&dev->up_mutex);
@@ -676,7 +676,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
ret = create_watch(dev, inode, mask);
mutex_unlock(&dev->up_mutex);
- path_release(&nd);
+ path_put(&nd.path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ca6b16fc310..f1ef49fff11 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -243,10 +243,18 @@ nlm_bind_host(struct nlm_host *host)
.program = &nlm_program,
.version = host->h_version,
.authflavor = RPC_AUTH_UNIX,
- .flags = (RPC_CLNT_CREATE_HARDRTRY |
+ .flags = (RPC_CLNT_CREATE_NOPING |
RPC_CLNT_CREATE_AUTOBIND),
};
+ /*
+ * lockd retries server side blocks automatically so we want
+ * those to be soft RPC calls. Client side calls need to be
+ * hard RPC tasks.
+ */
+ if (!host->h_server)
+ args.flags |= RPC_CLNT_CREATE_HARDRTRY;
+
clnt = rpc_create(&args);
if (!IS_ERR(clnt))
host->h_rpcclnt = clnt;
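For readability, here is roughly how the flag setup in nlm_bind_host() reads once the hunk
above is applied (reassembled from the diff, not a verbatim copy): server-side hosts get
soft retries by default, and only client-side hosts add the hard-retry flag back.

        struct rpc_create_args args = {
                .program        = &nlm_program,
                .version        = host->h_version,
                .authflavor     = RPC_AUTH_UNIX,
                .flags          = (RPC_CLNT_CREATE_NOPING |
                                   RPC_CLNT_CREATE_AUTOBIND),
        };

        /*
         * lockd retries server side blocks automatically so we want
         * those to be soft RPC calls.  Client side calls need to be
         * hard RPC tasks.
         */
        if (!host->h_server)
                args.flags |= RPC_CLNT_CREATE_HARDRTRY;

        clnt = rpc_create(&args);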
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 2f4d8fa6668..fe9bdb4a220 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -763,11 +763,20 @@ callback:
dprintk("lockd: GRANTing blocked lock.\n");
block->b_granted = 1;
- /* Schedule next grant callback in 30 seconds */
- nlmsvc_insert_block(block, 30 * HZ);
+ /* keep block on the list, but don't reattempt until the RPC
+ * completes or the submission fails
+ */
+ nlmsvc_insert_block(block, NLM_NEVER);
+
+ /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
+ * will queue up a new one if this one times out
+ */
+ error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
+ &nlmsvc_grant_ops);
- /* Call the client */
- nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops);
+ /* RPC submission failed, wait a bit and retry */
+ if (error < 0)
+ nlmsvc_insert_block(block, 10 * HZ);
}
/*
@@ -786,6 +795,17 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
dprintk("lockd: GRANT_MSG RPC callback\n");
+ /* if the block is not on a list at this point then it has
+ * been invalidated. Don't try to requeue it.
+ *
+ * FIXME: it's possible that the block is removed from the list
+ * after this check but before the nlmsvc_insert_block. In that
+ * case it will be added back. Perhaps we need better locking
+ * for nlm_blocked?
+ */
+ if (list_empty(&block->b_list))
+ return;
+
/* Technically, we should down the file semaphore here. Since we
* move the block towards the head of the queue only, no harm
* can be done, though. */
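Reassembled from the first svclock.c hunk above, the grant path in nlmsvc_grant_blocked()
now reads roughly as follows: the block is parked with NLM_NEVER so it is not retried
while the asynchronous GRANTED_MSG call is in flight, and only a failed submission
schedules another attempt.

        block->b_granted = 1;

        /* keep block on the list, but don't reattempt until the RPC
         * completes or the submission fails */
        nlmsvc_insert_block(block, NLM_NEVER);

        /* soft RPC task; nlmsvc_retry_blocked will queue a new one on timeout */
        error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
                               &nlmsvc_grant_ops);

        /* RPC submission failed, wait a bit and retry */
        if (error < 0)
                nlmsvc_insert_block(block, 10 * HZ);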
diff --git a/fs/namei.c b/fs/namei.c
index 52703986323..941c8e8228c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -231,7 +231,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
struct vfsmount *mnt = NULL;
if (nd)
- mnt = nd->mnt;
+ mnt = nd->path.mnt;
if (mask & MAY_WRITE) {
umode_t mode = inode->i_mode;
@@ -296,7 +296,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
*/
int vfs_permission(struct nameidata *nd, int mask)
{
- return permission(nd->dentry->d_inode, mask, nd);
+ return permission(nd->path.dentry->d_inode, mask, nd);
}
/**
@@ -362,21 +362,31 @@ int deny_write_access(struct file * file)
return 0;
}
-void path_release(struct nameidata *nd)
+/**
+ * path_get - get a reference to a path
+ * @path: path to get the reference to
+ *
+ * Given a path increment the reference count to the dentry and the vfsmount.
+ */
+void path_get(struct path *path)
{
- dput(nd->dentry);
- mntput(nd->mnt);
+ mntget(path->mnt);
+ dget(path->dentry);
}
+EXPORT_SYMBOL(path_get);
-/*
- * umount() mustn't call path_release()/mntput() as that would clear
- * mnt_expiry_mark
+/**
+ * path_put - put a reference to a path
+ * @path: path to put the reference to
+ *
+ * Given a path decrement the reference count to the dentry and the vfsmount.
*/
-void path_release_on_umount(struct nameidata *nd)
+void path_put(struct path *path)
{
- dput(nd->dentry);
- mntput_no_expire(nd->mnt);
+ dput(path->dentry);
+ mntput(path->mnt);
}
+EXPORT_SYMBOL(path_put);
/**
* release_open_intent - free up open intent resources
@@ -539,16 +549,16 @@ walk_init_root(const char *name, struct nameidata *nd)
struct fs_struct *fs = current->fs;
read_lock(&fs->lock);
- if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
- nd->mnt = mntget(fs->altrootmnt);
- nd->dentry = dget(fs->altroot);
+ if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) {
+ nd->path = fs->altroot;
+ path_get(&fs->altroot);
read_unlock(&fs->lock);
if (__emul_lookup_dentry(name,nd))
return 0;
read_lock(&fs->lock);
}
- nd->mnt = mntget(fs->rootmnt);
- nd->dentry = dget(fs->root);
+ nd->path = fs->root;
+ path_get(&fs->root);
read_unlock(&fs->lock);
return 1;
}
@@ -561,7 +571,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l
goto fail;
if (*link == '/') {
- path_release(nd);
+ path_put(&nd->path);
if (!walk_init_root(link, nd))
/* weird __emul_prefix() stuff did it */
goto out;
@@ -577,31 +587,31 @@ out:
*/
name = __getname();
if (unlikely(!name)) {
- path_release(nd);
+ path_put(&nd->path);
return -ENOMEM;
}
strcpy(name, nd->last.name);
nd->last.name = name;
return 0;
fail:
- path_release(nd);
+ path_put(&nd->path);
return PTR_ERR(link);
}
-static inline void dput_path(struct path *path, struct nameidata *nd)
+static void path_put_conditional(struct path *path, struct nameidata *nd)
{
dput(path->dentry);
- if (path->mnt != nd->mnt)
+ if (path->mnt != nd->path.mnt)
mntput(path->mnt);
}
static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
{
- dput(nd->dentry);
- if (nd->mnt != path->mnt)
- mntput(nd->mnt);
- nd->mnt = path->mnt;
- nd->dentry = path->dentry;
+ dput(nd->path.dentry);
+ if (nd->path.mnt != path->mnt)
+ mntput(nd->path.mnt);
+ nd->path.mnt = path->mnt;
+ nd->path.dentry = path->dentry;
}
static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd)
@@ -613,7 +623,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
touch_atime(path->mnt, dentry);
nd_set_link(nd, NULL);
- if (path->mnt != nd->mnt) {
+ if (path->mnt != nd->path.mnt) {
path_to_nameidata(path, nd);
dget(dentry);
}
@@ -628,8 +638,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
- dput(dentry);
- mntput(path->mnt);
+ path_put(path);
return error;
}
@@ -661,8 +670,8 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
nd->depth--;
return err;
loop:
- dput_path(path, nd);
- path_release(nd);
+ path_put_conditional(path, nd);
+ path_put(&nd->path);
return err;
}
@@ -743,37 +752,37 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
while(1) {
struct vfsmount *parent;
- struct dentry *old = nd->dentry;
+ struct dentry *old = nd->path.dentry;
read_lock(&fs->lock);
- if (nd->dentry == fs->root &&
- nd->mnt == fs->rootmnt) {
+ if (nd->path.dentry == fs->root.dentry &&
+ nd->path.mnt == fs->root.mnt) {
read_unlock(&fs->lock);
break;
}
read_unlock(&fs->lock);
spin_lock(&dcache_lock);
- if (nd->dentry != nd->mnt->mnt_root) {
- nd->dentry = dget(nd->dentry->d_parent);
+ if (nd->path.dentry != nd->path.mnt->mnt_root) {
+ nd->path.dentry = dget(nd->path.dentry->d_parent);
spin_unlock(&dcache_lock);
dput(old);
break;
}
spin_unlock(&dcache_lock);
spin_lock(&vfsmount_lock);
- parent = nd->mnt->mnt_parent;
- if (parent == nd->mnt) {
+ parent = nd->path.mnt->mnt_parent;
+ if (parent == nd->path.mnt) {
spin_unlock(&vfsmount_lock);
break;
}
mntget(parent);
- nd->dentry = dget(nd->mnt->mnt_mountpoint);
+ nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
spin_unlock(&vfsmount_lock);
dput(old);
- mntput(nd->mnt);
- nd->mnt = parent;
+ mntput(nd->path.mnt);
+ nd->path.mnt = parent;
}
- follow_mount(&nd->mnt, &nd->dentry);
+ follow_mount(&nd->path.mnt, &nd->path.dentry);
}
/*
@@ -784,8 +793,8 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
static int do_lookup(struct nameidata *nd, struct qstr *name,
struct path *path)
{
- struct vfsmount *mnt = nd->mnt;
- struct dentry *dentry = __d_lookup(nd->dentry, name);
+ struct vfsmount *mnt = nd->path.mnt;
+ struct dentry *dentry = __d_lookup(nd->path.dentry, name);
if (!dentry)
goto need_lookup;
@@ -798,7 +807,7 @@ done:
return 0;
need_lookup:
- dentry = real_lookup(nd->dentry, name, nd);
+ dentry = real_lookup(nd->path.dentry, name, nd);
if (IS_ERR(dentry))
goto fail;
goto done;
@@ -835,7 +844,7 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
if (!*name)
goto return_reval;
- inode = nd->dentry->d_inode;
+ inode = nd->path.dentry->d_inode;
if (nd->depth)
lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
@@ -883,7 +892,7 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
if (this.name[1] != '.')
break;
follow_dotdot(nd);
- inode = nd->dentry->d_inode;
+ inode = nd->path.dentry->d_inode;
/* fallthrough */
case 1:
continue;
@@ -892,8 +901,9 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
* See if the low-level filesystem might want
* to use its own hash..
*/
- if (nd->dentry->d_op && nd->dentry->d_op->d_hash) {
- err = nd->dentry->d_op->d_hash(nd->dentry, &this);
+ if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
+ err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
+ &this);
if (err < 0)
break;
}
@@ -915,7 +925,7 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
if (err)
goto return_err;
err = -ENOENT;
- inode = nd->dentry->d_inode;
+ inode = nd->path.dentry->d_inode;
if (!inode)
break;
err = -ENOTDIR;
@@ -943,13 +953,14 @@ last_component:
if (this.name[1] != '.')
break;
follow_dotdot(nd);
- inode = nd->dentry->d_inode;
+ inode = nd->path.dentry->d_inode;
/* fallthrough */
case 1:
goto return_reval;
}
- if (nd->dentry->d_op && nd->dentry->d_op->d_hash) {
- err = nd->dentry->d_op->d_hash(nd->dentry, &this);
+ if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
+ err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
+ &this);
if (err < 0)
break;
}
@@ -962,7 +973,7 @@ last_component:
err = do_follow_link(&next, nd);
if (err)
goto return_err;
- inode = nd->dentry->d_inode;
+ inode = nd->path.dentry->d_inode;
} else
path_to_nameidata(&next, nd);
err = -ENOENT;
@@ -990,20 +1001,21 @@ return_reval:
* We bypassed the ordinary revalidation routines.
* We may need to check the cached dentry for staleness.
*/
- if (nd->dentry && nd->dentry->d_sb &&
- (nd->dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
+ if (nd->path.dentry && nd->path.dentry->d_sb &&
+ (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
err = -ESTALE;
/* Note: we do not d_invalidate() */
- if (!nd->dentry->d_op->d_revalidate(nd->dentry, nd))
+ if (!nd->path.dentry->d_op->d_revalidate(
+ nd->path.dentry, nd))
break;
}
return_base:
return 0;
out_dput:
- dput_path(&next, nd);
+ path_put_conditional(&next, nd);
break;
}
- path_release(nd);
+ path_put(&nd->path);
return_err:
return err;
}
@@ -1021,20 +1033,19 @@ static int link_path_walk(const char *name, struct nameidata *nd)
int result;
/* make sure the stuff we saved doesn't go away */
- dget(save.dentry);
- mntget(save.mnt);
+ dget(save.path.dentry);
+ mntget(save.path.mnt);
result = __link_path_walk(name, nd);
if (result == -ESTALE) {
*nd = save;
- dget(nd->dentry);
- mntget(nd->mnt);
+ dget(nd->path.dentry);
+ mntget(nd->path.mnt);
nd->flags |= LOOKUP_REVAL;
result = __link_path_walk(name, nd);
}
- dput(save.dentry);
- mntput(save.mnt);
+ path_put(&save.path);
return result;
}
@@ -1054,9 +1065,9 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd)
if (path_walk(name, nd))
return 0; /* something went wrong... */
- if (!nd->dentry->d_inode || S_ISDIR(nd->dentry->d_inode->i_mode)) {
- struct dentry *old_dentry = nd->dentry;
- struct vfsmount *old_mnt = nd->mnt;
+ if (!nd->path.dentry->d_inode ||
+ S_ISDIR(nd->path.dentry->d_inode->i_mode)) {
+ struct path old_path = nd->path;
struct qstr last = nd->last;
int last_type = nd->last_type;
struct fs_struct *fs = current->fs;
@@ -1067,19 +1078,17 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd)
*/
nd->last_type = LAST_ROOT;
read_lock(&fs->lock);
- nd->mnt = mntget(fs->rootmnt);
- nd->dentry = dget(fs->root);
+ nd->path = fs->root;
+ path_get(&fs->root);
read_unlock(&fs->lock);
if (path_walk(name, nd) == 0) {
- if (nd->dentry->d_inode) {
- dput(old_dentry);
- mntput(old_mnt);
+ if (nd->path.dentry->d_inode) {
+ path_put(&old_path);
return 1;
}
- path_release(nd);
+ path_put(&nd->path);
}
- nd->dentry = old_dentry;
- nd->mnt = old_mnt;
+ nd->path = old_path;
nd->last = last;
nd->last_type = last_type;
}
@@ -1090,29 +1099,22 @@ void set_fs_altroot(void)
{
char *emul = __emul_prefix();
struct nameidata nd;
- struct vfsmount *mnt = NULL, *oldmnt;
- struct dentry *dentry = NULL, *olddentry;
+ struct path path = {}, old_path;
int err;
struct fs_struct *fs = current->fs;
if (!emul)
goto set_it;
err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd);
- if (!err) {
- mnt = nd.mnt;
- dentry = nd.dentry;
- }
+ if (!err)
+ path = nd.path;
set_it:
write_lock(&fs->lock);
- oldmnt = fs->altrootmnt;
- olddentry = fs->altroot;
- fs->altrootmnt = mnt;
- fs->altroot = dentry;
+ old_path = fs->altroot;
+ fs->altroot = path;
write_unlock(&fs->lock);
- if (olddentry) {
- dput(olddentry);
- mntput(oldmnt);
- }
+ if (old_path.dentry)
+ path_put(&old_path);
}
/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
@@ -1130,21 +1132,21 @@ static int do_path_lookup(int dfd, const char *name,
if (*name=='/') {
read_lock(&fs->lock);
- if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
- nd->mnt = mntget(fs->altrootmnt);
- nd->dentry = dget(fs->altroot);
+ if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) {
+ nd->path = fs->altroot;
+ path_get(&fs->altroot);
read_unlock(&fs->lock);
if (__emul_lookup_dentry(name,nd))
goto out; /* found in altroot */
read_lock(&fs->lock);
}
- nd->mnt = mntget(fs->rootmnt);
- nd->dentry = dget(fs->root);
+ nd->path = fs->root;
+ path_get(&fs->root);
read_unlock(&fs->lock);
} else if (dfd == AT_FDCWD) {
read_lock(&fs->lock);
- nd->mnt = mntget(fs->pwdmnt);
- nd->dentry = dget(fs->pwd);
+ nd->path = fs->pwd;
+ path_get(&fs->pwd);
read_unlock(&fs->lock);
} else {
struct dentry *dentry;
@@ -1164,17 +1166,17 @@ static int do_path_lookup(int dfd, const char *name,
if (retval)
goto fput_fail;
- nd->mnt = mntget(file->f_path.mnt);
- nd->dentry = dget(dentry);
+ nd->path = file->f_path;
+ path_get(&file->f_path);
fput_light(file, fput_needed);
}
retval = path_walk(name, nd);
out:
- if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
- nd->dentry->d_inode))
- audit_inode(name, nd->dentry);
+ if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
+ nd->path.dentry->d_inode))
+ audit_inode(name, nd->path.dentry);
out_fail:
return retval;
@@ -1208,13 +1210,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
nd->flags = flags;
nd->depth = 0;
- nd->mnt = mntget(mnt);
- nd->dentry = dget(dentry);
+ nd->path.mnt = mntget(mnt);
+ nd->path.dentry = dget(dentry);
retval = path_walk(name, nd);
- if (unlikely(!retval && !audit_dummy_context() && nd->dentry &&
- nd->dentry->d_inode))
- audit_inode(name, nd->dentry);
+ if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
+ nd->path.dentry->d_inode))
+ audit_inode(name, nd->path.dentry);
return retval;
@@ -1236,7 +1238,7 @@ static int __path_lookup_intent_open(int dfd, const char *name,
if (IS_ERR(nd->intent.open.file)) {
if (err == 0) {
err = PTR_ERR(nd->intent.open.file);
- path_release(nd);
+ path_put(&nd->path);
}
} else if (err != 0)
release_open_intent(nd);
@@ -1333,10 +1335,10 @@ static struct dentry *lookup_hash(struct nameidata *nd)
{
int err;
- err = permission(nd->dentry->d_inode, MAY_EXEC, nd);
+ err = permission(nd->path.dentry->d_inode, MAY_EXEC, nd);
if (err)
return ERR_PTR(err);
- return __lookup_hash(&nd->last, nd->dentry, nd);
+ return __lookup_hash(&nd->last, nd->path.dentry, nd);
}
static int __lookup_one_len(const char *name, struct qstr *this,
@@ -1595,7 +1597,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
int may_open(struct nameidata *nd, int acc_mode, int flag)
{
- struct dentry *dentry = nd->dentry;
+ struct dentry *dentry = nd->path.dentry;
struct inode *inode = dentry->d_inode;
int error;
@@ -1616,7 +1618,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
flag &= ~O_TRUNC;
} else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
- if (nd->mnt->mnt_flags & MNT_NODEV)
+ if (nd->path.mnt->mnt_flags & MNT_NODEV)
return -EACCES;
flag &= ~O_TRUNC;
@@ -1678,14 +1680,14 @@ static int open_namei_create(struct nameidata *nd, struct path *path,
int flag, int mode)
{
int error;
- struct dentry *dir = nd->dentry;
+ struct dentry *dir = nd->path.dentry;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current->fs->umask;
error = vfs_create(dir->d_inode, path->dentry, mode, nd);
mutex_unlock(&dir->d_inode->i_mutex);
- dput(nd->dentry);
- nd->dentry = path->dentry;
+ dput(nd->path.dentry);
+ nd->path.dentry = path->dentry;
if (error)
return error;
/* Don't check for write permission, don't truncate */
@@ -1752,11 +1754,11 @@ int open_namei(int dfd, const char *pathname, int flag,
if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len])
goto exit;
- dir = nd->dentry;
+ dir = nd->path.dentry;
nd->flags &= ~LOOKUP_PARENT;
mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
- path.mnt = nd->mnt;
+ path.mnt = nd->path.mnt;
do_last:
error = PTR_ERR(path.dentry);
@@ -1812,11 +1814,11 @@ ok:
return 0;
exit_dput:
- dput_path(&path, nd);
+ path_put_conditional(&path, nd);
exit:
if (!IS_ERR(nd->intent.open.file))
release_open_intent(nd);
- path_release(nd);
+ path_put(&nd->path);
return error;
do_link:
@@ -1861,10 +1863,10 @@ do_link:
__putname(nd->last.name);
goto exit;
}
- dir = nd->dentry;
+ dir = nd->path.dentry;
mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
- path.mnt = nd->mnt;
+ path.mnt = nd->path.mnt;
__putname(nd->last.name);
goto do_last;
}
@@ -1877,13 +1879,13 @@ do_link:
* Simple function to lookup and return a dentry and create it
* if it doesn't exist. Is SMP-safe.
*
- * Returns with nd->dentry->d_inode->i_mutex locked.
+ * Returns with nd->path.dentry->d_inode->i_mutex locked.
*/
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
- mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
@@ -1962,19 +1964,19 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
dentry = lookup_create(&nd, 0);
error = PTR_ERR(dentry);
- if (!IS_POSIXACL(nd.dentry->d_inode))
+ if (!IS_POSIXACL(nd.path.dentry->d_inode))
mode &= ~current->fs->umask;
if (!IS_ERR(dentry)) {
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd);
+ error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
break;
case S_IFCHR: case S_IFBLK:
- error = vfs_mknod(nd.dentry->d_inode,dentry,mode,
+ error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
- error = vfs_mknod(nd.dentry->d_inode,dentry,mode,0);
+ error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
break;
case S_IFDIR:
error = -EPERM;
@@ -1984,8 +1986,8 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
}
dput(dentry);
}
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- path_release(&nd);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
out:
putname(tmp);
@@ -2039,13 +2041,13 @@ asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
if (IS_ERR(dentry))
goto out_unlock;
- if (!IS_POSIXACL(nd.dentry->d_inode))
+ if (!IS_POSIXACL(nd.path.dentry->d_inode))
mode &= ~current->fs->umask;
- error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
+ error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
dput(dentry);
out_unlock:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- path_release(&nd);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
out:
putname(tmp);
out_err:
@@ -2143,17 +2145,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = -EBUSY;
goto exit1;
}
- mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
- error = vfs_rmdir(nd.dentry->d_inode, dentry);
+ error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
dput(dentry);
exit2:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
- path_release(&nd);
+ path_put(&nd.path);
exit:
putname(name);
return error;
@@ -2219,7 +2221,7 @@ static long do_unlinkat(int dfd, const char __user *pathname)
error = -EISDIR;
if (nd.last_type != LAST_NORM)
goto exit1;
- mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
@@ -2229,15 +2231,15 @@ static long do_unlinkat(int dfd, const char __user *pathname)
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
- error = vfs_unlink(nd.dentry->d_inode, dentry);
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry);
exit2:
dput(dentry);
}
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
- path_release(&nd);
+ path_put(&nd.path);
exit:
putname(name);
return error;
@@ -2310,11 +2312,11 @@ asmlinkage long sys_symlinkat(const char __user *oldname,
if (IS_ERR(dentry))
goto out_unlock;
- error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
+ error = vfs_symlink(nd.path.dentry->d_inode, dentry, from, S_IALLUGO);
dput(dentry);
out_unlock:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- path_release(&nd);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
out:
putname(to);
out_putname:
@@ -2399,20 +2401,20 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
if (error)
goto out;
error = -EXDEV;
- if (old_nd.mnt != nd.mnt)
+ if (old_nd.path.mnt != nd.path.mnt)
goto out_release;
new_dentry = lookup_create(&nd, 0);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out_unlock;
- error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
+ error = vfs_link(old_nd.path.dentry, nd.path.dentry->d_inode, new_dentry);
dput(new_dentry);
out_unlock:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out_release:
- path_release(&nd);
+ path_put(&nd.path);
out:
- path_release(&old_nd);
+ path_put(&old_nd.path);
exit:
putname(to);
@@ -2588,15 +2590,15 @@ static int do_rename(int olddfd, const char *oldname,
goto exit1;
error = -EXDEV;
- if (oldnd.mnt != newnd.mnt)
+ if (oldnd.path.mnt != newnd.path.mnt)
goto exit2;
- old_dir = oldnd.dentry;
+ old_dir = oldnd.path.dentry;
error = -EBUSY;
if (oldnd.last_type != LAST_NORM)
goto exit2;
- new_dir = newnd.dentry;
+ new_dir = newnd.path.dentry;
if (newnd.last_type != LAST_NORM)
goto exit2;
@@ -2640,9 +2642,9 @@ exit4:
exit3:
unlock_rename(new_dir, old_dir);
exit2:
- path_release(&newnd);
+ path_put(&newnd.path);
exit1:
- path_release(&oldnd);
+ path_put(&oldnd.path);
exit:
return error;
}
@@ -2816,7 +2818,6 @@ EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(path_lookup);
EXPORT_SYMBOL(vfs_path_lookup);
-EXPORT_SYMBOL(path_release);
EXPORT_SYMBOL(permission);
EXPORT_SYMBOL(vfs_permission);
EXPORT_SYMBOL(file_permission);
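The path_get()/path_put() pair added and exported in the namei.c diff above replaces the
old dget()/mntget() and path_release() idioms used throughout this patch. A minimal usage
sketch; the function and its argument are illustrative only, and the header locations are
those used by this series.

#include <linux/namei.h>
#include <linux/path.h>

static void borrow_path(struct path *p)
{
        path_get(p);            /* mntget() + dget() in one call */
        /* ... safe to use p->mnt and p->dentry here ... */
        path_put(p);            /* dput() + mntput() in one call */
}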
diff --git a/fs/namespace.c b/fs/namespace.c
index 63ced21c12d..7953c96a207 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -157,13 +157,13 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
- old_nd->dentry = mnt->mnt_mountpoint;
- old_nd->mnt = mnt->mnt_parent;
+ old_nd->path.dentry = mnt->mnt_mountpoint;
+ old_nd->path.mnt = mnt->mnt_parent;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt_root;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_hash);
- old_nd->dentry->d_mounted--;
+ old_nd->path.dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
@@ -176,10 +176,10 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
- mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
+ mnt_set_mountpoint(nd->path.mnt, nd->path.dentry, mnt);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
- hash(nd->mnt, nd->dentry));
- list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
+ hash(nd->path.mnt, nd->path.dentry));
+ list_add_tail(&mnt->mnt_child, &nd->path.mnt->mnt_mounts);
}
/*
@@ -408,10 +408,11 @@ static int show_vfsmnt(struct seq_file *m, void *v)
{ 0, NULL }
};
struct proc_fs_info *fs_infop;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
seq_putc(m, ' ');
- seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+ seq_path(m, &mnt_path, " \t\n\\");
seq_putc(m, ' ');
mangle(m, mnt->mnt_sb->s_type->name);
if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
@@ -443,6 +444,7 @@ struct seq_operations mounts_op = {
static int show_vfsstat(struct seq_file *m, void *v)
{
struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
int err = 0;
/* device */
@@ -454,7 +456,7 @@ static int show_vfsstat(struct seq_file *m, void *v)
/* mount point */
seq_puts(m, " mounted on ");
- seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+ seq_path(m, &mnt_path, " \t\n\\");
seq_putc(m, ' ');
/* file system type */
@@ -593,7 +595,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
* (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
*/
if (flags & MNT_EXPIRE) {
- if (mnt == current->fs->rootmnt ||
+ if (mnt == current->fs->root.mnt ||
flags & (MNT_FORCE | MNT_DETACH))
return -EINVAL;
@@ -628,7 +630,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
* /reboot - static binary that would close all descriptors and
* call reboot(9). Then init(8) could umount root and exec /reboot.
*/
- if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
+ if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
/*
* Special case for "unmounting" root ...
* we just try to remount it readonly.
@@ -679,18 +681,20 @@ asmlinkage long sys_umount(char __user * name, int flags)
if (retval)
goto out;
retval = -EINVAL;
- if (nd.dentry != nd.mnt->mnt_root)
+ if (nd.path.dentry != nd.path.mnt->mnt_root)
goto dput_and_out;
- if (!check_mnt(nd.mnt))
+ if (!check_mnt(nd.path.mnt))
goto dput_and_out;
retval = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto dput_and_out;
- retval = do_umount(nd.mnt, flags);
+ retval = do_umount(nd.path.mnt, flags);
dput_and_out:
- path_release_on_umount(&nd);
+ /* we mustn't call path_put() as that would clear mnt_expiry_mark */
+ dput(nd.path.dentry);
+ mntput_no_expire(nd.path.mnt);
out:
return retval;
}
@@ -713,10 +717,10 @@ static int mount_is_safe(struct nameidata *nd)
return 0;
return -EPERM;
#ifdef notyet
- if (S_ISLNK(nd->dentry->d_inode->i_mode))
+ if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
return -EPERM;
- if (nd->dentry->d_inode->i_mode & S_ISVTX) {
- if (current->uid != nd->dentry->d_inode->i_uid)
+ if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
+ if (current->uid != nd->path.dentry->d_inode->i_uid)
return -EPERM;
}
if (vfs_permission(nd, MAY_WRITE))
@@ -765,8 +769,8 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
q = q->mnt_parent;
}
p = s;
- nd.mnt = q;
- nd.dentry = p->mnt_mountpoint;
+ nd.path.mnt = q;
+ nd.path.dentry = p->mnt_mountpoint;
q = clone_mnt(p, p->mnt_root, flag);
if (!q)
goto Enomem;
@@ -875,8 +879,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
struct nameidata *nd, struct nameidata *parent_nd)
{
LIST_HEAD(tree_list);
- struct vfsmount *dest_mnt = nd->mnt;
- struct dentry *dest_dentry = nd->dentry;
+ struct vfsmount *dest_mnt = nd->path.mnt;
+ struct dentry *dest_dentry = nd->path.dentry;
struct vfsmount *child, *p;
if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
@@ -911,13 +915,13 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
if (mnt->mnt_sb->s_flags & MS_NOUSER)
return -EINVAL;
- if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+ if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
S_ISDIR(mnt->mnt_root->d_inode->i_mode))
return -ENOTDIR;
err = -ENOENT;
- mutex_lock(&nd->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(nd->dentry->d_inode))
+ mutex_lock(&nd->path.dentry->d_inode->i_mutex);
+ if (IS_DEADDIR(nd->path.dentry->d_inode))
goto out_unlock;
err = security_sb_check_sb(mnt, nd);
@@ -925,10 +929,10 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
goto out_unlock;
err = -ENOENT;
- if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
+ if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry))
err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
- mutex_unlock(&nd->dentry->d_inode->i_mutex);
+ mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
if (!err)
security_sb_post_addmount(mnt, nd);
return err;
@@ -940,14 +944,14 @@ out_unlock:
*/
static noinline int do_change_type(struct nameidata *nd, int flag)
{
- struct vfsmount *m, *mnt = nd->mnt;
+ struct vfsmount *m, *mnt = nd->path.mnt;
int recurse = flag & MS_REC;
int type = flag & ~MS_REC;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (nd->dentry != nd->mnt->mnt_root)
+ if (nd->path.dentry != nd->path.mnt->mnt_root)
return -EINVAL;
down_write(&namespace_sem);
@@ -979,17 +983,17 @@ static noinline int do_loopback(struct nameidata *nd, char *old_name,
down_write(&namespace_sem);
err = -EINVAL;
- if (IS_MNT_UNBINDABLE(old_nd.mnt))
- goto out;
+ if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
+ goto out;
- if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+ if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
goto out;
err = -ENOMEM;
if (recurse)
- mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
+ mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
else
- mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);
+ mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);
if (!mnt)
goto out;
@@ -1005,7 +1009,7 @@ static noinline int do_loopback(struct nameidata *nd, char *old_name,
out:
up_write(&namespace_sem);
- path_release(&old_nd);
+ path_put(&old_nd.path);
return err;
}
@@ -1019,24 +1023,24 @@ static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
void *data)
{
int err;
- struct super_block *sb = nd->mnt->mnt_sb;
+ struct super_block *sb = nd->path.mnt->mnt_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!check_mnt(nd->mnt))
+ if (!check_mnt(nd->path.mnt))
return -EINVAL;
- if (nd->dentry != nd->mnt->mnt_root)
+ if (nd->path.dentry != nd->path.mnt->mnt_root)
return -EINVAL;
down_write(&sb->s_umount);
err = do_remount_sb(sb, flags, data, 0);
if (!err)
- nd->mnt->mnt_flags = mnt_flags;
+ nd->path.mnt->mnt_flags = mnt_flags;
up_write(&sb->s_umount);
if (!err)
- security_sb_post_remount(nd->mnt, flags, data);
+ security_sb_post_remount(nd->path.mnt, flags, data);
return err;
}
@@ -1067,61 +1071,65 @@ static noinline int do_move_mount(struct nameidata *nd, char *old_name)
return err;
down_write(&namespace_sem);
- while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
;
err = -EINVAL;
- if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+ if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
goto out;
err = -ENOENT;
- mutex_lock(&nd->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(nd->dentry->d_inode))
+ mutex_lock(&nd->path.dentry->d_inode->i_mutex);
+ if (IS_DEADDIR(nd->path.dentry->d_inode))
goto out1;
- if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
+ if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
goto out1;
err = -EINVAL;
- if (old_nd.dentry != old_nd.mnt->mnt_root)
+ if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
goto out1;
- if (old_nd.mnt == old_nd.mnt->mnt_parent)
+ if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
goto out1;
- if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
- S_ISDIR(old_nd.dentry->d_inode->i_mode))
+ if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
+ S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
goto out1;
/*
* Don't move a mount residing in a shared parent.
*/
- if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
+ if (old_nd.path.mnt->mnt_parent &&
+ IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
goto out1;
/*
* Don't move a mount tree containing unbindable mounts to a destination
* mount which is shared.
*/
- if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
+ if (IS_MNT_SHARED(nd->path.mnt) &&
+ tree_contains_unbindable(old_nd.path.mnt))
goto out1;
err = -ELOOP;
- for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
- if (p == old_nd.mnt)
+ for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
+ if (p == old_nd.path.mnt)
goto out1;
- if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
+ err = attach_recursive_mnt(old_nd.path.mnt, nd, &parent_nd);
+ if (err)
goto out1;
spin_lock(&vfsmount_lock);
/* if the mount is moved, it should no longer be expire
* automatically */
- list_del_init(&old_nd.mnt->mnt_expire);
+ list_del_init(&old_nd.path.mnt->mnt_expire);
spin_unlock(&vfsmount_lock);
out1:
- mutex_unlock(&nd->dentry->d_inode->i_mutex);
+ mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
up_write(&namespace_sem);
if (!err)
- path_release(&parent_nd);
- path_release(&old_nd);
+ path_put(&parent_nd.path);
+ path_put(&old_nd.path);
return err;
}
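Besides the mechanical nd->dentry/nd->mnt conversion, this hunk also untangles an assignment buried in a condition, matching the kernel's preferred error-handling style; side by side:

        /* old: assignment inside the if () */
        if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
                goto out1;

        /* new: assign first, then test */
        err = attach_recursive_mnt(old_nd.path.mnt, nd, &parent_nd);
        if (err)
                goto out1;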
@@ -1160,16 +1168,17 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
down_write(&namespace_sem);
/* Something was mounted here while we slept */
- while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
;
err = -EINVAL;
- if (!check_mnt(nd->mnt))
+ if (!check_mnt(nd->path.mnt))
goto unlock;
/* Refuse the same filesystem on the same mount point */
err = -EBUSY;
- if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
- nd->mnt->mnt_root == nd->dentry)
+ if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
+ nd->path.mnt->mnt_root == nd->path.dentry)
goto unlock;
err = -EINVAL;
@@ -1505,7 +1514,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
retval = do_new_mount(&nd, type_page, flags, mnt_flags,
dev_name, data_page);
dput_out:
- path_release(&nd);
+ path_put(&nd.path);
return retval;
}
@@ -1552,17 +1561,17 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
while (p) {
q->mnt_ns = new_ns;
if (fs) {
- if (p == fs->rootmnt) {
+ if (p == fs->root.mnt) {
rootmnt = p;
- fs->rootmnt = mntget(q);
+ fs->root.mnt = mntget(q);
}
- if (p == fs->pwdmnt) {
+ if (p == fs->pwd.mnt) {
pwdmnt = p;
- fs->pwdmnt = mntget(q);
+ fs->pwd.mnt = mntget(q);
}
- if (p == fs->altrootmnt) {
+ if (p == fs->altroot.mnt) {
altrootmnt = p;
- fs->altrootmnt = mntget(q);
+ fs->altroot.mnt = mntget(q);
}
}
p = next_mnt(p, mnt_ns->root);
@@ -1643,44 +1652,35 @@ out1:
* Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
* It can block. Requires the big lock held.
*/
-void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
- struct dentry *dentry)
+void set_fs_root(struct fs_struct *fs, struct path *path)
{
- struct dentry *old_root;
- struct vfsmount *old_rootmnt;
+ struct path old_root;
+
write_lock(&fs->lock);
old_root = fs->root;
- old_rootmnt = fs->rootmnt;
- fs->rootmnt = mntget(mnt);
- fs->root = dget(dentry);
+ fs->root = *path;
+ path_get(path);
write_unlock(&fs->lock);
- if (old_root) {
- dput(old_root);
- mntput(old_rootmnt);
- }
+ if (old_root.dentry)
+ path_put(&old_root);
}
/*
* Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
* It can block. Requires the big lock held.
*/
-void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
- struct dentry *dentry)
+void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
- struct dentry *old_pwd;
- struct vfsmount *old_pwdmnt;
+ struct path old_pwd;
write_lock(&fs->lock);
old_pwd = fs->pwd;
- old_pwdmnt = fs->pwdmnt;
- fs->pwdmnt = mntget(mnt);
- fs->pwd = dget(dentry);
+ fs->pwd = *path;
+ path_get(path);
write_unlock(&fs->lock);
- if (old_pwd) {
- dput(old_pwd);
- mntput(old_pwdmnt);
- }
+ if (old_pwd.dentry)
+ path_put(&old_pwd);
}
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
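The dget()/mntget() and dput()/mntput() pairs deleted from set_fs_root() and set_fs_pwd() are what path_get() and path_put() encapsulate. As a sketch, the two helpers behave essentially as follows, taking the mount reference first and dropping it last, since a dentry is only meaningful while its mount stays pinned:

        void path_get(struct path *path)
        {
                mntget(path->mnt);
                dget(path->dentry);
        }

        void path_put(struct path *path)
        {
                dput(path->dentry);
                mntput(path->mnt);
        }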
@@ -1695,12 +1695,12 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
if (fs) {
atomic_inc(&fs->count);
task_unlock(p);
- if (fs->root == old_nd->dentry
- && fs->rootmnt == old_nd->mnt)
- set_fs_root(fs, new_nd->mnt, new_nd->dentry);
- if (fs->pwd == old_nd->dentry
- && fs->pwdmnt == old_nd->mnt)
- set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
+ if (fs->root.dentry == old_nd->path.dentry
+ && fs->root.mnt == old_nd->path.mnt)
+ set_fs_root(fs, &new_nd->path);
+ if (fs->pwd.dentry == old_nd->path.dentry
+ && fs->pwd.mnt == old_nd->path.mnt)
+ set_fs_pwd(fs, &new_nd->path);
put_fs_struct(fs);
} else
task_unlock(p);
@@ -1750,7 +1750,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
if (error)
goto out0;
error = -EINVAL;
- if (!check_mnt(new_nd.mnt))
+ if (!check_mnt(new_nd.path.mnt))
goto out1;
error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
@@ -1759,74 +1759,78 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
error = security_sb_pivotroot(&old_nd, &new_nd);
if (error) {
- path_release(&old_nd);
+ path_put(&old_nd.path);
goto out1;
}
read_lock(&current->fs->lock);
- user_nd.mnt = mntget(current->fs->rootmnt);
- user_nd.dentry = dget(current->fs->root);
+ user_nd.path = current->fs->root;
+ path_get(&current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
- mutex_lock(&old_nd.dentry->d_inode->i_mutex);
+ mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
error = -EINVAL;
- if (IS_MNT_SHARED(old_nd.mnt) ||
- IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
- IS_MNT_SHARED(user_nd.mnt->mnt_parent))
+ if (IS_MNT_SHARED(old_nd.path.mnt) ||
+ IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
+ IS_MNT_SHARED(user_nd.path.mnt->mnt_parent))
goto out2;
- if (!check_mnt(user_nd.mnt))
+ if (!check_mnt(user_nd.path.mnt))
goto out2;
error = -ENOENT;
- if (IS_DEADDIR(new_nd.dentry->d_inode))
+ if (IS_DEADDIR(new_nd.path.dentry->d_inode))
goto out2;
- if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
+ if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
goto out2;
- if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
+ if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
goto out2;
error = -EBUSY;
- if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
+ if (new_nd.path.mnt == user_nd.path.mnt ||
+ old_nd.path.mnt == user_nd.path.mnt)
goto out2; /* loop, on the same file system */
error = -EINVAL;
- if (user_nd.mnt->mnt_root != user_nd.dentry)
+ if (user_nd.path.mnt->mnt_root != user_nd.path.dentry)
goto out2; /* not a mountpoint */
- if (user_nd.mnt->mnt_parent == user_nd.mnt)
+ if (user_nd.path.mnt->mnt_parent == user_nd.path.mnt)
goto out2; /* not attached */
- if (new_nd.mnt->mnt_root != new_nd.dentry)
+ if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
goto out2; /* not a mountpoint */
- if (new_nd.mnt->mnt_parent == new_nd.mnt)
+ if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
goto out2; /* not attached */
- tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+ /* make sure we can reach put_old from new_root */
+ tmp = old_nd.path.mnt;
spin_lock(&vfsmount_lock);
- if (tmp != new_nd.mnt) {
+ if (tmp != new_nd.path.mnt) {
for (;;) {
if (tmp->mnt_parent == tmp)
goto out3; /* already mounted on put_old */
- if (tmp->mnt_parent == new_nd.mnt)
+ if (tmp->mnt_parent == new_nd.path.mnt)
break;
tmp = tmp->mnt_parent;
}
- if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
+ if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
goto out3;
- } else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+ } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
goto out3;
- detach_mnt(new_nd.mnt, &parent_nd);
- detach_mnt(user_nd.mnt, &root_parent);
- attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */
- attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+ detach_mnt(new_nd.path.mnt, &parent_nd);
+ detach_mnt(user_nd.path.mnt, &root_parent);
+ /* mount old root on put_old */
+ attach_mnt(user_nd.path.mnt, &old_nd);
+ /* mount new_root on / */
+ attach_mnt(new_nd.path.mnt, &root_parent);
touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&user_nd, &new_nd);
security_sb_post_pivotroot(&user_nd, &new_nd);
error = 0;
- path_release(&root_parent);
- path_release(&parent_nd);
+ path_put(&root_parent.path);
+ path_put(&parent_nd.path);
out2:
- mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
+ mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
- path_release(&user_nd);
- path_release(&old_nd);
+ path_put(&user_nd.path);
+ path_put(&old_nd.path);
out1:
- path_release(&new_nd);
+ path_put(&new_nd.path);
out0:
unlock_kernel();
return error;
@@ -1839,6 +1843,7 @@ static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
struct mnt_namespace *ns;
+ struct path root;
mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
if (IS_ERR(mnt))
@@ -1857,8 +1862,11 @@ static void __init init_mount_tree(void)
init_task.nsproxy->mnt_ns = ns;
get_mnt_ns(ns);
- set_fs_pwd(current->fs, ns->root, ns->root->mnt_root);
- set_fs_root(current->fs, ns->root, ns->root->mnt_root);
+ root.mnt = ns->root;
+ root.dentry = ns->root->mnt_root;
+
+ set_fs_pwd(current->fs, &root);
+ set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
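init_mount_tree() above shows the caller-side pattern used with the new helpers: assemble a struct path on the stack and pass its address; the callee takes its own references via path_get(), so the caller's existing references are untouched. Condensed:

        struct path root = {
                .mnt    = ns->root,
                .dentry = ns->root->mnt_root,
        };

        set_fs_pwd(current->fs, &root);  /* path_get()s internally */
        set_fs_root(current->fs, &root);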
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index be4ce1c3a3d..607f6eb9cdb 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -107,38 +107,40 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
BUG_ON(IS_ROOT(dentry));
dprintk("%s: enter\n", __FUNCTION__);
- dput(nd->dentry);
- nd->dentry = dget(dentry);
+ dput(nd->path.dentry);
+ nd->path.dentry = dget(dentry);
/* Look it up again */
- parent = dget_parent(nd->dentry);
+ parent = dget_parent(nd->path.dentry);
err = server->nfs_client->rpc_ops->lookup(parent->d_inode,
- &nd->dentry->d_name,
+ &nd->path.dentry->d_name,
&fh, &fattr);
dput(parent);
if (err != 0)
goto out_err;
if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL)
- mnt = nfs_do_refmount(nd->mnt, nd->dentry);
+ mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry);
else
- mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh, &fattr);
+ mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh,
+ &fattr);
err = PTR_ERR(mnt);
if (IS_ERR(mnt))
goto out_err;
mntget(mnt);
- err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE, &nfs_automount_list);
+ err = do_add_mount(mnt, nd, nd->path.mnt->mnt_flags|MNT_SHRINKABLE,
+ &nfs_automount_list);
if (err < 0) {
mntput(mnt);
if (err == -EBUSY)
goto out_follow;
goto out_err;
}
- mntput(nd->mnt);
- dput(nd->dentry);
- nd->mnt = mnt;
- nd->dentry = dget(mnt->mnt_root);
+ mntput(nd->path.mnt);
+ dput(nd->path.dentry);
+ nd->path.mnt = mnt;
+ nd->path.dentry = dget(mnt->mnt_root);
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
out:
dprintk("%s: done, returned %d\n", __FUNCTION__, err);
@@ -146,10 +148,11 @@ out:
dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
return ERR_PTR(err);
out_err:
- path_release(nd);
+ path_put(&nd->path);
goto out;
out_follow:
- while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
;
err = 0;
goto out;
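As the loops above show, follow_down() itself is not converted here and still takes the vfsmount and dentry pointers separately, which is why call sites spell out &nd->path.mnt and &nd->path.dentry. A hypothetical wrapper (not part of this patch) that hides the remaining split interface would be no more than:

        static inline void skip_mountpoints(struct path *path)
        {
                while (d_mountpoint(path->dentry) &&
                       follow_down(&path->mnt, &path->dentry))
                        ;
        }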
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 027e1095256..7ce07862c2f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1384,11 +1384,11 @@ out_close:
struct dentry *
nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *parent;
struct path path = {
- .mnt = nd->mnt,
+ .mnt = nd->path.mnt,
.dentry = dentry,
};
+ struct dentry *parent;
struct iattr attr;
struct rpc_cred *cred;
struct nfs4_state *state;
@@ -1433,7 +1433,7 @@ int
nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
{
struct path path = {
- .mnt = nd->mnt,
+ .mnt = nd->path.mnt,
.dentry = dentry,
};
struct rpc_cred *cred;
@@ -1885,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nameidata *nd)
{
struct path path = {
- .mnt = nd->mnt,
+ .mnt = nd->path.mnt,
.dentry = dentry,
};
struct nfs4_state *state;
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index 51f1b31acbf..aed8145d908 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -41,9 +41,9 @@ static struct file *do_open(char *name, int flags)
error = may_open(&nd, MAY_WRITE, FMODE_WRITE);
if (!error)
- return dentry_open(nd.dentry, nd.mnt, flags);
+ return dentry_open(nd.path.dentry, nd.path.mnt, flags);
- path_release(&nd);
+ path_put(&nd.path);
return ERR_PTR(error);
}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 346570f6d84..8a6f7c924c7 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -63,10 +63,8 @@ static void expkey_put(struct kref *ref)
struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
if (test_bit(CACHE_VALID, &key->h.flags) &&
- !test_bit(CACHE_NEGATIVE, &key->h.flags)) {
- dput(key->ek_dentry);
- mntput(key->ek_mnt);
- }
+ !test_bit(CACHE_NEGATIVE, &key->h.flags))
+ path_put(&key->ek_path);
auth_domain_put(key->ek_client);
kfree(key);
}
@@ -169,15 +167,14 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out;
dprintk("Found the path %s\n", buf);
- key.ek_mnt = nd.mnt;
- key.ek_dentry = nd.dentry;
-
+ key.ek_path = nd.path;
+
ek = svc_expkey_update(&key, ek);
if (ek)
cache_put(&ek->h, &svc_expkey_cache);
else
err = -ENOMEM;
- path_release(&nd);
+ path_put(&nd.path);
}
cache_flush();
out:
@@ -206,7 +203,7 @@ static int expkey_show(struct seq_file *m,
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags)) {
seq_printf(m, " ");
- seq_path(m, ek->ek_mnt, ek->ek_dentry, "\\ \t\n");
+ seq_path(m, &ek->ek_path, "\\ \t\n");
}
seq_printf(m, "\n");
return 0;
@@ -243,8 +240,8 @@ static inline void expkey_update(struct cache_head *cnew,
struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
- new->ek_mnt = mntget(item->ek_mnt);
- new->ek_dentry = dget(item->ek_dentry);
+ new->ek_path = item->ek_path;
+ path_get(&item->ek_path);
}
static struct cache_head *expkey_alloc(void)
@@ -332,10 +329,9 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
- dput(exp->ex_dentry);
- mntput(exp->ex_mnt);
+ path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
- kfree(exp->ex_path);
+ kfree(exp->ex_pathname);
nfsd4_fslocs_free(&exp->ex_fslocs);
kfree(exp);
}
@@ -349,7 +345,7 @@ static void svc_export_request(struct cache_detail *cd,
char *pth;
qword_add(bpp, blen, exp->ex_client->name);
- pth = d_path(exp->ex_dentry, exp->ex_mnt, *bpp, *blen);
+ pth = d_path(&exp->ex_path, *bpp, *blen);
if (IS_ERR(pth)) {
/* is this correct? */
(*bpp)[0] = '\n';
@@ -507,8 +503,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
struct svc_export exp, *expp;
int an_int;
- nd.dentry = NULL;
- exp.ex_path = NULL;
+ nd.path.dentry = NULL;
+ exp.ex_pathname = NULL;
/* fs locations */
exp.ex_fslocs.locations = NULL;
@@ -547,11 +543,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.h.flags = 0;
exp.ex_client = dom;
- exp.ex_mnt = nd.mnt;
- exp.ex_dentry = nd.dentry;
- exp.ex_path = kstrdup(buf, GFP_KERNEL);
+ exp.ex_path.mnt = nd.path.mnt;
+ exp.ex_path.dentry = nd.path.dentry;
+ exp.ex_pathname = kstrdup(buf, GFP_KERNEL);
err = -ENOMEM;
- if (!exp.ex_path)
+ if (!exp.ex_pathname)
goto out;
/* expiry */
@@ -610,7 +606,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out;
}
- err = check_export(nd.dentry->d_inode, exp.ex_flags,
+ err = check_export(nd.path.dentry->d_inode, exp.ex_flags,
exp.ex_uuid);
if (err) goto out;
}
@@ -628,9 +624,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
out:
nfsd4_fslocs_free(&exp.ex_fslocs);
kfree(exp.ex_uuid);
- kfree(exp.ex_path);
- if (nd.dentry)
- path_release(&nd);
+ kfree(exp.ex_pathname);
+ if (nd.path.dentry)
+ path_put(&nd.path);
out_no_path:
if (dom)
auth_domain_put(dom);
@@ -653,7 +649,7 @@ static int svc_export_show(struct seq_file *m,
return 0;
}
exp = container_of(h, struct svc_export, h);
- seq_path(m, exp->ex_mnt, exp->ex_dentry, " \t\n\\");
+ seq_path(m, &exp->ex_path, " \t\n\\");
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
seq_putc(m, '(');
@@ -680,8 +676,8 @@ static int svc_export_match(struct cache_head *a, struct cache_head *b)
struct svc_export *orig = container_of(a, struct svc_export, h);
struct svc_export *new = container_of(b, struct svc_export, h);
return orig->ex_client == new->ex_client &&
- orig->ex_dentry == new->ex_dentry &&
- orig->ex_mnt == new->ex_mnt;
+ orig->ex_path.dentry == new->ex_path.dentry &&
+ orig->ex_path.mnt == new->ex_path.mnt;
}
static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
@@ -691,9 +687,9 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
kref_get(&item->ex_client->ref);
new->ex_client = item->ex_client;
- new->ex_dentry = dget(item->ex_dentry);
- new->ex_mnt = mntget(item->ex_mnt);
- new->ex_path = NULL;
+ new->ex_path.dentry = dget(item->ex_path.dentry);
+ new->ex_path.mnt = mntget(item->ex_path.mnt);
+ new->ex_pathname = NULL;
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
@@ -711,8 +707,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
new->ex_fsid = item->ex_fsid;
new->ex_uuid = item->ex_uuid;
item->ex_uuid = NULL;
- new->ex_path = item->ex_path;
- item->ex_path = NULL;
+ new->ex_pathname = item->ex_pathname;
+ item->ex_pathname = NULL;
new->ex_fslocs.locations = item->ex_fslocs.locations;
item->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
@@ -755,8 +751,8 @@ svc_export_lookup(struct svc_export *exp)
struct cache_head *ch;
int hash;
hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
- hash ^= hash_ptr(exp->ex_dentry, EXPORT_HASHBITS);
- hash ^= hash_ptr(exp->ex_mnt, EXPORT_HASHBITS);
+ hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
+ hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
hash);
@@ -772,8 +768,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
struct cache_head *ch;
int hash;
hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_dentry, EXPORT_HASHBITS);
- hash ^= hash_ptr(old->ex_mnt, EXPORT_HASHBITS);
+ hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS);
+ hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS);
ch = sunrpc_cache_update(&svc_export_cache, &new->h,
&old->h,
@@ -815,8 +811,7 @@ static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
key.ek_client = clp;
key.ek_fsidtype = fsid_type;
memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
- key.ek_mnt = exp->ex_mnt;
- key.ek_dentry = exp->ex_dentry;
+ key.ek_path = exp->ex_path;
key.h.expiry_time = NEVER;
key.h.flags = 0;
@@ -865,13 +860,13 @@ static svc_export *exp_get_by_name(svc_client *clp, struct vfsmount *mnt,
{
struct svc_export *exp, key;
int err;
-
+
if (!clp)
return ERR_PTR(-ENOENT);
key.ex_client = clp;
- key.ex_mnt = mnt;
- key.ex_dentry = dentry;
+ key.ex_path.mnt = mnt;
+ key.ex_path.dentry = dentry;
exp = svc_export_lookup(&key);
if (exp == NULL)
@@ -968,7 +963,7 @@ static int exp_fsid_hash(svc_client *clp, struct svc_export *exp)
static int exp_hash(struct auth_domain *clp, struct svc_export *exp)
{
u32 fsid[2];
- struct inode *inode = exp->ex_dentry->d_inode;
+ struct inode *inode = exp->ex_path.dentry->d_inode;
dev_t dev = inode->i_sb->s_dev;
if (old_valid_dev(dev)) {
@@ -982,7 +977,7 @@ static int exp_hash(struct auth_domain *clp, struct svc_export *exp)
static void exp_unhash(struct svc_export *exp)
{
struct svc_expkey *ek;
- struct inode *inode = exp->ex_dentry->d_inode;
+ struct inode *inode = exp->ex_path.dentry->d_inode;
ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
if (!IS_ERR(ek)) {
@@ -1030,15 +1025,16 @@ exp_export(struct nfsctl_export *nxp)
goto out_unlock;
err = -EINVAL;
- exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL);
+ exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL);
memset(&new, 0, sizeof(new));
/* must make sure there won't be an ex_fsid clash */
if ((nxp->ex_flags & NFSEXP_FSID) &&
(!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
- fsid_key->ek_mnt &&
- (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) )
+ fsid_key->ek_path.mnt &&
+ (fsid_key->ek_path.mnt != nd.path.mnt ||
+ fsid_key->ek_path.dentry != nd.path.dentry))
goto finish;
if (!IS_ERR(exp)) {
@@ -1054,7 +1050,7 @@ exp_export(struct nfsctl_export *nxp)
goto finish;
}
- err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL);
+ err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL);
if (err) goto finish;
err = -ENOMEM;
@@ -1063,12 +1059,11 @@ exp_export(struct nfsctl_export *nxp)
new.h.expiry_time = NEVER;
new.h.flags = 0;
- new.ex_path = kstrdup(nxp->ex_path, GFP_KERNEL);
- if (!new.ex_path)
+ new.ex_pathname = kstrdup(nxp->ex_path, GFP_KERNEL);
+ if (!new.ex_pathname)
goto finish;
new.ex_client = clp;
- new.ex_mnt = nd.mnt;
- new.ex_dentry = nd.dentry;
+ new.ex_path = nd.path;
new.ex_flags = nxp->ex_flags;
new.ex_anon_uid = nxp->ex_anon_uid;
new.ex_anon_gid = nxp->ex_anon_gid;
@@ -1089,15 +1084,14 @@ exp_export(struct nfsctl_export *nxp)
} else
err = 0;
finish:
- if (new.ex_path)
- kfree(new.ex_path);
+ kfree(new.ex_pathname);
if (exp)
exp_put(exp);
if (fsid_key && !IS_ERR(fsid_key))
cache_put(&fsid_key->h, &svc_expkey_cache);
if (clp)
auth_domain_put(clp);
- path_release(&nd);
+ path_put(&nd.path);
out_unlock:
exp_writeunlock();
out:
@@ -1148,8 +1142,8 @@ exp_unexport(struct nfsctl_export *nxp)
goto out_domain;
err = -EINVAL;
- exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
- path_release(&nd);
+ exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL);
+ path_put(&nd.path);
if (IS_ERR(exp))
goto out_domain;
@@ -1185,12 +1179,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
printk("nfsd: exp_rootfh path not found %s", path);
return err;
}
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
- path, nd.dentry, clp->name,
+ path, nd.path.dentry, clp->name,
inode->i_sb->s_id, inode->i_ino);
- exp = exp_parent(clp, nd.mnt, nd.dentry, NULL);
+ exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL);
if (IS_ERR(exp)) {
err = PTR_ERR(exp);
goto out;
@@ -1200,7 +1194,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
* fh must be initialized before calling fh_compose
*/
fh_init(&fh, maxsize);
- if (fh_compose(&fh, exp, nd.dentry, NULL))
+ if (fh_compose(&fh, exp, nd.path.dentry, NULL))
err = -EINVAL;
else
err = 0;
@@ -1208,7 +1202,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
fh_put(&fh);
exp_put(exp);
out:
- path_release(&nd);
+ path_put(&nd.path);
return err;
}
@@ -1220,7 +1214,7 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
if (IS_ERR(ek))
return ERR_CAST(ek);
- exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
+ exp = exp_get_by_name(clp, ek->ek_path.mnt, ek->ek_path.dentry, reqp);
cache_put(&ek->h, &svc_expkey_cache);
if (IS_ERR(exp))
@@ -1359,7 +1353,7 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
- rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
+ rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
if (rv)
goto out;
rv = check_nfsd_access(exp, rqstp);
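The export.c changes apply the same embedding to nfsd's cached export objects: the (ek_mnt, ek_dentry) and (ex_mnt, ex_dentry) pairs become struct path members, and the old ex_path string is renamed ex_pathname so the "path" name can be reused. A simplified sketch of the affected fields (other members omitted):

        struct svc_expkey {
                /* ... */
                struct path     ek_path;        /* was ek_mnt + ek_dentry */
        };

        struct svc_export {
                /* ... */
                struct path     ex_path;        /* was ex_mnt + ex_dentry */
                char            *ex_pathname;   /* was ex_path, the textual path */
        };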
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index eac82830bfd..c721a1e6e9d 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -67,7 +67,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
if (nfserr)
RETURN_STATUS(nfserr);
- err = vfs_getattr(resp->fh.fh_export->ex_mnt,
+ err = vfs_getattr(resp->fh.fh_export->ex_path.mnt,
resp->fh.fh_dentry, &resp->stat);
nfserr = nfserrno(err);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index d7647f70e02..17d0dd99720 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -218,7 +218,7 @@ encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
int err;
struct kstat stat;
- err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat);
+ err = vfs_getattr(fhp->fh_export->ex_path.mnt, dentry, &stat);
if (!err) {
*p++ = xdr_one; /* attributes follow */
lease_get_mtime(dentry->d_inode, &stat.mtime);
@@ -270,7 +270,7 @@ void fill_post_wcc(struct svc_fh *fhp)
if (fhp->fh_post_saved)
printk("nfsd: inode locked twice during operation.\n");
- err = vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry,
+ err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
if (err)
fhp->fh_post_saved = 0;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 1602cd00dd4..1ff90625860 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -120,9 +120,9 @@ out_no_tfm:
static void
nfsd4_sync_rec_dir(void)
{
- mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
- nfsd_sync_dir(rec_dir.dentry);
- mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+ nfsd_sync_dir(rec_dir.path.dentry);
+ mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
}
int
@@ -142,9 +142,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
nfs4_save_user(&uid, &gid);
/* lock the parent */
- mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+ mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
- dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
+ dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
goto out_unlock;
@@ -154,11 +154,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
goto out_put;
}
- status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU);
+ status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
out_put:
dput(dentry);
out_unlock:
- mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
if (status == 0) {
clp->cl_firststate = 1;
nfsd4_sync_rec_dir();
@@ -221,7 +221,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
nfs4_save_user(&uid, &gid);
- filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
+ filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY);
status = PTR_ERR(filp);
if (IS_ERR(filp))
goto out;
@@ -286,9 +286,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
- mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
- dentry = lookup_one_len(name, rec_dir.dentry, namlen);
- mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+ mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex);
+ dentry = lookup_one_len(name, rec_dir.path.dentry, namlen);
+ mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
return status;
@@ -297,7 +297,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
if (!dentry->d_inode)
goto out;
- status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
+ status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry);
out:
dput(dentry);
return status;
@@ -347,12 +347,12 @@ nfsd4_recdir_purge_old(void) {
if (!rec_dir_init)
return;
- status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old);
+ status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old);
if (status == 0)
nfsd4_sync_rec_dir();
if (status)
printk("nfsd4: failed to purge old clients from recovery"
- " directory %s\n", rec_dir.dentry->d_name.name);
+ " directory %s\n", rec_dir.path.dentry->d_name.name);
return;
}
@@ -373,10 +373,10 @@ int
nfsd4_recdir_load(void) {
int status;
- status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir);
+ status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir);
if (status)
printk("nfsd4: failed loading clients from recovery"
- " directory %s\n", rec_dir.dentry->d_name.name);
+ " directory %s\n", rec_dir.path.dentry->d_name.name);
return status;
}
@@ -415,5 +415,5 @@ nfsd4_shutdown_recdir(void)
if (!rec_dir_init)
return;
rec_dir_init = 0;
- path_release(&rec_dir);
+ path_put(&rec_dir.path);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f6744bc03da..bcb97d8e8b8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3261,11 +3261,11 @@ nfs4_reset_recoverydir(char *recdir)
if (status)
return status;
status = -ENOTDIR;
- if (S_ISDIR(nd.dentry->d_inode->i_mode)) {
+ if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) {
nfs4_set_recdir(recdir);
status = 0;
}
- path_release(&nd);
+ path_put(&nd.path);
return status;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b0592e7c378..0e6a179ecca 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1330,9 +1330,9 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *
*stat = exp_pseudoroot(rqstp, &tmp_fh);
if (*stat)
return NULL;
- rootpath = tmp_fh.fh_export->ex_path;
+ rootpath = tmp_fh.fh_export->ex_pathname;
- path = exp->ex_path;
+ path = exp->ex_pathname;
if (strncmp(path, rootpath, strlen(rootpath))) {
dprintk("nfsd: fs_locations failed;"
@@ -1481,7 +1481,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out;
}
- err = vfs_getattr(exp->ex_mnt, dentry, &stat);
+ err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
if (err)
goto out_nfserr;
if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
@@ -1838,9 +1838,9 @@ out_acl:
* and this is the root of a cross-mounted filesystem.
*/
if (ignore_crossmnt == 0 &&
- exp->ex_mnt->mnt_root->d_inode == dentry->d_inode) {
- err = vfs_getattr(exp->ex_mnt->mnt_parent,
- exp->ex_mnt->mnt_mountpoint, &stat);
+ exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) {
+ err = vfs_getattr(exp->ex_path.mnt->mnt_parent,
+ exp->ex_path.mnt->mnt_mountpoint, &stat);
if (err)
goto out_nfserr;
}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 8fbd2dc08a9..0130b345234 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
return 1;
tdentry = dget(dentry);
- while (tdentry != exp->ex_dentry && ! IS_ROOT(tdentry)) {
+ while (tdentry != exp->ex_path.dentry && !IS_ROOT(tdentry)) {
/* make sure parents give x permission to user */
int err;
parent = dget_parent(tdentry);
@@ -59,9 +59,9 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
dput(tdentry);
tdentry = parent;
}
- if (tdentry != exp->ex_dentry)
+ if (tdentry != exp->ex_path.dentry)
dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
- rv = (tdentry == exp->ex_dentry);
+ rv = (tdentry == exp->ex_path.dentry);
dput(tdentry);
return rv;
}
@@ -209,9 +209,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
fileid_type = fh->fh_fileid_type;
if (fileid_type == FILEID_ROOT)
- dentry = dget(exp->ex_dentry);
+ dentry = dget(exp->ex_path.dentry);
else {
- dentry = exportfs_decode_fh(exp->ex_mnt, fid,
+ dentry = exportfs_decode_fh(exp->ex_path.mnt, fid,
data_left, fileid_type,
nfsd_acceptable, exp);
}
@@ -299,7 +299,7 @@ out:
static void _fh_update(struct svc_fh *fhp, struct svc_export *exp,
struct dentry *dentry)
{
- if (dentry != exp->ex_dentry) {
+ if (dentry != exp->ex_path.dentry) {
struct fid *fid = (struct fid *)
(fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1);
int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
@@ -344,12 +344,12 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
struct inode * inode = dentry->d_inode;
struct dentry *parent = dentry->d_parent;
__u32 *datap;
- dev_t ex_dev = exp->ex_dentry->d_inode->i_sb->s_dev;
- int root_export = (exp->ex_dentry == exp->ex_dentry->d_sb->s_root);
+ dev_t ex_dev = exp->ex_path.dentry->d_inode->i_sb->s_dev;
+ int root_export = (exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root);
dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
MAJOR(ex_dev), MINOR(ex_dev),
- (long) exp->ex_dentry->d_inode->i_ino,
+ (long) exp->ex_path.dentry->d_inode->i_ino,
parent->d_name.name, dentry->d_name.name,
(inode ? inode->i_ino : 0));
@@ -391,7 +391,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
/* FALL THROUGH */
case FSID_MAJOR_MINOR:
case FSID_ENCODE_DEV:
- if (!(exp->ex_dentry->d_inode->i_sb->s_type->fs_flags
+ if (!(exp->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
& FS_REQUIRES_DEV))
goto retry;
break;
@@ -454,7 +454,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev);
fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev;
fhp->fh_handle.ofh_xino =
- ino_t_to_u32(exp->ex_dentry->d_inode->i_ino);
+ ino_t_to_u32(exp->ex_path.dentry->d_inode->i_ino);
fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry));
if (inode)
_fh_update_old(dentry, exp, &fhp->fh_handle);
@@ -465,7 +465,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
datap = fhp->fh_handle.fh_auth+0;
fhp->fh_handle.fh_fsid_type = fsid_type;
mk_fsid(fsid_type, datap, ex_dev,
- exp->ex_dentry->d_inode->i_ino,
+ exp->ex_path.dentry->d_inode->i_ino,
exp->ex_fsid, exp->ex_uuid);
len = key_len(fsid_type);
@@ -571,7 +571,7 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
case FSID_DEV:
case FSID_ENCODE_DEV:
case FSID_MAJOR_MINOR:
- if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags
+ if (fhp->fh_export->ex_path.dentry->d_inode->i_sb->s_type->fs_flags
& FS_REQUIRES_DEV)
return FSIDSOURCE_DEV;
break;
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 977a71f64e1..6cfc96a1248 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -41,7 +41,7 @@ static __be32
nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp)
{
if (err) return err;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+ return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
resp->fh.fh_dentry,
&resp->stat));
}
@@ -49,7 +49,7 @@ static __be32
nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp)
{
if (err) return err;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+ return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
resp->fh.fh_dentry,
&resp->stat));
}
@@ -164,7 +164,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
&resp->count);
if (nfserr) return nfserr;
- return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+ return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt,
resp->fh.fh_dentry,
&resp->stat));
}
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 61ad61743d9..afd08e2c90a 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -207,7 +207,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
{
struct kstat stat;
- vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat);
+ vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat);
return encode_fattr(rqstp, p, fhp, &stat);
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index cc75e4fcd02..46f59d5365a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -101,7 +101,7 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
{
struct svc_export *exp = *expp, *exp2 = NULL;
struct dentry *dentry = *dpp;
- struct vfsmount *mnt = mntget(exp->ex_mnt);
+ struct vfsmount *mnt = mntget(exp->ex_path.mnt);
struct dentry *mounts = dget(dentry);
int err = 0;
@@ -156,15 +156,15 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (isdotent(name, len)) {
if (len==1)
dentry = dget(dparent);
- else if (dparent != exp->ex_dentry) {
+ else if (dparent != exp->ex_path.dentry)
dentry = dget_parent(dparent);
- } else if (!EX_NOHIDE(exp))
+ else if (!EX_NOHIDE(exp))
dentry = dget(dparent); /* .. == . just like at / */
else {
/* checking mountpoint crossing is very different when stepping up */
struct svc_export *exp2 = NULL;
struct dentry *dp;
- struct vfsmount *mnt = mntget(exp->ex_mnt);
+ struct vfsmount *mnt = mntget(exp->ex_path.mnt);
dentry = dget(dparent);
while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry))
;
@@ -721,7 +721,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
DQUOT_INIT(inode);
}
- *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_mnt), flags);
+ *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
+ flags);
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
out_nfserr:
@@ -1462,7 +1463,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
if (!inode->i_op || !inode->i_op->readlink)
goto out;
- touch_atime(fhp->fh_export->ex_mnt, dentry);
+ touch_atime(fhp->fh_export->ex_path.mnt, dentry);
/* N.B. Why does this call need a get_fs()??
* Remove the set_fs and watch the fireworks:-) --okir
*/
diff --git a/fs/open.c b/fs/open.c
index 43fcd603196..54198538b67 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -127,10 +127,10 @@ asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf)
error = user_path_walk(path, &nd);
if (!error) {
struct statfs tmp;
- error = vfs_statfs_native(nd.dentry, &tmp);
+ error = vfs_statfs_native(nd.path.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
@@ -146,10 +146,10 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64
error = user_path_walk(path, &nd);
if (!error) {
struct statfs64 tmp;
- error = vfs_statfs64(nd.dentry, &tmp);
+ error = vfs_statfs64(nd.path.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
@@ -233,7 +233,7 @@ static long do_sys_truncate(const char __user * path, loff_t length)
error = user_path_walk(path, &nd);
if (error)
goto out;
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
error = -EISDIR;
@@ -271,13 +271,13 @@ static long do_sys_truncate(const char __user * path, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error) {
DQUOT_INIT(inode);
- error = do_truncate(nd.dentry, length, 0, NULL);
+ error = do_truncate(nd.path.dentry, length, 0, NULL);
}
put_write_and_out:
put_write_access(inode);
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -455,14 +455,14 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
res = vfs_permission(&nd, mode);
/* SuS v2 requires we report a read only fs too */
if(res || !(mode & S_IWOTH) ||
- special_file(nd.dentry->d_inode->i_mode))
+ special_file(nd.path.dentry->d_inode->i_mode))
goto out_path_release;
- if(IS_RDONLY(nd.dentry->d_inode))
+ if(IS_RDONLY(nd.path.dentry->d_inode))
res = -EROFS;
out_path_release:
- path_release(&nd);
+ path_put(&nd.path);
out:
current->fsuid = old_fsuid;
current->fsgid = old_fsgid;
@@ -490,10 +490,10 @@ asmlinkage long sys_chdir(const char __user * filename)
if (error)
goto dput_and_out;
- set_fs_pwd(current->fs, nd.mnt, nd.dentry);
+ set_fs_pwd(current->fs, &nd.path);
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -501,9 +501,7 @@ out:
asmlinkage long sys_fchdir(unsigned int fd)
{
struct file *file;
- struct dentry *dentry;
struct inode *inode;
- struct vfsmount *mnt;
int error;
error = -EBADF;
@@ -511,9 +509,7 @@ asmlinkage long sys_fchdir(unsigned int fd)
if (!file)
goto out;
- dentry = file->f_path.dentry;
- mnt = file->f_path.mnt;
- inode = dentry->d_inode;
+ inode = file->f_path.dentry->d_inode;
error = -ENOTDIR;
if (!S_ISDIR(inode->i_mode))
@@ -521,7 +517,7 @@ asmlinkage long sys_fchdir(unsigned int fd)
error = file_permission(file, MAY_EXEC);
if (!error)
- set_fs_pwd(current->fs, mnt, dentry);
+ set_fs_pwd(current->fs, &file->f_path);
out_putf:
fput(file);
out:
@@ -545,11 +541,11 @@ asmlinkage long sys_chroot(const char __user * filename)
if (!capable(CAP_SYS_CHROOT))
goto dput_and_out;
- set_fs_root(current->fs, nd.mnt, nd.dentry);
+ set_fs_root(current->fs, &nd.path);
set_fs_altroot();
error = 0;
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -602,7 +598,7 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
if (error)
goto out;
- inode = nd.dentry->d_inode;
+ inode = nd.path.dentry->d_inode;
error = -EROFS;
if (IS_RDONLY(inode))
@@ -617,11 +613,11 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- error = notify_change(nd.dentry, &newattrs);
+ error = notify_change(nd.path.dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
dput_and_out:
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
@@ -675,8 +671,8 @@ asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
error = user_path_walk(filename, &nd);
if (error)
goto out;
- error = chown_common(nd.dentry, user, group);
- path_release(&nd);
+ error = chown_common(nd.path.dentry, user, group);
+ path_put(&nd.path);
out:
return error;
}
@@ -695,8 +691,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
error = __user_walk_fd(dfd, filename, follow, &nd);
if (error)
goto out;
- error = chown_common(nd.dentry, user, group);
- path_release(&nd);
+ error = chown_common(nd.path.dentry, user, group);
+ path_put(&nd.path);
out:
return error;
}
@@ -709,8 +705,8 @@ asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group
error = user_path_walk_link(filename, &nd);
if (error)
goto out;
- error = chown_common(nd.dentry, user, group);
- path_release(&nd);
+ error = chown_common(nd.path.dentry, user, group);
+ path_put(&nd.path);
out:
return error;
}
@@ -863,7 +859,7 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry
goto out;
if (IS_ERR(dentry))
goto out_err;
- nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt),
+ nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
nd->intent.open.flags - 1,
nd->intent.open.file,
open);
@@ -891,9 +887,10 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags)
filp = nd->intent.open.file;
/* Has the filesystem initialised the file for us? */
if (filp->f_path.dentry == NULL)
- filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL);
+ filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp,
+ NULL);
else
- path_release(nd);
+ path_put(&nd->path);
return filp;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index a07e9a54206..3c185b6527b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -171,7 +171,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
*
* Description:
* This function returns a kernel virtual address mapping for the
- * passed in @pipe_buffer. If @atomic is set, an atomic map is provided
+ * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
* and the caller has to be careful not to fault before calling
* the unmap function.
*
@@ -208,15 +208,15 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
}
/**
- * generic_pipe_buf_steal - attempt to take ownership of a @pipe_buffer
+ * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
* Description:
- * This function attempts to steal the @struct page attached to
+ * This function attempts to steal the &struct page attached to
* @buf. If successful, this function returns 0 and returns with
* the page locked. The caller may then reuse the page for whatever
- * he wishes, the typical use is insertion into a different file
+ * he wishes; the typical use is insertion into a different file
* page cache.
*/
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
@@ -238,7 +238,7 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
}
/**
- * generic_pipe_buf_get - get a reference to a @struct pipe_buffer
+ * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
*
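The fs/pipe.c hunks are pure kernel-doc corrections: '@name' markup is reserved for function parameters, whereas structure cross-references are written '&struct name', so '@pipe_buffer' and '@struct page' did not render as intended. A minimal example of the corrected markup (hypothetical function, for illustration only):

        /**
         * example_buf_op - operate on a &struct pipe_buffer
         * @pipe: the pipe that the buffer belongs to
         * @buf:  the buffer to operate on
         *
         * Description:
         *   @buf refers to the parameter above; &struct pipe_buffer becomes a
         *   cross-reference to the structure in the generated documentation.
         */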
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7c6b4ec83cb..88f8edf1825 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -153,7 +153,7 @@ static int get_nr_threads(struct task_struct *tsk)
return count;
}
-static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+static int proc_cwd_link(struct inode *inode, struct path *path)
{
struct task_struct *task = get_proc_task(inode);
struct fs_struct *fs = NULL;
@@ -165,8 +165,8 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs
}
if (fs) {
read_lock(&fs->lock);
- *mnt = mntget(fs->pwdmnt);
- *dentry = dget(fs->pwd);
+ *path = fs->pwd;
+ path_get(&fs->pwd);
read_unlock(&fs->lock);
result = 0;
put_fs_struct(fs);
@@ -174,7 +174,7 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs
return result;
}
-static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+static int proc_root_link(struct inode *inode, struct path *path)
{
struct task_struct *task = get_proc_task(inode);
struct fs_struct *fs = NULL;
@@ -186,8 +186,8 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
}
if (fs) {
read_lock(&fs->lock);
- *mnt = mntget(fs->rootmnt);
- *dentry = dget(fs->root);
+ *path = fs->root;
+ path_get(&fs->root);
read_unlock(&fs->lock);
result = 0;
put_fs_struct(fs);
@@ -1164,39 +1164,36 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
int error = -EACCES;
/* We don't need a base pointer in the /proc filesystem */
- path_release(nd);
+ path_put(&nd->path);
/* Are we allowed to snoop on the tasks file descriptors? */
if (!proc_fd_access_allowed(inode))
goto out;
- error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt);
+ error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
nd->last_type = LAST_BIND;
out:
return ERR_PTR(error);
}
-static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
- char __user *buffer, int buflen)
+static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
- struct inode * inode;
char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
- char *path;
+ char *pathname;
int len;
if (!tmp)
return -ENOMEM;
- inode = dentry->d_inode;
- path = d_path(dentry, mnt, tmp, PAGE_SIZE);
- len = PTR_ERR(path);
- if (IS_ERR(path))
+ pathname = d_path(path, tmp, PAGE_SIZE);
+ len = PTR_ERR(pathname);
+ if (IS_ERR(pathname))
goto out;
- len = tmp + PAGE_SIZE - 1 - path;
+ len = tmp + PAGE_SIZE - 1 - pathname;
if (len > buflen)
len = buflen;
- if (copy_to_user(buffer, path, len))
+ if (copy_to_user(buffer, pathname, len))
len = -EFAULT;
out:
free_page((unsigned long)tmp);
@@ -1207,20 +1204,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
{
int error = -EACCES;
struct inode *inode = dentry->d_inode;
- struct dentry *de;
- struct vfsmount *mnt = NULL;
+ struct path path;
/* Are we allowed to snoop on the tasks file descriptors? */
if (!proc_fd_access_allowed(inode))
goto out;
- error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt);
+ error = PROC_I(inode)->op.proc_get_link(inode, &path);
if (error)
goto out;
- error = do_proc_readlink(de, mnt, buffer, buflen);
- dput(de);
- mntput(mnt);
+ error = do_proc_readlink(&path, buffer, buflen);
+ path_put(&path);
out:
return error;
}
@@ -1447,8 +1442,7 @@ out:
#define PROC_FDINFO_MAX 64
-static int proc_fd_info(struct inode *inode, struct dentry **dentry,
- struct vfsmount **mnt, char *info)
+static int proc_fd_info(struct inode *inode, struct path *path, char *info)
{
struct task_struct *task = get_proc_task(inode);
struct files_struct *files = NULL;
@@ -1467,10 +1461,10 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry,
spin_lock(&files->file_lock);
file = fcheck_files(files, fd);
if (file) {
- if (mnt)
- *mnt = mntget(file->f_path.mnt);
- if (dentry)
- *dentry = dget(file->f_path.dentry);
+ if (path) {
+ *path = file->f_path;
+ path_get(&file->f_path);
+ }
if (info)
snprintf(info, PROC_FDINFO_MAX,
"pos:\t%lli\n"
@@ -1487,10 +1481,9 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry,
return -ENOENT;
}
-static int proc_fd_link(struct inode *inode, struct dentry **dentry,
- struct vfsmount **mnt)
+static int proc_fd_link(struct inode *inode, struct path *path)
{
- return proc_fd_info(inode, dentry, mnt, NULL);
+ return proc_fd_info(inode, path, NULL);
}
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
@@ -1684,7 +1677,7 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
char tmp[PROC_FDINFO_MAX];
- int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp);
+ int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
if (!err)
err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
return err;
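The /proc link helpers show the standard idiom for handing out a copy of a path owned by someone else: copy the struct by value, then take references with path_get() while the owner's copy is still pinned (here by fs->lock or files->file_lock). In sketch form:

        read_lock(&fs->lock);
        *path = fs->pwd;        /* struct copy of the mnt/dentry pair */
        path_get(&fs->pwd);     /* grab our own references before unlocking */
        read_unlock(&fs->lock);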
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index ea496ffeabe..1c81c8f1aee 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -48,7 +48,7 @@ extern int maps_protect;
extern void create_seq_entry(char *name, mode_t mode,
const struct file_operations *f);
-extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **);
+extern int proc_exe_link(struct inode *, struct path *);
extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 5d9147b9d73..941e95114b5 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -67,7 +67,7 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
if (len < 1)
len = 1;
seq_printf(m, "%*c", len, ' ');
- seq_path(m, file->f_path.mnt, file->f_path.dentry, "");
+ seq_path(m, &file->f_path, "");
}
seq_putc(m, '\n');
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b9cb23c08f6..614c34b6d1c 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -407,7 +407,7 @@ static int proc_sys_permission(struct inode *inode, int mask, struct nameidata *
if (!nd || !depth)
goto out;
- dentry = nd->dentry;
+ dentry = nd->path.dentry;
table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
/* If the entry does not exist deny permission */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ae4d3f2c8cb..49958cffbd8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -75,7 +75,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
return mm->total_vm;
}
-int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+int proc_exe_link(struct inode *inode, struct path *path)
{
struct vm_area_struct * vma;
int result = -ENOENT;
@@ -98,8 +98,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
}
if (vma) {
- *mnt = mntget(vma->vm_file->f_path.mnt);
- *dentry = dget(vma->vm_file->f_path.dentry);
+ *path = vma->vm_file->f_path;
+ path_get(&vma->vm_file->f_path);
result = 0;
}
@@ -271,7 +271,7 @@ static int show_map(struct seq_file *m, void *v)
*/
if (file) {
pad_len_spaces(m, len);
- seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
+ seq_path(m, &file->f_path, "\n");
} else {
const char *name = arch_vma_name(vma);
if (!name) {
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index abfc6f5e56c..8011528518b 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -103,7 +103,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
return size;
}
-int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+int proc_exe_link(struct inode *inode, struct path *path)
{
struct vm_list_struct *vml;
struct vm_area_struct *vma;
@@ -126,8 +126,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount *
}
if (vma) {
- *mnt = mntget(vma->vm_file->f_path.mnt);
- *dentry = dget(vma->vm_file->f_path.dentry);
+ *path = vma->vm_file->f_path;
+ path_get(&vma->vm_file->f_path);
result = 0;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 6033f0c3bd0..6841452e0de 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2026,29 +2026,29 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
if (err)
return err;
/* Quotafile not on the same filesystem? */
- if (nd.mnt->mnt_sb != sb) {
- path_release(&nd);
+ if (nd.path.mnt->mnt_sb != sb) {
+ path_put(&nd.path);
return -EXDEV;
}
/* We must not pack tails for quota files on reiserfs for quota IO to work */
- if (!REISERFS_I(nd.dentry->d_inode)->i_flags & i_nopack_mask) {
+ if (!REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask) {
reiserfs_warning(sb,
"reiserfs: Quota file must have tail packing disabled.");
- path_release(&nd);
+ path_put(&nd.path);
return -EINVAL;
}
/* Not journalling quota? No more tests needed... */
if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] &&
!REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) {
- path_release(&nd);
+ path_put(&nd.path);
return vfs_quota_on(sb, type, format_id, path);
}
/* Quotafile not of fs root? */
- if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+ if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
reiserfs_warning(sb,
"reiserfs: Quota file not on filesystem root. "
"Journalled quota will not work.");
- path_release(&nd);
+ path_put(&nd.path);
return vfs_quota_on(sb, type, format_id, path);
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index ca71c115bda..853770274f2 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -342,13 +342,11 @@ int seq_printf(struct seq_file *m, const char *f, ...)
}
EXPORT_SYMBOL(seq_printf);
-int seq_path(struct seq_file *m,
- struct vfsmount *mnt, struct dentry *dentry,
- char *esc)
+int seq_path(struct seq_file *m, struct path *path, char *esc)
{
if (m->count < m->size) {
char *s = m->buf + m->count;
- char *p = d_path(dentry, mnt, s, m->size - m->count);
+ char *p = d_path(path, s, m->size - m->count);
if (!IS_ERR(p)) {
while (s <= p) {
char c = *p++;
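
Aside, not part of the patch: with the prototype change above, seq_path() callers now hand in a struct path rather than a mount/dentry pair. A hedged usage sketch; example_show_file() is invented for illustration:

	/* Sketch of a caller on the new seq_path() signature. */
	static int example_show_file(struct seq_file *m, struct file *file)
	{
		seq_puts(m, "file: ");
		seq_path(m, &file->f_path, "\n");	/* "\n" lists characters to escape */
		seq_putc(m, '\n');
		return 0;
	}
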
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 4e5c22ca802..376ef3ee6ed 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -505,7 +505,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
if (warn_count < 5) {
warn_count++;
printk(KERN_EMERG "smbfs is deprecated and will be removed"
- "from the 2.6.27 kernel. Please migrate to cifs\n");
+ " from the 2.6.27 kernel. Please migrate to cifs\n");
}
if (!raw_data)
diff --git a/fs/stat.c b/fs/stat.c
index 68510068a64..9cf41f719d5 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -62,8 +62,8 @@ int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat)
error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd);
if (!error) {
- error = vfs_getattr(nd.mnt, nd.dentry, stat);
- path_release(&nd);
+ error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat);
+ path_put(&nd.path);
}
return error;
}
@@ -82,8 +82,8 @@ int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat)
error = __user_walk_fd(dfd, name, 0, &nd);
if (!error) {
- error = vfs_getattr(nd.mnt, nd.dentry, stat);
- path_release(&nd);
+ error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat);
+ path_put(&nd.path);
}
return error;
}
@@ -302,17 +302,18 @@ asmlinkage long sys_readlinkat(int dfd, const char __user *path,
error = __user_walk_fd(dfd, path, 0, &nd);
if (!error) {
- struct inode * inode = nd.dentry->d_inode;
+ struct inode *inode = nd.path.dentry->d_inode;
error = -EINVAL;
if (inode->i_op && inode->i_op->readlink) {
- error = security_inode_readlink(nd.dentry);
+ error = security_inode_readlink(nd.path.dentry);
if (!error) {
- touch_atime(nd.mnt, nd.dentry);
- error = inode->i_op->readlink(nd.dentry, buf, bufsiz);
+ touch_atime(nd.path.mnt, nd.path.dentry);
+ error = inode->i_op->readlink(nd.path.dentry,
+ buf, bufsiz);
}
}
- path_release(&nd);
+ path_put(&nd.path);
}
return error;
}
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index d721a1af197..f855dcbbdfb 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -145,7 +145,7 @@ static bool udf_add_free_space(struct udf_sb_info *sbi,
{
struct logicalVolIntegrityDesc *lvid;
- if (sbi->s_lvid_bh)
+ if (sbi->s_lvid_bh == NULL)
return false;
lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 4b44e23caa1..8d8643ada19 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -43,13 +43,13 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
int block, iblock;
- loff_t nf_pos = filp->f_pos - 1;
+ loff_t nf_pos = (filp->f_pos - 1) << 2;
int flen;
char fname[UDF_NAME_LEN];
char *nameptr;
uint16_t liu;
uint8_t lfi;
- loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2;
+ loff_t size = udf_ext0_offset(dir) + dir->i_size;
struct buffer_head *tmp, *bha[16];
kernel_lb_addr eloc;
uint32_t elen;
@@ -63,13 +63,13 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
return 0;
if (nf_pos == 0)
- nf_pos = (udf_ext0_offset(dir) >> 2);
+ nf_pos = udf_ext0_offset(dir);
- fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
+ fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
iinfo = UDF_I(dir);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
fibh.sbh = fibh.ebh = NULL;
- } else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2),
+ } else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
@@ -111,7 +111,7 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
}
while (nf_pos < size) {
- filp->f_pos = nf_pos + 1;
+ filp->f_pos = (nf_pos >> 2) + 1;
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
@@ -178,7 +178,7 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
}
} /* end while */
- filp->f_pos = nf_pos + 1;
+ filp->f_pos = (nf_pos >> 2) + 1;
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
diff --git a/fs/utimes.c b/fs/utimes.c
index e5588cd8530..b18da9c0b97 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -84,7 +84,7 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
if (error)
goto out;
- dentry = nd.dentry;
+ dentry = nd.path.dentry;
}
inode = dentry->d_inode;
@@ -138,7 +138,7 @@ dput_and_out:
if (f)
fput(f);
else
- path_release(&nd);
+ path_put(&nd.path);
out:
return error;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index f7c8f87bb39..3acab161546 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -262,8 +262,8 @@ sys_setxattr(char __user *path, char __user *name, void __user *value,
error = user_path_walk(path, &nd);
if (error)
return error;
- error = setxattr(nd.dentry, name, value, size, flags);
- path_release(&nd);
+ error = setxattr(nd.path.dentry, name, value, size, flags);
+ path_put(&nd.path);
return error;
}
@@ -277,8 +277,8 @@ sys_lsetxattr(char __user *path, char __user *name, void __user *value,
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = setxattr(nd.dentry, name, value, size, flags);
- path_release(&nd);
+ error = setxattr(nd.path.dentry, name, value, size, flags);
+ path_put(&nd.path);
return error;
}
@@ -347,8 +347,8 @@ sys_getxattr(char __user *path, char __user *name, void __user *value,
error = user_path_walk(path, &nd);
if (error)
return error;
- error = getxattr(nd.dentry, name, value, size);
- path_release(&nd);
+ error = getxattr(nd.path.dentry, name, value, size);
+ path_put(&nd.path);
return error;
}
@@ -362,8 +362,8 @@ sys_lgetxattr(char __user *path, char __user *name, void __user *value,
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = getxattr(nd.dentry, name, value, size);
- path_release(&nd);
+ error = getxattr(nd.path.dentry, name, value, size);
+ path_put(&nd.path);
return error;
}
@@ -421,8 +421,8 @@ sys_listxattr(char __user *path, char __user *list, size_t size)
error = user_path_walk(path, &nd);
if (error)
return error;
- error = listxattr(nd.dentry, list, size);
- path_release(&nd);
+ error = listxattr(nd.path.dentry, list, size);
+ path_put(&nd.path);
return error;
}
@@ -435,8 +435,8 @@ sys_llistxattr(char __user *path, char __user *list, size_t size)
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = listxattr(nd.dentry, list, size);
- path_release(&nd);
+ error = listxattr(nd.path.dentry, list, size);
+ path_put(&nd.path);
return error;
}
@@ -482,8 +482,8 @@ sys_removexattr(char __user *path, char __user *name)
error = user_path_walk(path, &nd);
if (error)
return error;
- error = removexattr(nd.dentry, name);
- path_release(&nd);
+ error = removexattr(nd.path.dentry, name);
+ path_put(&nd.path);
return error;
}
@@ -496,8 +496,8 @@ sys_lremovexattr(char __user *path, char __user *name)
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = removexattr(nd.dentry, name);
- path_release(&nd);
+ error = removexattr(nd.path.dentry, name);
+ path_put(&nd.path);
return error;
}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4c82a050a3a..a9952e490ac 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -91,10 +91,10 @@ xfs_find_handle(
if (error)
return error;
- ASSERT(nd.dentry);
- ASSERT(nd.dentry->d_inode);
- inode = igrab(nd.dentry->d_inode);
- path_release(&nd);
+ ASSERT(nd.path.dentry);
+ ASSERT(nd.path.dentry->d_inode);
+ inode = igrab(nd.path.dentry->d_inode);
+ path_put(&nd.path);
break;
}
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 35582fe9d64..1f3da5b8657 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1648,14 +1648,14 @@ xfs_qm_quotacheck_dqadjust(
* Adjust the inode count and the block count to reflect this inode's
* resource usage.
*/
- be64_add(&dqp->q_core.d_icount, 1);
+ be64_add_cpu(&dqp->q_core.d_icount, 1);
dqp->q_res_icount++;
if (nblks) {
- be64_add(&dqp->q_core.d_bcount, nblks);
+ be64_add_cpu(&dqp->q_core.d_bcount, nblks);
dqp->q_res_bcount += nblks;
}
if (rtblks) {
- be64_add(&dqp->q_core.d_rtbcount, rtblks);
+ be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
dqp->q_res_rtbcount += rtblks;
}
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 7de6874bf1b..f441f836ca8 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -421,13 +421,13 @@ xfs_trans_apply_dquot_deltas(
(xfs_qcnt_t) -qtrx->qt_icount_delta);
#endif
if (totalbdelta)
- be64_add(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
+ be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
if (qtrx->qt_icount_delta)
- be64_add(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
+ be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);
if (totalrtbdelta)
- be64_add(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
+ be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);
/*
* Get any default limits in use.
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index ea6aa60ace0..bdbfbbee495 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -592,7 +592,7 @@ xfs_alloc_ag_vextent(
if (!(args->wasfromfl)) {
agf = XFS_BUF_TO_AGF(args->agbp);
- be32_add(&agf->agf_freeblks, -(args->len));
+ be32_add_cpu(&agf->agf_freeblks, -(args->len));
xfs_trans_agblocks_delta(args->tp,
-((long)(args->len)));
args->pag->pagf_freeblks -= args->len;
@@ -1720,7 +1720,7 @@ xfs_free_ag_extent(
agf = XFS_BUF_TO_AGF(agbp);
pag = &mp->m_perag[agno];
- be32_add(&agf->agf_freeblks, len);
+ be32_add_cpu(&agf->agf_freeblks, len);
xfs_trans_agblocks_delta(tp, len);
pag->pagf_freeblks += len;
XFS_WANT_CORRUPTED_GOTO(
@@ -2008,18 +2008,18 @@ xfs_alloc_get_freelist(
* Get the block number and update the data structures.
*/
bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
- be32_add(&agf->agf_flfirst, 1);
+ be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
- be32_add(&agf->agf_flcount, -1);
+ be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
- be32_add(&agf->agf_btreeblks, 1);
+ be32_add_cpu(&agf->agf_btreeblks, 1);
pag->pagf_btreeblks++;
logflags |= XFS_AGF_BTREEBLKS;
}
@@ -2117,17 +2117,17 @@ xfs_alloc_put_freelist(
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
agfl = XFS_BUF_TO_AGFL(agflbp);
- be32_add(&agf->agf_fllast, 1);
+ be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
- be32_add(&agf->agf_flcount, 1);
+ be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
if (btreeblk) {
- be32_add(&agf->agf_btreeblks, -1);
+ be32_add_cpu(&agf->agf_btreeblks, -1);
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 1603ce59585..3ce2645508a 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -221,7 +221,7 @@ xfs_alloc_delrec(
*/
bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
agf->agf_roots[cur->bc_btnum] = *lpp;
- be32_add(&agf->agf_levels[cur->bc_btnum], -1);
+ be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
/*
* Put this buffer/block on the ag's freelist.
@@ -1256,9 +1256,9 @@ xfs_alloc_lshift(
/*
* Bump and log left's numrecs, decrement and log right's numrecs.
*/
- be16_add(&left->bb_numrecs, 1);
+ be16_add_cpu(&left->bb_numrecs, 1);
xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
- be16_add(&right->bb_numrecs, -1);
+ be16_add_cpu(&right->bb_numrecs, -1);
xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
/*
* Slide the contents of right down one entry.
@@ -1346,7 +1346,7 @@ xfs_alloc_newroot(
agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
- be32_add(&agf->agf_levels[cur->bc_btnum], 1);
+ be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
seqno = be32_to_cpu(agf->agf_seqno);
mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
@@ -1558,9 +1558,9 @@ xfs_alloc_rshift(
/*
* Decrement and log left's numrecs, bump and log right's numrecs.
*/
- be16_add(&left->bb_numrecs, -1);
+ be16_add_cpu(&left->bb_numrecs, -1);
xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
/*
* Using a temporary cursor, update the parent key values of the
@@ -1643,7 +1643,7 @@ xfs_alloc_split(
*/
if ((be16_to_cpu(left->bb_numrecs) & 1) &&
cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
/*
* For non-leaf blocks, copy keys and addresses over to the new block.
@@ -1689,7 +1689,7 @@ xfs_alloc_split(
* Adjust numrecs, sibling pointers.
*/
lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
- be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+ be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
right->bb_rightsib = left->bb_rightsib;
left->bb_rightsib = cpu_to_be32(rbno);
right->bb_leftsib = cpu_to_be32(lbno);
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index c4836890b72..f9472a2076d 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -170,21 +170,6 @@
} \
}
-static inline void be16_add(__be16 *a, __s16 b)
-{
- *a = cpu_to_be16(be16_to_cpu(*a) + b);
-}
-
-static inline void be32_add(__be32 *a, __s32 b)
-{
- *a = cpu_to_be32(be32_to_cpu(*a) + b);
-}
-
-static inline void be64_add(__be64 *a, __s64 b)
-{
- *a = cpu_to_be64(be64_to_cpu(*a) + b);
-}
-
/*
* In directories inode numbers are stored as unaligned arrays of unsigned
* 8bit integers on disk.
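
Aside, not part of the patch: the XFS-local be16_add()/be32_add()/be64_add() helpers removed above are replaced throughout this diff by be16_add_cpu() and friends, which this series adds to the generic byteorder headers instead. To a first approximation the replacements behave the same way; a sketch, assuming the generic definitions:

	/* Sketch of the generic replacements; the real definitions live in
	 * include/linux/byteorder/generic.h, outside this diff. */
	static inline void be16_add_cpu(__be16 *var, u16 val)
	{
		*var = cpu_to_be16(be16_to_cpu(*var) + val);
	}

	static inline void be32_add_cpu(__be32 *var, u32 val)
	{
		*var = cpu_to_be32(be32_to_cpu(*var) + val);
	}

	static inline void be64_add_cpu(__be64 *var, u64 val)
	{
		*var = cpu_to_be64(be64_to_cpu(*var) + val);
	}

Callers that pass negative deltas, for example be32_add_cpu(&agf->agf_flcount, -1), still work because the addition wraps identically in unsigned arithmetic.
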
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index eb3815ebb7a..b08e2a2a8ad 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -317,7 +317,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
memcpy(sfe->nameval, args->name, args->namelen);
memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
sf->hdr.count++;
- be16_add(&sf->hdr.totsize, size);
+ be16_add_cpu(&sf->hdr.totsize, size);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
xfs_sbversion_add_attr2(mp, args->trans);
@@ -363,7 +363,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
if (end != totsize)
memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
sf->hdr.count--;
- be16_add(&sf->hdr.totsize, -size);
+ be16_add_cpu(&sf->hdr.totsize, -size);
/*
* Fix up the start offset of the attribute fork
@@ -1133,7 +1133,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
}
- be16_add(&hdr->count, 1);
+ be16_add_cpu(&hdr->count, 1);
/*
* Allocate space for the new string (at the end of the run).
@@ -1147,7 +1147,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
mp->m_sb.sb_blocksize, NULL));
ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
- be16_add(&map->size,
+ be16_add_cpu(&map->size,
-xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
mp->m_sb.sb_blocksize, &tmp));
entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
@@ -1214,12 +1214,12 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
map = &hdr->freemap[0];
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
if (be16_to_cpu(map->base) == tmp) {
- be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t));
- be16_add(&map->size,
+ be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t));
+ be16_add_cpu(&map->size,
-((int)sizeof(xfs_attr_leaf_entry_t)));
}
}
- be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
+ be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
return(0);
@@ -1727,9 +1727,9 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
if (be16_to_cpu(map->base) == tablesize) {
- be16_add(&map->base,
+ be16_add_cpu(&map->base,
-((int)sizeof(xfs_attr_leaf_entry_t)));
- be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t));
+ be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t));
}
if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
@@ -1751,19 +1751,19 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
if ((before >= 0) || (after >= 0)) {
if ((before >= 0) && (after >= 0)) {
map = &hdr->freemap[before];
- be16_add(&map->size, entsize);
- be16_add(&map->size,
+ be16_add_cpu(&map->size, entsize);
+ be16_add_cpu(&map->size,
be16_to_cpu(hdr->freemap[after].size));
hdr->freemap[after].base = 0;
hdr->freemap[after].size = 0;
} else if (before >= 0) {
map = &hdr->freemap[before];
- be16_add(&map->size, entsize);
+ be16_add_cpu(&map->size, entsize);
} else {
map = &hdr->freemap[after];
/* both on-disk, don't endian flip twice */
map->base = entry->nameidx;
- be16_add(&map->size, entsize);
+ be16_add_cpu(&map->size, entsize);
}
} else {
/*
@@ -1788,7 +1788,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
* Compress the remaining entries and zero out the removed stuff.
*/
memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize);
- be16_add(&hdr->usedbytes, -entsize);
+ be16_add_cpu(&hdr->usedbytes, -entsize);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index),
entsize));
@@ -1796,7 +1796,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
tmp = (be16_to_cpu(hdr->count) - args->index)
* sizeof(xfs_attr_leaf_entry_t);
memmove((char *)entry, (char *)(entry+1), tmp);
- be16_add(&hdr->count, -1);
+ be16_add_cpu(&hdr->count, -1);
xfs_da_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
entry = &leaf->entries[be16_to_cpu(hdr->count)];
@@ -2182,15 +2182,15 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
*/
if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
- be16_add(&hdr_s->usedbytes, -tmp);
- be16_add(&hdr_s->count, -1);
+ be16_add_cpu(&hdr_s->usedbytes, -tmp);
+ be16_add_cpu(&hdr_s->count, -1);
entry_d--; /* to compensate for ++ in loop hdr */
desti--;
if ((start_s + i) < offset)
result++; /* insertion index adjustment */
} else {
#endif /* GROT */
- be16_add(&hdr_d->firstused, -tmp);
+ be16_add_cpu(&hdr_d->firstused, -tmp);
/* both on-disk, don't endian flip twice */
entry_d->hashval = entry_s->hashval;
/* both on-disk, don't endian flip twice */
@@ -2203,10 +2203,10 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
<= XFS_LBSIZE(mp));
memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp);
- be16_add(&hdr_s->usedbytes, -tmp);
- be16_add(&hdr_d->usedbytes, tmp);
- be16_add(&hdr_s->count, -1);
- be16_add(&hdr_d->count, 1);
+ be16_add_cpu(&hdr_s->usedbytes, -tmp);
+ be16_add_cpu(&hdr_d->usedbytes, tmp);
+ be16_add_cpu(&hdr_s->count, -1);
+ be16_add_cpu(&hdr_d->count, 1);
tmp = be16_to_cpu(hdr_d->count)
* sizeof(xfs_attr_leaf_entry_t)
+ sizeof(xfs_attr_leaf_hdr_t);
@@ -2247,7 +2247,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
* Fill in the freemap information
*/
hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
- be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
+ be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
sizeof(xfs_attr_leaf_entry_t));
hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
- be16_to_cpu(hdr_d->freemap[0].base));
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index c4181d85605..bd18987326a 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -631,7 +631,7 @@ xfs_bmbt_delrec(
memcpy(lrp, rrp, numrrecs * sizeof(*lrp));
xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs);
}
- be16_add(&left->bb_numrecs, numrrecs);
+ be16_add_cpu(&left->bb_numrecs, numrrecs);
left->bb_rightsib = right->bb_rightsib;
xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) {
@@ -924,7 +924,7 @@ xfs_bmbt_killroot(
xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork);
block = ifp->if_broot;
}
- be16_add(&block->bb_numrecs, i);
+ be16_add_cpu(&block->bb_numrecs, i);
ASSERT(block->bb_numrecs == cblock->bb_numrecs);
kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur);
@@ -947,7 +947,7 @@ xfs_bmbt_killroot(
XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(cur->bc_tp, cbp);
cur->bc_bufs[level - 1] = NULL;
- be16_add(&block->bb_level, -1);
+ be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
cur->bc_nlevels--;
@@ -1401,9 +1401,9 @@ xfs_bmbt_rshift(
key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp));
rkp = &key;
}
- be16_add(&left->bb_numrecs, -1);
+ be16_add_cpu(&left->bb_numrecs, -1);
xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS);
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
#ifdef DEBUG
if (level > 0)
xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1);
@@ -1535,7 +1535,7 @@ xfs_bmbt_split(
right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
if ((be16_to_cpu(left->bb_numrecs) & 1) &&
cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
if (level > 0) {
lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
@@ -1562,7 +1562,7 @@ xfs_bmbt_split(
xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
*startoff = xfs_bmbt_disk_get_startoff(rrp);
}
- be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+ be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
right->bb_rightsib = left->bb_rightsib;
left->bb_rightsib = cpu_to_be64(args.fsbno);
right->bb_leftsib = cpu_to_be64(lbno);
@@ -2240,7 +2240,7 @@ xfs_bmbt_newroot(
bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
*cblock = *block;
- be16_add(&block->bb_level, 1);
+ be16_add_cpu(&block->bb_level, 1);
block->bb_numrecs = cpu_to_be16(1);
cur->bc_nlevels++;
cur->bc_ptrs[level + 1] = 1;
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 1b446849fb3..021a8f7e563 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -511,12 +511,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* Move the req'd B-tree elements from high in node1 to
* low in node2.
*/
- be16_add(&node2->hdr.count, count);
+ be16_add_cpu(&node2->hdr.count, count);
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
btree_d = &node2->btree[0];
memcpy(btree_d, btree_s, tmp);
- be16_add(&node1->hdr.count, -count);
+ be16_add_cpu(&node1->hdr.count, -count);
} else {
/*
* Move the req'd B-tree elements from low in node2 to
@@ -527,7 +527,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
btree_s = &node2->btree[0];
btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
memcpy(btree_d, btree_s, tmp);
- be16_add(&node1->hdr.count, count);
+ be16_add_cpu(&node1->hdr.count, count);
xfs_da_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
@@ -539,7 +539,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
btree_s = &node2->btree[count];
btree_d = &node2->btree[0];
memmove(btree_d, btree_s, tmp);
- be16_add(&node2->hdr.count, -count);
+ be16_add_cpu(&node2->hdr.count, -count);
}
/*
@@ -604,7 +604,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
btree->before = cpu_to_be32(newblk->blkno);
xfs_da_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
- be16_add(&node->hdr.count, 1);
+ be16_add_cpu(&node->hdr.count, 1);
xfs_da_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
@@ -959,7 +959,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
- be16_add(&node->hdr.count, -1);
+ be16_add_cpu(&node->hdr.count, -1);
xfs_da_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
@@ -1018,7 +1018,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
*/
tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
memcpy(btree, &drop_node->btree[0], tmp);
- be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
+ be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
xfs_da_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index a5f4f4fb886..fb5a556725b 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -271,7 +271,7 @@ xfs_dir2_block_addname(
}
lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);
lfloghigh -= be32_to_cpu(btp->stale) - 1;
- be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1));
+ be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
xfs_dir2_data_make_free(tp, bp,
(xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
@@ -326,7 +326,7 @@ xfs_dir2_block_addname(
/*
* Update the tail (entry count).
*/
- be32_add(&btp->count, 1);
+ be32_add_cpu(&btp->count, 1);
/*
* If we now need to rebuild the bestfree map, do so.
* This needs to happen before the next call to use_free.
@@ -387,7 +387,7 @@ xfs_dir2_block_addname(
lfloglow = MIN(mid, lfloglow);
lfloghigh = MAX(highstale, lfloghigh);
}
- be32_add(&btp->stale, -1);
+ be32_add_cpu(&btp->stale, -1);
}
/*
* Point to the new data entry.
@@ -767,7 +767,7 @@ xfs_dir2_block_removename(
/*
* Fix up the block tail.
*/
- be32_add(&btp->stale, 1);
+ be32_add_cpu(&btp->stale, 1);
xfs_dir2_block_log_tail(tp, bp);
/*
* Remove the leaf entry by marking it stale.
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index d2452699e9b..fb8c9e08b23 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -587,7 +587,7 @@ xfs_dir2_data_make_free(
/*
* Fix up the new big freespace.
*/
- be16_add(&prevdup->length, len + be16_to_cpu(postdup->length));
+ be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length));
*xfs_dir2_data_unused_tag_p(prevdup) =
cpu_to_be16((char *)prevdup - (char *)d);
xfs_dir2_data_log_unused(tp, bp, prevdup);
@@ -621,7 +621,7 @@ xfs_dir2_data_make_free(
*/
else if (prevdup) {
dfp = xfs_dir2_data_freefind(d, prevdup);
- be16_add(&prevdup->length, len);
+ be16_add_cpu(&prevdup->length, len);
*xfs_dir2_data_unused_tag_p(prevdup) =
cpu_to_be16((char *)prevdup - (char *)d);
xfs_dir2_data_log_unused(tp, bp, prevdup);
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 0ca0020ba09..bc52b803d79 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -359,7 +359,7 @@ xfs_dir2_leaf_addname(
bestsp--;
memmove(&bestsp[0], &bestsp[1],
be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0]));
- be32_add(&ltp->bestcount, 1);
+ be32_add_cpu(&ltp->bestcount, 1);
xfs_dir2_leaf_log_tail(tp, lbp);
xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
}
@@ -445,7 +445,7 @@ xfs_dir2_leaf_addname(
*/
lfloglow = index;
lfloghigh = be16_to_cpu(leaf->hdr.count);
- be16_add(&leaf->hdr.count, 1);
+ be16_add_cpu(&leaf->hdr.count, 1);
}
/*
* There are stale entries.
@@ -523,7 +523,7 @@ xfs_dir2_leaf_addname(
lfloglow = MIN(index, lfloglow);
lfloghigh = MAX(highstale, lfloghigh);
}
- be16_add(&leaf->hdr.stale, -1);
+ be16_add_cpu(&leaf->hdr.stale, -1);
}
/*
* Fill in the new leaf entry.
@@ -626,7 +626,7 @@ xfs_dir2_leaf_compact(
* Update and log the header, log the leaf entries.
*/
ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to);
- be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale)));
+ be16_add_cpu(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale)));
leaf->hdr.stale = 0;
xfs_dir2_leaf_log_header(args->trans, bp);
if (loglow != -1)
@@ -728,7 +728,7 @@ xfs_dir2_leaf_compact_x1(
/*
* Adjust the leaf header values.
*/
- be16_add(&leaf->hdr.count, -(from - to));
+ be16_add_cpu(&leaf->hdr.count, -(from - to));
leaf->hdr.stale = cpu_to_be16(1);
/*
* Remember the low/high stale value only in the "right"
@@ -1470,7 +1470,7 @@ xfs_dir2_leaf_removename(
/*
* We just mark the leaf entry stale by putting a null in it.
*/
- be16_add(&leaf->hdr.stale, 1);
+ be16_add_cpu(&leaf->hdr.stale, 1);
xfs_dir2_leaf_log_header(tp, lbp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
xfs_dir2_leaf_log_ents(tp, lbp, index, index);
@@ -1531,7 +1531,7 @@ xfs_dir2_leaf_removename(
*/
memmove(&bestsp[db - i], bestsp,
(be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp));
- be32_add(&ltp->bestcount, -(db - i));
+ be32_add_cpu(&ltp->bestcount, -(db - i));
xfs_dir2_leaf_log_tail(tp, lbp);
xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
} else
@@ -1712,7 +1712,7 @@ xfs_dir2_leaf_trim_data(
* Eliminate the last bests entry from the table.
*/
bestsp = xfs_dir2_leaf_bests_p(ltp);
- be32_add(&ltp->bestcount, -1);
+ be32_add_cpu(&ltp->bestcount, -1);
memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp));
xfs_dir2_leaf_log_tail(tp, lbp);
xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index eb18e399e83..8dade711f09 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -254,7 +254,7 @@ xfs_dir2_leafn_add(
(be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
lfloglow = index;
lfloghigh = be16_to_cpu(leaf->hdr.count);
- be16_add(&leaf->hdr.count, 1);
+ be16_add_cpu(&leaf->hdr.count, 1);
}
/*
* There are stale entries. We'll use one for the new entry.
@@ -322,7 +322,7 @@ xfs_dir2_leafn_add(
lfloglow = MIN(index, lfloglow);
lfloghigh = MAX(highstale, lfloghigh);
}
- be16_add(&leaf->hdr.stale, -1);
+ be16_add_cpu(&leaf->hdr.stale, -1);
}
/*
* Insert the new entry, log everything.
@@ -697,10 +697,10 @@ xfs_dir2_leafn_moveents(
/*
* Update the headers and log them.
*/
- be16_add(&leaf_s->hdr.count, -(count));
- be16_add(&leaf_s->hdr.stale, -(stale));
- be16_add(&leaf_d->hdr.count, count);
- be16_add(&leaf_d->hdr.stale, stale);
+ be16_add_cpu(&leaf_s->hdr.count, -(count));
+ be16_add_cpu(&leaf_s->hdr.stale, -(stale));
+ be16_add_cpu(&leaf_d->hdr.count, count);
+ be16_add_cpu(&leaf_d->hdr.stale, stale);
xfs_dir2_leaf_log_header(tp, bp_s);
xfs_dir2_leaf_log_header(tp, bp_d);
xfs_dir2_leafn_check(args->dp, bp_s);
@@ -885,7 +885,7 @@ xfs_dir2_leafn_remove(
* Kill the leaf entry by marking it stale.
* Log the leaf block changes.
*/
- be16_add(&leaf->hdr.stale, 1);
+ be16_add_cpu(&leaf->hdr.stale, 1);
xfs_dir2_leaf_log_header(tp, bp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
xfs_dir2_leaf_log_ents(tp, bp, index, index);
@@ -971,7 +971,7 @@ xfs_dir2_leafn_remove(
/*
* One less used entry in the free table.
*/
- be32_add(&free->hdr.nused, -1);
+ be32_add_cpu(&free->hdr.nused, -1);
xfs_dir2_free_log_header(tp, fbp);
/*
* If this was the last entry in the table, we can
@@ -1642,7 +1642,7 @@ xfs_dir2_node_addname_int(
* (this should always be true) then update the header.
*/
if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) {
- be32_add(&free->hdr.nused, 1);
+ be32_add_cpu(&free->hdr.nused, 1);
xfs_dir2_free_log_header(tp, fbp);
}
/*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index b8de7f3cc17..eadc1591c79 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -318,7 +318,7 @@ xfs_growfs_data_private(
}
ASSERT(bp);
agi = XFS_BUF_TO_AGI(bp);
- be32_add(&agi->agi_length, new);
+ be32_add_cpu(&agi->agi_length, new);
ASSERT(nagcount == oagcount ||
be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
@@ -331,7 +331,7 @@ xfs_growfs_data_private(
}
ASSERT(bp);
agf = XFS_BUF_TO_AGF(bp);
- be32_add(&agf->agf_length, new);
+ be32_add_cpu(&agf->agf_length, new);
ASSERT(be32_to_cpu(agf->agf_length) ==
be32_to_cpu(agi->agi_length));
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 1409c2d61c1..c5836b951d0 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -301,8 +301,8 @@ xfs_ialloc_ag_alloc(
}
xfs_trans_inode_alloc_buf(tp, fbuf);
}
- be32_add(&agi->agi_count, newlen);
- be32_add(&agi->agi_freecount, newlen);
+ be32_add_cpu(&agi->agi_count, newlen);
+ be32_add_cpu(&agi->agi_freecount, newlen);
agno = be32_to_cpu(agi->agi_seqno);
down_read(&args.mp->m_peraglock);
args.mp->m_perag[agno].pagi_freecount += newlen;
@@ -885,7 +885,7 @@ nextag:
if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
rec.ir_free)))
goto error0;
- be32_add(&agi->agi_freecount, -1);
+ be32_add_cpu(&agi->agi_freecount, -1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
down_read(&mp->m_peraglock);
mp->m_perag[tagno].pagi_freecount--;
@@ -1065,8 +1065,8 @@ xfs_difree(
* to be freed when the transaction is committed.
*/
ilen = XFS_IALLOC_INODES(mp);
- be32_add(&agi->agi_count, -ilen);
- be32_add(&agi->agi_freecount, -(ilen - 1));
+ be32_add_cpu(&agi->agi_count, -ilen);
+ be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
down_read(&mp->m_peraglock);
mp->m_perag[agno].pagi_freecount -= ilen - 1;
@@ -1095,7 +1095,7 @@ xfs_difree(
/*
* Change the inode free counts and log the ag/sb changes.
*/
- be32_add(&agi->agi_freecount, 1);
+ be32_add_cpu(&agi->agi_freecount, 1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
down_read(&mp->m_peraglock);
mp->m_perag[agno].pagi_freecount++;
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 8cdeeaf8632..e5310c90e50 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -189,7 +189,7 @@ xfs_inobt_delrec(
*/
bno = be32_to_cpu(agi->agi_root);
agi->agi_root = *pp;
- be32_add(&agi->agi_level, -1);
+ be32_add_cpu(&agi->agi_level, -1);
/*
* Free the block.
*/
@@ -1132,7 +1132,7 @@ xfs_inobt_lshift(
/*
* Bump and log left's numrecs, decrement and log right's numrecs.
*/
- be16_add(&left->bb_numrecs, 1);
+ be16_add_cpu(&left->bb_numrecs, 1);
xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
#ifdef DEBUG
if (level > 0)
@@ -1140,7 +1140,7 @@ xfs_inobt_lshift(
else
xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp);
#endif
- be16_add(&right->bb_numrecs, -1);
+ be16_add_cpu(&right->bb_numrecs, -1);
xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS);
/*
* Slide the contents of right down one entry.
@@ -1232,7 +1232,7 @@ xfs_inobt_newroot(
* Set the root data in the a.g. inode structure.
*/
agi->agi_root = cpu_to_be32(args.agbno);
- be32_add(&agi->agi_level, 1);
+ be32_add_cpu(&agi->agi_level, 1);
xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp,
XFS_AGI_ROOT | XFS_AGI_LEVEL);
/*
@@ -1426,9 +1426,9 @@ xfs_inobt_rshift(
/*
* Decrement and log left's numrecs, bump and log right's numrecs.
*/
- be16_add(&left->bb_numrecs, -1);
+ be16_add_cpu(&left->bb_numrecs, -1);
xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS);
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
#ifdef DEBUG
if (level > 0)
xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1);
@@ -1529,7 +1529,7 @@ xfs_inobt_split(
*/
if ((be16_to_cpu(left->bb_numrecs) & 1) &&
cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
- be16_add(&right->bb_numrecs, 1);
+ be16_add_cpu(&right->bb_numrecs, 1);
i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
/*
* For non-leaf blocks, copy keys and addresses over to the new block.
@@ -1565,7 +1565,7 @@ xfs_inobt_split(
* Find the left block number by looking in the buffer.
* Adjust numrecs, sibling pointers.
*/
- be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
+ be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
right->bb_rightsib = left->bb_rightsib;
left->bb_rightsib = cpu_to_be32(args.agbno);
right->bb_leftsib = cpu_to_be32(lbno);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b3ac3805d3c..a75edca1860 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1509,9 +1509,9 @@ xlog_sync(xlog_t *log,
* case, though.
*/
for (i = 0; i < split; i += BBSIZE) {
- be32_add((__be32 *)dptr, 1);
+ be32_add_cpu((__be32 *)dptr, 1);
if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
- be32_add((__be32 *)dptr, 1);
+ be32_add_cpu((__be32 *)dptr, 1);
dptr += BBSIZE;
}
@@ -1600,7 +1600,7 @@ xlog_state_finish_copy(xlog_t *log,
{
spin_lock(&log->l_icloglock);
- be32_add(&iclog->ic_header.h_num_logops, record_cnt);
+ be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
iclog->ic_offset += copy_bytes;
spin_unlock(&log->l_icloglock);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 71e4c8dcc69..140386434aa 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -567,26 +567,26 @@ xfs_trans_apply_sb_deltas(
*/
if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
if (tp->t_icount_delta)
- be64_add(&sbp->sb_icount, tp->t_icount_delta);
+ be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
if (tp->t_ifree_delta)
- be64_add(&sbp->sb_ifree, tp->t_ifree_delta);
+ be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
if (tp->t_fdblocks_delta)
- be64_add(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
+ be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
if (tp->t_res_fdblocks_delta)
- be64_add(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
+ be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
}
if (tp->t_frextents_delta)
- be64_add(&sbp->sb_frextents, tp->t_frextents_delta);
+ be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
if (tp->t_res_frextents_delta)
- be64_add(&sbp->sb_frextents, tp->t_res_frextents_delta);
+ be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
if (tp->t_dblocks_delta) {
- be64_add(&sbp->sb_dblocks, tp->t_dblocks_delta);
+ be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
whole = 1;
}
if (tp->t_agcount_delta) {
- be32_add(&sbp->sb_agcount, tp->t_agcount_delta);
+ be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
whole = 1;
}
if (tp->t_imaxpct_delta) {
@@ -594,19 +594,19 @@ xfs_trans_apply_sb_deltas(
whole = 1;
}
if (tp->t_rextsize_delta) {
- be32_add(&sbp->sb_rextsize, tp->t_rextsize_delta);
+ be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
whole = 1;
}
if (tp->t_rbmblocks_delta) {
- be32_add(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
+ be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
whole = 1;
}
if (tp->t_rblocks_delta) {
- be64_add(&sbp->sb_rblocks, tp->t_rblocks_delta);
+ be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
whole = 1;
}
if (tp->t_rextents_delta) {
- be64_add(&sbp->sb_rextents, tp->t_rextents_delta);
+ be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
whole = 1;
}
if (tp->t_rextslog_delta) {
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 022a5fd80c8..4839f2af94c 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -222,7 +222,7 @@ acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
*/
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id,
- u32 reg, void *value, u32 width);
+ u32 reg, u32 *value, u32 width);
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index cdc8004cfd1..06480bcabfd 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -32,9 +32,11 @@
#define DOMAIN_COORD_TYPE_SW_ANY 0xfd
#define DOMAIN_COORD_TYPE_HW_ALL 0xfe
-#define ACPI_CSTATE_SYSTEMIO (0)
-#define ACPI_CSTATE_FFH (1)
-#define ACPI_CSTATE_HALT (2)
+#define ACPI_CSTATE_SYSTEMIO 0
+#define ACPI_CSTATE_FFH 1
+#define ACPI_CSTATE_HALT 2
+
+#define ACPI_CX_DESC_LEN 32
/* Power Management */
@@ -74,6 +76,7 @@ struct acpi_processor_cx {
u64 time;
struct acpi_processor_cx_policy promotion;
struct acpi_processor_cx_policy demotion;
+ char desc[ACPI_CX_DESC_LEN];
};
struct acpi_processor_power {
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 5d9d70cd17f..342a2a0105c 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -30,19 +30,19 @@
/* Other architectures wishing to use this simple topology API should fill
in the below functions as appropriate in their own <asm/topology.h> file. */
#ifndef cpu_to_node
-#define cpu_to_node(cpu) (0)
+#define cpu_to_node(cpu) ((void)(cpu),0)
#endif
#ifndef parent_node
-#define parent_node(node) (0)
+#define parent_node(node) ((void)(node),0)
#endif
#ifndef node_to_cpumask
-#define node_to_cpumask(node) (cpu_online_map)
+#define node_to_cpumask(node) ((void)node, cpu_online_map)
#endif
#ifndef node_to_first_cpu
-#define node_to_first_cpu(node) (0)
+#define node_to_first_cpu(node) ((void)(node),0)
#endif
#ifndef pcibus_to_node
-#define pcibus_to_node(node) (-1)
+#define pcibus_to_node(bus) ((void)(bus), -1)
#endif
#ifndef pcibus_to_cpumask
diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h
index 49c62dd5ecc..0964c32c135 100644
--- a/include/asm-ia64/param.h
+++ b/include/asm-ia64/param.h
@@ -19,15 +19,7 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
-# ifdef CONFIG_IA64_HP_SIM
- /*
- * Yeah, simulating stuff is slow, so let us catch some breath between
- * timer interrupts...
- */
-# define HZ 32
-# else
-# define HZ CONFIG_HZ
-# endif
+# define HZ CONFIG_HZ
# define USER_HZ HZ
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
#else
diff --git a/include/asm-m68knommu/cacheflush.h b/include/asm-m68knommu/cacheflush.h
index 29bc0aad2eb..87e5dc0413b 100644
--- a/include/asm-m68knommu/cacheflush.h
+++ b/include/asm-m68knommu/cacheflush.h
@@ -54,28 +54,28 @@ static inline void __flush_cache_all(void)
#if defined(CONFIG_M527x) || defined(CONFIG_M528x)
__asm__ __volatile__ (
"movel #0x81000200, %%d0\n\t"
- "movec %%d0, %%CACR\n\t"
+ "movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M527x || CONFIG_M528x */
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || defined(CONFIG_M5272)
__asm__ __volatile__ (
- "movel #0x81000100, %%d0\n\t"
- "movec %%d0, %%CACR\n\t"
+ "movel #0x81000100, %%d0\n\t"
+ "movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M5206 || CONFIG_M5206e || CONFIG_M5272 */
#ifdef CONFIG_M5249
__asm__ __volatile__ (
- "movel #0xa1000200, %%d0\n\t"
- "movec %%d0, %%CACR\n\t"
+ "movel #0xa1000200, %%d0\n\t"
+ "movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M5249 */
#ifdef CONFIG_M532x
__asm__ __volatile__ (
- "movel #0x81000200, %%d0\n\t"
- "movec %%d0, %%CACR\n\t"
+ "movel #0x81000200, %%d0\n\t"
+ "movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M532x */
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 039ab3f8173..64c64432bbb 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -104,7 +104,7 @@ asmlinkage void resume(void);
#define mb() asm volatile ("" : : :"memory")
#define rmb() asm volatile ("" : : :"memory")
#define wmb() asm volatile ("" : : :"memory")
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) ({ (var) = (value); wmb(); })
#ifdef CONFIG_SMP
#define smp_mb() mb()
diff --git a/include/asm-mn10300/highmem.h b/include/asm-mn10300/highmem.h
index 383c0c42982..5256854c045 100644
--- a/include/asm-mn10300/highmem.h
+++ b/include/asm-mn10300/highmem.h
@@ -42,8 +42,8 @@ extern void __init kmap_init(void);
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern unsigned long __fastcall kmap_high(struct page *page);
-extern void __fastcall kunmap_high(struct page *page);
+extern unsigned long kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
static inline unsigned long kmap(struct page *page)
{
diff --git a/include/asm-mn10300/linkage.h b/include/asm-mn10300/linkage.h
index 29a32e46752..dda3002a5df 100644
--- a/include/asm-mn10300/linkage.h
+++ b/include/asm-mn10300/linkage.h
@@ -13,8 +13,6 @@
/* don't override anything */
#define asmlinkage
-#define FASTCALL(x) x
-#define fastcall
#define __ALIGN .align 4,0xcb
#define __ALIGN_STR ".align 4,0xcb"
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index e996521fb3a..ae7085c6569 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -309,8 +309,10 @@ SYSCALL_SPU(getcpu)
COMPAT_SYS(epoll_pwait)
COMPAT_SYS_SPU(utimensat)
COMPAT_SYS_SPU(signalfd)
-SYSCALL(ni_syscall)
+SYSCALL_SPU(timerfd_create)
SYSCALL_SPU(eventfd)
COMPAT_SYS_SPU(sync_file_range2)
COMPAT_SYS(fallocate)
SYSCALL(subpage_prot)
+COMPAT_SYS_SPU(timerfd_settime)
+COMPAT_SYS_SPU(timerfd_gettime)
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index fedc4b8e49e..ce91bb66206 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -328,15 +328,17 @@
#define __NR_epoll_pwait 303
#define __NR_utimensat 304
#define __NR_signalfd 305
-#define __NR_timerfd 306
+#define __NR_timerfd_create 306
#define __NR_eventfd 307
#define __NR_sync_file_range2 308
#define __NR_fallocate 309
#define __NR_subpage_prot 310
+#define __NR_timerfd_settime 311
+#define __NR_timerfd_gettime 312
#ifdef __KERNEL__
-#define __NR_syscalls 311
+#define __NR_syscalls 313
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
index ad4c5a1bc9d..37e4756b6b2 100644
--- a/include/asm-ppc/page.h
+++ b/include/asm-ppc/page.h
@@ -125,6 +125,8 @@ extern __inline__ int get_order(unsigned long size)
return 32 - lz;
}
+typedef struct page *pgtable_t;
+
#endif /* __ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index def8128b8b7..cfda7d5bf02 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -39,7 +39,7 @@ static void __init check_bugs(void)
*p++ = '4';
*p++ = 'a';
break;
- case CPU_SH7343 ... CPU_SH7722:
+ case CPU_SH7343 ... CPU_SH7366:
*p++ = '4';
*p++ = 'a';
*p++ = 'l';
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index 1ac10b9a078..ec028c64921 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,12 +10,14 @@
#ifndef __ASM_CPU_SH4_FREQ_H
#define __ASM_CPU_SH4_FREQ_H
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
+#if defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366)
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
#define IrDACLKCR 0xa4150010
+#endif
#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_CPU_SUBTYPE_SH7780)
#define FRQCR 0xffc80000
diff --git a/include/asm-sh/cpu-sh5/cacheflush.h b/include/asm-sh/cpu-sh5/cacheflush.h
index 98edb5b1da3..5a11f0b7e66 100644
--- a/include/asm-sh/cpu-sh5/cacheflush.h
+++ b/include/asm-sh/cpu-sh5/cacheflush.h
@@ -3,15 +3,13 @@
#ifndef __ASSEMBLY__
-#include <asm/page.h>
-
struct vm_area_struct;
struct page;
struct mm_struct;
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
+extern void flush_cache_sigtramp(unsigned long vaddr);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
@@ -27,7 +25,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
-#define p3_cache_init() do { } while (0)
+void p3_cache_init(void);
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-sh/cpu-sh5/mmu_context.h b/include/asm-sh/cpu-sh5/mmu_context.h
index df857fc0996..68a1d2cff45 100644
--- a/include/asm-sh/cpu-sh5/mmu_context.h
+++ b/include/asm-sh/cpu-sh5/mmu_context.h
@@ -16,12 +16,6 @@
/* This has to be a common function because the next location to fill
* information is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
-
-/* Profiling counter. */
-#ifdef CONFIG_SH64_PROC_TLB
-extern unsigned long long calls_to_do_fast_page_fault;
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
diff --git a/include/asm-sh/hp6xx.h b/include/asm-sh/hp6xx.h
index 53ca5643d9c..0d4165a32dc 100644
--- a/include/asm-sh/hp6xx.h
+++ b/include/asm-sh/hp6xx.h
@@ -10,9 +10,9 @@
*
*/
-#define HP680_BTN_IRQ 32 /* IRQ0_IRQ */
-#define HP680_TS_IRQ 35 /* IRQ3_IRQ */
-#define HP680_HD64461_IRQ 36 /* IRQ4_IRQ */
+#define HP680_BTN_IRQ 32 /* IRQ0_IRQ */
+#define HP680_TS_IRQ 35 /* IRQ3_IRQ */
+#define HP680_HD64461_IRQ 36 /* IRQ4_IRQ */
#define DAC_LCD_BRIGHTNESS 0
#define DAC_SPEAKER_VOLUME 1
@@ -55,26 +55,4 @@
#define PJDR 0xa4000130
#define PKDR 0xa4000132
-static inline void hp6xx_led_red(int on)
-{
- u16 v16;
- v16 = ctrl_inw(CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000);
- if (on)
- ctrl_outw(v16 & (~HD64461_GPBDR_LED_RED), CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000);
- else
- ctrl_outw(v16 | HD64461_GPBDR_LED_RED, CONFIG_HD64461_IOBASE + HD64461_GPBDR - 0x10000);
-}
-
-static inline void hp6xx_led_green(int on)
-{
- u8 v8;
-
- v8 = ctrl_inb(PKDR);
- if (on)
- ctrl_outb(v8 & (~PKDR_LED_GREEN), PKDR);
- else
- ctrl_outb(v8 | PKDR_LED_GREEN, PKDR);
-}
-
-
#endif /* __ASM_SH_HP6XX_H */
diff --git a/include/asm-sh/io.h b/include/asm-sh/io.h
index 94900c08951..356e50d0674 100644
--- a/include/asm-sh/io.h
+++ b/include/asm-sh/io.h
@@ -38,6 +38,7 @@
*/
#define __IO_PREFIX generic
#include <asm/io_generic.h>
+#include <asm/io_trapped.h>
#define maybebadio(port) \
printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
@@ -181,13 +182,13 @@ __BUILD_MEMORY_STRING(w, u16)
#define iowrite32(v,a) writel((v),(a))
#define iowrite32be(v,a) __raw_writel(cpu_to_be32((v)),(a))
-#define ioread8_rep(a,d,c) insb((a),(d),(c))
-#define ioread16_rep(a,d,c) insw((a),(d),(c))
-#define ioread32_rep(a,d,c) insl((a),(d),(c))
+#define ioread8_rep(a, d, c) readsb((a), (d), (c))
+#define ioread16_rep(a, d, c) readsw((a), (d), (c))
+#define ioread32_rep(a, d, c) readsl((a), (d), (c))
-#define iowrite8_rep(a,s,c) outsb((a),(s),(c))
-#define iowrite16_rep(a,s,c) outsw((a),(s),(c))
-#define iowrite32_rep(a,s,c) outsl((a),(s),(c))
+#define iowrite8_rep(a, s, c) writesb((a), (s), (c))
+#define iowrite16_rep(a, s, c) writesw((a), (s), (c))
+#define iowrite32_rep(a, s, c) writesl((a), (s), (c))
#define mmiowb() wmb() /* synco on SH-4A, otherwise a nop */
@@ -207,6 +208,8 @@ static inline void __set_io_port_base(unsigned long pbase)
generic_io_base = pbase;
}
+#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
+
/* We really want to try and get these to memcpy etc */
extern void memcpy_fromio(void *, volatile void __iomem *, unsigned long);
extern void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
@@ -309,7 +312,14 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#ifdef CONFIG_SUPERH32
unsigned long last_addr = offset + size - 1;
+#endif
+ void __iomem *ret;
+ ret = __ioremap_trapped(offset, size);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_SUPERH32
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses are done through P2.
diff --git a/include/asm-sh/io_trapped.h b/include/asm-sh/io_trapped.h
new file mode 100644
index 00000000000..f1251d4f0ba
--- /dev/null
+++ b/include/asm-sh/io_trapped.h
@@ -0,0 +1,58 @@
+#ifndef __ASM_SH_IO_TRAPPED_H
+#define __ASM_SH_IO_TRAPPED_H
+
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <asm/page.h>
+
+#define IO_TRAPPED_MAGIC 0xfeedbeef
+
+struct trapped_io {
+ unsigned int magic;
+ struct resource *resource;
+ unsigned int num_resources;
+ unsigned int minimum_bus_width;
+ struct list_head list;
+ void __iomem *virt_base;
+} __aligned(PAGE_SIZE);
+
+#ifdef CONFIG_IO_TRAPPED
+int register_trapped_io(struct trapped_io *tiop);
+int handle_trapped_io(struct pt_regs *regs, unsigned long address);
+
+void __iomem *match_trapped_io_handler(struct list_head *list,
+ unsigned long offset,
+ unsigned long size);
+
+#ifdef CONFIG_HAS_IOMEM
+extern struct list_head trapped_mem;
+
+static inline void __iomem *
+__ioremap_trapped(unsigned long offset, unsigned long size)
+{
+ return match_trapped_io_handler(&trapped_mem, offset, size);
+}
+#else
+#define __ioremap_trapped(offset, size) NULL
+#endif
+
+#ifdef CONFIG_HAS_IOPORT
+extern struct list_head trapped_io;
+
+static inline void __iomem *
+__ioport_map_trapped(unsigned long offset, unsigned long size)
+{
+ return match_trapped_io_handler(&trapped_io, offset, size);
+}
+#else
+#define __ioport_map_trapped(offset, size) NULL
+#endif
+
+#else
+#define register_trapped_io(tiop) (-1)
+#define handle_trapped_io(tiop, address) 0
+#define __ioremap_trapped(offset, size) NULL
+#define __ioport_map_trapped(offset, size) NULL
+#endif
+
+#endif /* __ASM_SH_IO_TRAPPED_H */
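
Aside, not part of the patch: a hypothetical sketch of how a board file might feed the new trapped-I/O API above. Only the struct fields and register_trapped_io() come from the header; the names, addresses, sizes and the bus-width value are invented for illustration.

	/* Hypothetical board-setup fragment; all values are made up. */
	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/kernel.h>
	#include <asm/io_trapped.h>

	static struct resource example_trapped_resources[] = {
		{
			.start	= 0xb8000000,			/* invented window */
			.end	= 0xb8000000 + 0x100 - 1,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct trapped_io example_trapped_io = {
		.resource		= example_trapped_resources,
		.num_resources		= ARRAY_SIZE(example_trapped_resources),
		.minimum_bus_width	= 16,	/* assumption about the unit */
	};

	static int __init example_board_setup(void)
	{
		return register_trapped_io(&example_trapped_io);
	}
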
diff --git a/include/asm-sh/ioctls.h b/include/asm-sh/ioctls.h
index 35805df010a..c212c371a4a 100644
--- a/include/asm-sh/ioctls.h
+++ b/include/asm-sh/ioctls.h
@@ -78,6 +78,10 @@
#define TIOCSBRK _IO('T', 39) /* 0x5427 */ /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* 0x5428 */ /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index 11850f65c92..ca66e5df69d 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -50,4 +50,8 @@ extern void irq_ctx_exit(int cpu);
# define irq_ctx_exit(cpu) do { } while (0)
#endif
+#ifdef CONFIG_CPU_SH5
+#include <asm/cpu/irq.h>
+#endif
+
#endif /* __ASM_SH_IRQ_H */
diff --git a/include/asm-sh/mmu_context_64.h b/include/asm-sh/mmu_context_64.h
index 020be744b08..9649f1c07ca 100644
--- a/include/asm-sh/mmu_context_64.h
+++ b/include/asm-sh/mmu_context_64.h
@@ -66,6 +66,9 @@ static inline void set_asid(unsigned long asid)
: "=r" (sr), "=r" (pc) : "0" (sr));
}
+/* arch/sh/kernel/cpu/sh5/entry.S */
+extern unsigned long switch_and_save_asid(unsigned long new_asid);
+
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 134562dc8c4..304c30b5d94 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -55,11 +55,14 @@ extern void clear_page(void *to);
extern void copy_page(void *to, void *from);
#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
- (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
+ (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
+ defined(CONFIG_SH7705_CACHE_32KB))
struct page;
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
-#ifdef CONFIG_CPU_SH4
+extern void copy_user_page(void *to, void *from, unsigned long address,
+ struct page *page);
+#if defined(CONFIG_CPU_SH4)
extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h
index 972211671c9..f9dd9d31144 100644
--- a/include/asm-sh/pgtable_64.h
+++ b/include/asm-sh/pgtable_64.h
@@ -138,6 +138,14 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
#endif
/*
+ * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
+ * to make pte_mkhuge() happy.
+ */
+#ifndef _PAGE_SZHUGE
+# define _PAGE_SZHUGE (0)
+#endif
+
+/*
* Default flags for a Kernel page.
* This is fundamentally also SHARED because the main use of this define
* (other than for PGD/PMD entries) is for the VMALLOC pool which is
@@ -179,6 +187,11 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
_PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
+#define PAGE_KERNEL_NOCACHE \
+ __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE | _PAGE_ACCESSED | \
+ _PAGE_DIRTY | _PAGE_SHARED)
+
/* Make it a device mapping for maximum safety (e.g. for mapping device
registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index c9b14161f73..19fe47c1ca1 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -33,7 +33,7 @@ enum cpu_type {
CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SHX3,
/* SH4AL-DSP types */
- CPU_SH7343, CPU_SH7722,
+ CPU_SH7343, CPU_SH7722, CPU_SH7366,
/* SH-5 types */
CPU_SH5_101, CPU_SH5_103,
diff --git a/include/asm-sh/r7780rp.h b/include/asm-sh/r7780rp.h
index bdecea0840a..1770460a461 100644
--- a/include/asm-sh/r7780rp.h
+++ b/include/asm-sh/r7780rp.h
@@ -195,7 +195,4 @@ unsigned char *highlander_init_irq_r7780mp(void);
unsigned char *highlander_init_irq_r7780rp(void);
unsigned char *highlander_init_irq_r7785rp(void);
-#define __IO_PREFIX r7780rp
-#include <asm/io_generic.h>
-
#endif /* __ASM_SH_RENESAS_R7780RP */
diff --git a/include/asm-sh/rts7751r2d.h b/include/asm-sh/rts7751r2d.h
index 83b9c111f17..0a800157b82 100644
--- a/include/asm-sh/rts7751r2d.h
+++ b/include/asm-sh/rts7751r2d.h
@@ -67,7 +67,4 @@
void init_rts7751r2d_IRQ(void);
int rts7751r2d_irq_demux(int);
-#define __IO_PREFIX rts7751r2d
-#include <asm/io_generic.h>
-
#endif /* __ASM_SH_RENESAS_RTS7751R2D */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 772cd1a0a67..5145aa2a0ce 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -182,6 +182,11 @@ BUILD_TRAP_HANDLER(fpu_state_restore);
#define arch_align_stack(x) (x)
+struct mem_access {
+ unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
+ unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
+};
+
#ifdef CONFIG_SUPERH32
# include "system_32.h"
#else
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index 7ff08d956ba..f11bcf0855e 100644
--- a/include/asm-sh/system_32.h
+++ b/include/asm-sh/system_32.h
@@ -96,4 +96,7 @@ do { \
: "=&r" (__dummy)); \
} while (0)
+int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
+ struct mem_access *ma);
+
#endif /* __ASM_SH_SYSTEM_32_H */
diff --git a/include/asm-sh/termbits.h b/include/asm-sh/termbits.h
index 7ee1b42eeab..77db116948c 100644
--- a/include/asm-sh/termbits.h
+++ b/include/asm-sh/termbits.h
@@ -140,6 +140,7 @@ struct ktermios {
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
+#define BOTHER 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
@@ -155,10 +156,12 @@ struct ktermios {
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CIBAUD 002003600000 /* input baud rate */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
diff --git a/include/asm-sh/termios.h b/include/asm-sh/termios.h
index e7c8f86ef89..0a8c793c76f 100644
--- a/include/asm-sh/termios.h
+++ b/include/asm-sh/termios.h
@@ -80,8 +80,10 @@ struct termio {
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
diff --git a/include/asm-sh/tlb.h b/include/asm-sh/tlb.h
index 56ad1fb888a..88ff1ae8a6b 100644
--- a/include/asm-sh/tlb.h
+++ b/include/asm-sh/tlb.h
@@ -20,6 +20,7 @@
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index ff24ce95b23..b3440c305b5 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -1,5 +1,34 @@
+#ifndef __ASM_SH_UACCESS_H
+#define __ASM_SH_UACCESS_H
+
#ifdef CONFIG_SUPERH32
# include "uaccess_32.h"
#else
# include "uaccess_64.h"
#endif
+
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long __copy_from = (unsigned long) from;
+ __kernel_size_t __copy_size = (__kernel_size_t) n;
+
+ if (__copy_size && __access_ok(__copy_from, __copy_size))
+ return __copy_user(to, from, __copy_size);
+
+ return __copy_size;
+}
+
+static inline unsigned long
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __copy_to = (unsigned long) to;
+ __kernel_size_t __copy_size = (__kernel_size_t) n;
+
+ if (__copy_size && __access_ok(__copy_to, __copy_size))
+ return __copy_user(to, from, __copy_size);
+
+ return __copy_size;
+}
+
+#endif /* __ASM_SH_UACCESS_H */
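The consolidated copy_from_user()/copy_to_user() wrappers above return the number of bytes that could not be copied, so zero means success. A minimal, hypothetical caller (illustration only, not part of the patch) would typically map a non-zero return to -EFAULT:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical fixed-size argument block fetched from user space. */
struct example_args {
	unsigned long addr;
	unsigned long len;
};

static int example_fetch_args(struct example_args *dst, const void __user *src)
{
	/* Non-zero return: some bytes were left uncopied. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}
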
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
index b6082f3c1dc..c0318b60889 100644
--- a/include/asm-sh/uaccess_32.h
+++ b/include/asm-sh/uaccess_32.h
@@ -10,8 +10,8 @@
* Copyright (C) 1996, 1997, 1998 by Ralf Baechle
* and i386 version.
*/
-#ifndef __ASM_SH_UACCESS_H
-#define __ASM_SH_UACCESS_H
+#ifndef __ASM_SH_UACCESS_32_H
+#define __ASM_SH_UACCESS_32_H
#include <linux/errno.h>
#include <linux/sched.h>
@@ -302,24 +302,6 @@ extern void __put_user_unknown(void);
/* Return the number of bytes NOT copied */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-#define copy_to_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
-
-#define copy_from_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-void *__copy_from = (void *) (from); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -507,4 +489,4 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
-#endif /* __ASM_SH_UACCESS_H */
+#endif /* __ASM_SH_UACCESS_32_H */
diff --git a/include/asm-sh/uaccess_64.h b/include/asm-sh/uaccess_64.h
index d54ec082d25..f956b7b316c 100644
--- a/include/asm-sh/uaccess_64.h
+++ b/include/asm-sh/uaccess_64.h
@@ -202,15 +202,6 @@ extern void __put_user_unknown(void);
/* XXX: should be such that: 4byte and the rest. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);
-#define copy_to_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
-
#define copy_to_user_ret(to,from,n,retval) ({ \
if (copy_to_user(to,from,n)) \
return retval; \
@@ -225,16 +216,6 @@ if (__copy_to_user(to,from,n)) \
return retval; \
})
-#define copy_from_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-void *__copy_from = (void *) (from); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
-
#define copy_from_user_ret(to,from,n,retval) ({ \
if (copy_from_user(to,from,n)) \
return retval; \
diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h
index 433fd1b48fa..0b07212ec65 100644
--- a/include/asm-sh/unistd_32.h
+++ b/include/asm-sh/unistd_32.h
@@ -330,11 +330,13 @@
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
-/* #define __NR_timerfd 322 removed */
+#define __NR_timerfd_create 322
#define __NR_eventfd 323
#define __NR_fallocate 324
+#define __NR_timerfd_settime 325
+#define __NR_timerfd_gettime 326
-#define NR_syscalls 325
+#define NR_syscalls 327
#ifdef __KERNEL__
diff --git a/include/asm-sh/unistd_64.h b/include/asm-sh/unistd_64.h
index 108d2ba897f..9d21eab5242 100644
--- a/include/asm-sh/unistd_64.h
+++ b/include/asm-sh/unistd_64.h
@@ -90,7 +90,7 @@
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
@@ -370,9 +370,11 @@
#define __NR_epoll_pwait 347
#define __NR_utimensat 348
#define __NR_signalfd 349
-/* #define __NR_timerfd 350 removed */
+#define __NR_timerfd_create 350
#define __NR_eventfd 351
#define __NR_fallocate 352
+#define __NR_timerfd_settime 353
+#define __NR_timerfd_gettime 354
#ifdef __KERNEL__
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 6a22212b4b2..5396c212d8c 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -48,12 +48,15 @@ void cpa_init(void);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
+extern const int rodata_test_data;
#endif
+
#ifdef CONFIG_DEBUG_RODATA_TEST
-void rodata_test(void);
+int rodata_test(void);
#else
-static inline void rodata_test(void)
+static inline int rodata_test(void)
{
+ return 0;
}
#endif
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index dd442a1632c..99dcbafa151 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -31,7 +31,6 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
unsigned long *sp, unsigned long bp);
extern void __show_regs(struct pt_regs *regs);
extern void show_regs(struct pt_regs *regs);
-extern void dump_pagetable(unsigned long);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 681deade5f0..d743947f4c7 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -58,6 +58,7 @@ struct _fpstate {
#define X86_FXSR_MAGIC 0x0000
+#ifdef __KERNEL__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
@@ -82,6 +83,35 @@ struct sigcontext {
unsigned long oldmask;
unsigned long cr2;
};
+#else /* __KERNEL__ */
+/*
+ * User-space might still rely on the old definition:
+ */
+struct sigcontext {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long esp;
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned short cs, __csh;
+ unsigned long eflags;
+ unsigned long esp_at_signal;
+ unsigned short ss, __ssh;
+ struct _fpstate __user * fpstate;
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+#endif /* !__KERNEL__ */
#else /* __i386__ */
@@ -102,6 +132,7 @@ struct _fpstate {
__u32 reserved2[24];
};
+#ifdef __KERNEL__
struct sigcontext {
unsigned long r8;
unsigned long r9;
@@ -132,6 +163,41 @@ struct sigcontext {
struct _fpstate __user *fpstate; /* zero when no FPU context */
unsigned long reserved1[8];
};
+#else /* __KERNEL__ */
+/*
+ * User-space might still rely on the old definition:
+ */
+struct sigcontext {
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long rdi;
+ unsigned long rsi;
+ unsigned long rbp;
+ unsigned long rbx;
+ unsigned long rdx;
+ unsigned long rax;
+ unsigned long rcx;
+ unsigned long rsp;
+ unsigned long rip;
+ unsigned long eflags; /* RFLAGS */
+ unsigned short cs;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
+ unsigned long err;
+ unsigned long trapno;
+ unsigned long oldmask;
+ unsigned long cr2;
+ struct _fpstate __user *fpstate; /* zero when no FPU context */
+ unsigned long reserved1[8];
+};
+#endif /* !__KERNEL__ */
#endif /* !__i386__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ddbe7efe590..2c7e003356a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -203,6 +203,7 @@ extern bool wmi_has_guid(const char *guid);
extern int acpi_blacklisted(void);
#ifdef CONFIG_DMI
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
+extern int acpi_osi_setup(char *str);
#endif
#ifdef CONFIG_ACPI_NUMA
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 7ef8de66200..a9931e2e562 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -206,21 +206,21 @@ struct kioctx {
/* prototypes */
extern unsigned aio_max_size;
-extern ssize_t FASTCALL(wait_on_sync_kiocb(struct kiocb *iocb));
-extern int FASTCALL(aio_put_req(struct kiocb *iocb));
-extern void FASTCALL(kick_iocb(struct kiocb *iocb));
-extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2));
-extern void FASTCALL(__put_ioctx(struct kioctx *ctx));
+extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
+extern int aio_put_req(struct kiocb *iocb);
+extern void kick_iocb(struct kiocb *iocb);
+extern int aio_complete(struct kiocb *iocb, long res, long res2);
+extern void __put_ioctx(struct kioctx *ctx);
struct mm_struct;
-extern void FASTCALL(exit_aio(struct mm_struct *mm));
+extern void exit_aio(struct mm_struct *mm);
extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int FASTCALL(io_submit_one(struct kioctx *ctx,
- struct iocb __user *user_iocb, struct iocb *iocb));
+extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+ struct iocb *iocb);
/* semi private, but used by the 32bit emulations: */
struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb));
+int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+ struct iocb *iocb);
#define get_ioctx(kioctx) do { \
BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 97153027207..2af9ec02501 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -534,8 +534,7 @@ extern void audit_log_n_untrustedstring(struct audit_buffer *ab,
const char *string);
extern void audit_log_d_path(struct audit_buffer *ab,
const char *prefix,
- struct dentry *dentry,
- struct vfsmount *vfsmnt);
+ struct path *path);
extern void audit_log_lost(const char *message);
/* Private API (for audit.c only) */
extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
@@ -552,7 +551,7 @@ extern int audit_enabled;
#define audit_log_hex(a,b,l) do { ; } while (0)
#define audit_log_untrustedstring(a,s) do { ; } while (0)
#define audit_log_n_untrustedstring(a,n,s) do { ; } while (0)
-#define audit_log_d_path(b,p,d,v) do { ; } while (0)
+#define audit_log_d_path(b, p, d) do { ; } while (0)
#define audit_enabled 0
#endif
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index e98801f06dc..932eb02a275 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -144,7 +144,7 @@ BUFFER_FNS(Unwritten, unwritten)
* Declarations
*/
-void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
+void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
@@ -185,8 +185,8 @@ struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
-void FASTCALL(unlock_buffer(struct buffer_head *bh));
-void FASTCALL(__lock_buffer(struct buffer_head *bh));
+void unlock_buffer(struct buffer_head *bh);
+void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 228235c5ae5..ac6aad98b60 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -25,7 +25,7 @@ SUBSYS(ns)
/* */
-#ifdef CONFIG_FAIR_CGROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
SUBSYS(cpu_cgroup)
#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 8c6967f3fb1..4b287ad9371 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -37,6 +37,7 @@
#ifdef __KERNEL__
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/kref.h>
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 385d45b616d..6b72a458408 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -19,6 +19,7 @@
#define CPUIDLE_STATE_MAX 8
#define CPUIDLE_NAME_LEN 16
+#define CPUIDLE_DESC_LEN 32
struct cpuidle_device;
@@ -29,6 +30,7 @@ struct cpuidle_device;
struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
+ char desc[CPUIDLE_DESC_LEN];
void *driver_data;
unsigned int flags;
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index f8c9a2752f0..0a26be353cb 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -26,8 +26,6 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
-#define cpuset_nodes_subset_current_mems_allowed(nodes) \
- nodes_subset((nodes), current->mems_allowed)
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +101,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}
-#define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c2c153f97e8..6bd646096fa 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
#include <linux/rcupdate.h>
struct nameidata;
+struct path;
struct vfsmount;
/*
@@ -300,8 +301,8 @@ extern int d_validate(struct dentry *, struct dentry *);
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
-
+extern char *d_path(struct path *, char *, int);
+
/* Allocation counts.. */
/**
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
index 98c69ab80c8..24c806f12a6 100644
--- a/include/linux/dcookies.h
+++ b/include/linux/dcookies.h
@@ -13,6 +13,7 @@
#ifdef CONFIG_PROFILING
#include <linux/dcache.h>
+#include <linux/path.h>
#include <linux/types.h>
struct dcookie_user;
@@ -43,8 +44,7 @@ void dcookie_unregister(struct dcookie_user * user);
*
* Returns 0 on success, with *cookie filled in
*/
-int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt,
- unsigned long * cookie);
+int get_dcookie(struct path *path, unsigned long *cookie);
#else
@@ -57,13 +57,12 @@ static inline void dcookie_unregister(struct dcookie_user * user)
{
return;
}
-
-static inline int get_dcookie(struct dentry * dentry,
- struct vfsmount * vfsmnt, unsigned long * cookie)
+
+static inline int get_dcookie(struct path *path, unsigned long *cookie)
{
return -ENOSYS;
-}
-
+}
+
#endif /* CONFIG_PROFILING */
-
+
#endif /* DCOOKIES_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index b7558ec81ed..25d62e6e329 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -70,8 +70,7 @@ static inline int is_multicast_ether_addr(const u8 *addr)
}
/**
- * is_local_ether_addr - Determine if the Ethernet address is locally-assigned
- * one (IEEE 802).
+ * is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Return true if the address is a local address.
diff --git a/include/linux/file.h b/include/linux/file.h
index 56023c74e9f..7239baac81a 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -59,8 +59,8 @@ struct files_struct {
extern struct kmem_cache *filp_cachep;
-extern void FASTCALL(__fput(struct file *));
-extern void FASTCALL(fput(struct file *));
+extern void __fput(struct file *);
+extern void fput(struct file *);
struct file_operations;
struct vfsmount;
@@ -77,13 +77,13 @@ static inline void fput_light(struct file *file, int fput_needed)
fput(file);
}
-extern struct file * FASTCALL(fget(unsigned int fd));
-extern struct file * FASTCALL(fget_light(unsigned int fd, int *fput_needed));
-extern void FASTCALL(set_close_on_exec(unsigned int fd, int flag));
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern void set_close_on_exec(unsigned int fd, int flag);
extern void put_filp(struct file *);
extern int get_unused_fd(void);
extern int get_unused_fd_flags(int flags);
-extern void FASTCALL(put_unused_fd(unsigned int fd));
+extern void put_unused_fd(unsigned int fd);
struct kmem_cache;
extern int expand_files(struct files_struct *, int nr);
@@ -110,12 +110,12 @@ static inline struct file * fcheck_files(struct files_struct *files, unsigned in
*/
#define fcheck(fd) fcheck_files(current->files, fd)
-extern void FASTCALL(fd_install(unsigned int fd, struct file * file));
+extern void fd_install(unsigned int fd, struct file *file);
struct task_struct;
struct files_struct *get_files_struct(struct task_struct *);
-void FASTCALL(put_files_struct(struct files_struct *fs));
+void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct task_struct *, struct files_struct *);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 18cfbf76ec5..98ffb6ead43 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1284,8 +1284,10 @@ struct super_operations {
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
* fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Inode is dirty and must be written on fdatasync(), f.e.
- * because i_size changed.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * these changes separately from I_DIRTY_SYNC so that we
+ * don't have to write inode on fdatasync() when only
+ * mtime has changed in it.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
* I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both
* are cleared by unlock_new_inode(), called from iget().
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 11a36ceddf7..282f5421912 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -1,15 +1,13 @@
#ifndef _LINUX_FS_STRUCT_H
#define _LINUX_FS_STRUCT_H
-struct dentry;
-struct vfsmount;
+#include <linux/path.h>
struct fs_struct {
atomic_t count;
rwlock_t lock;
int umask;
- struct dentry * root, * pwd, * altroot;
- struct vfsmount * rootmnt, * pwdmnt, * altrootmnt;
+ struct path root, pwd, altroot;
};
#define INIT_FS { \
@@ -22,8 +20,8 @@ extern struct kmem_cache *fs_cachep;
extern void exit_fs(struct task_struct *);
extern void set_fs_altroot(void);
-extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
-extern void set_fs_pwd(struct fs_struct *, struct vfsmount *, struct dentry *);
+extern void set_fs_root(struct fs_struct *, struct path *);
+extern void set_fs_pwd(struct fs_struct *, struct path *);
extern struct fs_struct *copy_fs_struct(struct fs_struct *);
extern void put_fs_struct(struct fs_struct *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0c6ce515185..164be9da3c1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -172,8 +172,7 @@ static inline void arch_free_page(struct page *page, int order) { }
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-extern struct page *
-FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
+extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
@@ -209,8 +208,8 @@ extern struct page *alloc_page_vma(gfp_t gfp_mask,
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));
+extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+extern unsigned long get_zeroed_page(gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask),0)
@@ -218,10 +217,10 @@ extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));
#define __get_dma_pages(gfp_mask, order) \
__get_free_pages((gfp_mask) | GFP_DMA,(order))
-extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
-extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
-extern void FASTCALL(free_hot_page(struct page *page));
-extern void FASTCALL(free_cold_page(struct page *page));
+extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages(unsigned long addr, unsigned int order);
+extern void free_hot_page(struct page *page);
+extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7ca198b379a..addca4cd4f1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,8 +33,8 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long max_huge_pages;
+extern unsigned long sysctl_overcommit_huge_pages;
extern unsigned long hugepages_treat_as_movable;
-extern unsigned long nr_overcommit_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index acec99da832..a3b69c10d66 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -906,6 +906,8 @@ enum {
IDE_TFLAG_IN_DEVICE,
/* force 16-bit I/O operations */
IDE_TFLAG_IO_16BIT = (1 << 30),
+ /* ide_task_t was allocated using kmalloc() */
+ IDE_TFLAG_DYN = (1 << 31),
};
struct ide_taskfile {
@@ -998,8 +1000,7 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-/* FIXME: palm_bk3710 uses BLK_DEV_IDEDMA_PCI without BLK_DEV_IDEPCI! */
-#if defined(CONFIG_BLK_DEV_IDEPCI) && defined(CONFIG_BLK_DEV_IDEDMA_PCI)
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
void ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
#else
static inline void ide_hwif_setup_dma(ide_hwif_t *hwif,
@@ -1146,7 +1147,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *);
int ide_build_sglist(ide_drive_t *, struct request *);
void ide_destroy_dmatable(ide_drive_t *);
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
extern int ide_build_dmatable(ide_drive_t *, struct request *);
extern int ide_release_dma(ide_hwif_t *);
extern void ide_setup_dma(ide_hwif_t *, unsigned long);
@@ -1157,7 +1158,7 @@ extern void ide_dma_start(ide_drive_t *);
extern int __ide_dma_end(ide_drive_t *);
extern void ide_dma_lost_irq(ide_drive_t *);
extern void ide_dma_timeout(ide_drive_t *);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
#else
static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
@@ -1171,7 +1172,7 @@ static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
-#ifndef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
static inline void ide_release_dma(ide_hwif_t *drive) {;}
#endif
@@ -1294,7 +1295,7 @@ static inline void ide_dump_identify(u8 *id)
static inline int hwif_to_node(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- return dev ? pcibus_to_node(dev->bus) : -1;
+ return hwif->dev ? pcibus_to_node(dev->bus) : -1;
}
static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index dea7598aeff..f8ab4ce7056 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -273,8 +273,8 @@ asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
+extern void raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq(unsigned int nr);
/* Tasklets --- multithreaded analogue of BHs.
@@ -341,7 +341,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
#define tasklet_unlock(t) do { } while (0)
#endif
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+extern void __tasklet_schedule(struct tasklet_struct *t);
static inline void tasklet_schedule(struct tasklet_struct *t)
{
@@ -349,7 +349,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t)
__tasklet_schedule(t);
}
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+extern void __tasklet_hi_schedule(struct tasklet_struct *t);
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bfd9efb5cb4..176e5e790a4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -285,7 +285,6 @@ extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
/*
* Monolithic do_IRQ implementation.
- * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
*/
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 36c542b70c6..2cd7fa73d1a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -310,6 +310,8 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * 1000);
}
+extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
+
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 3faf599ea58..0592936344c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -73,9 +73,4 @@
#define ATTRIB_NORET __attribute__((noreturn))
#define NORET_AND noreturn,
-#ifndef FASTCALL
-#define FASTCALL(x) x
-#define fastcall
-#endif
-
#endif
diff --git a/include/linux/maple.h b/include/linux/maple.h
index bad9a7b319d..3f01e2bae1a 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -7,74 +7,74 @@ extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
- MAPLE_RESPONSE_FILEERR = -5,
- MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */
- MAPLE_RESPONSE_BADCMD = -3,
- MAPLE_RESPONSE_BADFUNC = -2,
- MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */
- MAPLE_COMMAND_DEVINFO = 1,
- MAPLE_COMMAND_ALLINFO = 2,
- MAPLE_COMMAND_RESET = 3,
- MAPLE_COMMAND_KILL = 4,
- MAPLE_RESPONSE_DEVINFO = 5,
- MAPLE_RESPONSE_ALLINFO = 6,
- MAPLE_RESPONSE_OK = 7,
- MAPLE_RESPONSE_DATATRF = 8,
- MAPLE_COMMAND_GETCOND = 9,
- MAPLE_COMMAND_GETMINFO = 10,
- MAPLE_COMMAND_BREAD = 11,
- MAPLE_COMMAND_BWRITE = 12,
- MAPLE_COMMAND_SETCOND = 14
+ MAPLE_RESPONSE_FILEERR = -5,
+ MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */
+ MAPLE_RESPONSE_BADCMD = -3,
+ MAPLE_RESPONSE_BADFUNC = -2,
+ MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */
+ MAPLE_COMMAND_DEVINFO = 1,
+ MAPLE_COMMAND_ALLINFO = 2,
+ MAPLE_COMMAND_RESET = 3,
+ MAPLE_COMMAND_KILL = 4,
+ MAPLE_RESPONSE_DEVINFO = 5,
+ MAPLE_RESPONSE_ALLINFO = 6,
+ MAPLE_RESPONSE_OK = 7,
+ MAPLE_RESPONSE_DATATRF = 8,
+ MAPLE_COMMAND_GETCOND = 9,
+ MAPLE_COMMAND_GETMINFO = 10,
+ MAPLE_COMMAND_BREAD = 11,
+ MAPLE_COMMAND_BWRITE = 12,
+ MAPLE_COMMAND_SETCOND = 14
};
struct mapleq {
- struct list_head list;
- struct maple_device *dev;
- void *sendbuf, *recvbuf, *recvbufdcsp;
- unsigned char length;
- enum maple_code command;
+ struct list_head list;
+ struct maple_device *dev;
+ void *sendbuf, *recvbuf, *recvbufdcsp;
+ unsigned char length;
+ enum maple_code command;
};
struct maple_devinfo {
- unsigned long function;
- unsigned long function_data[3];
- unsigned char area_code;
- unsigned char connector_directon;
- char product_name[31];
- char product_licence[61];
- unsigned short standby_power;
- unsigned short max_power;
+ unsigned long function;
+ unsigned long function_data[3];
+ unsigned char area_code;
+ unsigned char connector_direction;
+ char product_name[31];
+ char product_licence[61];
+ unsigned short standby_power;
+ unsigned short max_power;
};
struct maple_device {
- struct maple_driver *driver;
- struct mapleq *mq;
- void *private_data;
- void (*callback) (struct mapleq * mq);
- unsigned long when, interval, function;
- struct maple_devinfo devinfo;
- unsigned char port, unit;
- char product_name[32];
- char product_licence[64];
- int registered;
- struct device dev;
+ struct maple_driver *driver;
+ struct mapleq *mq;
+ void *private_data;
+ void (*callback) (struct mapleq * mq);
+ unsigned long when, interval, function;
+ struct maple_devinfo devinfo;
+ unsigned char port, unit;
+ char product_name[32];
+ char product_licence[64];
+ struct device dev;
};
struct maple_driver {
- unsigned long function;
- int (*connect) (struct maple_device * dev);
- void (*disconnect) (struct maple_device * dev);
- struct device_driver drv;
+ unsigned long function;
+ int (*connect) (struct maple_device * dev);
+ void (*disconnect) (struct maple_device * dev);
+ struct device_driver drv;
+ int registered;
};
void maple_getcond_callback(struct maple_device *dev,
- void (*callback) (struct mapleq * mq),
- unsigned long interval,
- unsigned long function);
+ void (*callback) (struct mapleq * mq),
+ unsigned long interval,
+ unsigned long function);
int maple_driver_register(struct device_driver *drv);
void maple_add_packet(struct mapleq *mq);
#define to_maple_dev(n) container_of(n, struct maple_device, dev)
#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
-#endif /* __LINUX_MAPLE_H */
+#endif /* __LINUX_MAPLE_H */
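A hypothetical bus-client sketch (not part of the patch) built against the reworked maple_driver layout above; the example_* names and the function mask value are invented, and error handling is omitted for brevity:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/maple.h>

static int example_connect(struct maple_device *mdev)
{
	/* Claim the unit, set up maple_getcond_callback() etc. here. */
	return 0;
}

static void example_disconnect(struct maple_device *mdev)
{
}

static struct maple_driver example_maple_driver = {
	.function	= 0x01000000,	/* hypothetical function mask */
	.connect	= example_connect,
	.disconnect	= example_disconnect,
	.drv = {
		.name	= "example_maple",
	},
};

static int __init example_maple_init(void)
{
	return maple_driver_register(&example_maple_driver.drv);
}
module_init(example_maple_init);
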
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 5f36cf946bc..5df879dc377 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -19,16 +19,23 @@ struct marker;
/**
* marker_probe_func - Type of a marker probe function
- * @mdata: pointer of type struct marker
- * @private_data: caller site private data
+ * @probe_private: probe private data
+ * @call_private: call site private data
* @fmt: format string
- * @...: variable argument list
+ * @args: variable argument list pointer. Use a pointer to overcome C's
+ * inability to pass this around as a pointer in a portable manner in
+ * the callee otherwise.
*
* Type of marker probe functions. They receive the mdata and need to parse the
* format string to recover the variable argument list.
*/
-typedef void marker_probe_func(const struct marker *mdata,
- void *private_data, const char *fmt, ...);
+typedef void marker_probe_func(void *probe_private, void *call_private,
+ const char *fmt, va_list *args);
+
+struct marker_probe_closure {
+ marker_probe_func *func; /* Callback */
+ void *probe_private; /* Private probe data */
+};
struct marker {
const char *name; /* Marker name */
@@ -36,8 +43,11 @@ struct marker {
* variable argument list.
*/
char state; /* Marker state. */
- marker_probe_func *call;/* Probe handler function pointer */
- void *private; /* Private probe data */
+ char ptype; /* probe type : 0 : single, 1 : multi */
+ void (*call)(const struct marker *mdata, /* Probe wrapper */
+ void *call_private, const char *fmt, ...);
+ struct marker_probe_closure single;
+ struct marker_probe_closure *multi;
} __attribute__((aligned(8)));
#ifdef CONFIG_MARKERS
@@ -49,35 +59,31 @@ struct marker {
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
-#define __trace_mark(name, call_data, format, args...) \
+#define __trace_mark(name, call_private, format, args...) \
do { \
- static const char __mstrtab_name_##name[] \
- __attribute__((section("__markers_strings"))) \
- = #name; \
- static const char __mstrtab_format_##name[] \
+ static const char __mstrtab_##name[] \
__attribute__((section("__markers_strings"))) \
- = format; \
+ = #name "\0" format; \
static struct marker __mark_##name \
__attribute__((section("__markers"), aligned(8))) = \
- { __mstrtab_name_##name, __mstrtab_format_##name, \
- 0, __mark_empty_function, NULL }; \
+ { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
+ 0, 0, marker_probe_cb, \
+ { __mark_empty_function, NULL}, NULL }; \
__mark_check_format(format, ## args); \
if (unlikely(__mark_##name.state)) { \
- preempt_disable(); \
(*__mark_##name.call) \
- (&__mark_##name, call_data, \
+ (&__mark_##name, call_private, \
format, ## args); \
- preempt_enable(); \
} \
} while (0)
extern void marker_update_probe_range(struct marker *begin,
- struct marker *end, struct module *probe_module, int *refcount);
+ struct marker *end);
#else /* !CONFIG_MARKERS */
-#define __trace_mark(name, call_data, format, args...) \
+#define __trace_mark(name, call_private, format, args...) \
__mark_check_format(format, ## args)
static inline void marker_update_probe_range(struct marker *begin,
- struct marker *end, struct module *probe_module, int *refcount)
+ struct marker *end)
{ }
#endif /* CONFIG_MARKERS */
@@ -92,8 +98,6 @@ static inline void marker_update_probe_range(struct marker *begin,
#define trace_mark(name, format, args...) \
__trace_mark(name, NULL, format, ## args)
-#define MARK_MAX_FORMAT_LEN 1024
-
/**
* MARK_NOARGS - Format string for a marker with no argument.
*/
@@ -106,24 +110,30 @@ static inline void __printf(1, 2) __mark_check_format(const char *fmt, ...)
extern marker_probe_func __mark_empty_function;
+extern void marker_probe_cb(const struct marker *mdata,
+ void *call_private, const char *fmt, ...);
+extern void marker_probe_cb_noarg(const struct marker *mdata,
+ void *call_private, const char *fmt, ...);
+
/*
* Connect a probe to a marker.
* private data pointer must be a valid allocated memory address, or NULL.
*/
extern int marker_probe_register(const char *name, const char *format,
- marker_probe_func *probe, void *private);
+ marker_probe_func *probe, void *probe_private);
/*
* Returns the private data given to marker_probe_register.
*/
-extern void *marker_probe_unregister(const char *name);
+extern int marker_probe_unregister(const char *name,
+ marker_probe_func *probe, void *probe_private);
/*
* Unregister a marker by providing the registered private data.
*/
-extern void *marker_probe_unregister_private_data(void *private);
+extern int marker_probe_unregister_private_data(marker_probe_func *probe,
+ void *probe_private);
-extern int marker_arm(const char *name);
-extern int marker_disarm(const char *name);
-extern void *marker_get_private_data(const char *name);
+extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
+ int num);
#endif
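To illustrate the reworked probe interface above, here is a hypothetical module sketch (not part of the patch); the "example_event" marker name and its format string are invented. Registration now passes the probe together with its private data, and the probe receives the argument list as a va_list pointer:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/marker.h>

/* Probe callback matching the new marker_probe_func signature. */
static void example_probe(void *probe_private, void *call_private,
			  const char *fmt, va_list *args)
{
	/* A real probe would decode *args according to fmt here. */
}

static int __init example_marker_init(void)
{
	/*
	 * Instrumented code elsewhere would contain e.g.:
	 *	trace_mark(example_event, "value %d", value);
	 */
	return marker_probe_register("example_event", "value %d",
				     example_probe, NULL);
}

static void __exit example_marker_exit(void)
{
	marker_probe_unregister("example_event", example_probe, NULL);
}

module_init(example_marker_init);
module_exit(example_marker_exit);
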
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8abb381420..26c7124b841 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -786,7 +786,7 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
-int FASTCALL(set_page_dirty(struct page *page));
+int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -829,7 +829,7 @@ extern void unregister_shrinker(struct shrinker *);
int vma_wants_writenotify(struct vm_area_struct *vma);
-extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
diff --git a/include/linux/module.h b/include/linux/module.h
index ac28e8761e8..819c4e889bf 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,7 +465,7 @@ int unregister_module_notifier(struct notifier_block * nb);
extern void print_modules(void);
-extern void module_update_markers(struct module *probe_module, int *refcount);
+extern void module_update_markers(void);
#else /* !CONFIG_MODULES... */
#define EXPORT_SYMBOL(sym)
@@ -567,8 +567,7 @@ static inline void print_modules(void)
{
}
-static inline void module_update_markers(struct module *probe_module,
- int *refcount)
+static inline void module_update_markers(void)
{
}
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 8126e55c5bd..ec624381c84 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -62,6 +62,16 @@ struct kparam_array
void *elem;
};
+/* On alpha, ia64 and ppc64 relocations to global data cannot go into
+ read-only sections (which is part of respective UNIX ABI on these
+ platforms). So 'const' makes no sense and even causes compile failures
+ with some compilers. */
+#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#define __moduleparam_const
+#else
+#define __moduleparam_const const
+#endif
+
/* This is the fundamental function for registering boot/module
parameters. perm sets the visibility in sysfs: 000 means it's
not there, read bits mean it's readable, write bits mean it's
@@ -71,7 +81,7 @@ struct kparam_array
static int __param_perm_check_##name __attribute__((unused)) = \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
static const char __param_str_##name[] = prefix #name; \
- static struct kernel_param const __param_##name \
+ static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
= { __param_str_##name, perm, set, get, { arg } }
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 2537285e106..731d77d6e15 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -18,6 +18,6 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
-extern void FASTCALL(mutex_destroy(struct mutex *lock));
+extern void mutex_destroy(struct mutex *lock);
#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 4cb4f8d2f78..24d88e98a62 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -3,6 +3,7 @@
#include <linux/dcache.h>
#include <linux/linkage.h>
+#include <linux/path.h>
struct vfsmount;
@@ -15,8 +16,7 @@ struct open_intent {
enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
- struct dentry *dentry;
- struct vfsmount *mnt;
+ struct path path;
struct qstr last;
unsigned int flags;
int last_type;
@@ -29,11 +29,6 @@ struct nameidata {
} intent;
};
-struct path {
- struct vfsmount *mnt;
- struct dentry *dentry;
-};
-
/*
* Type of the last component on LOOKUP_PARENT
*/
@@ -62,17 +57,15 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ACCESS (0x0400)
#define LOOKUP_CHDIR (0x0800)
-extern int FASTCALL(__user_walk(const char __user *, unsigned, struct nameidata *));
-extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *));
+extern int __user_walk(const char __user *, unsigned, struct nameidata *);
+extern int __user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *);
#define user_path_walk(name,nd) \
__user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd)
#define user_path_walk_link(name,nd) \
__user_walk_fd(AT_FDCWD, name, 0, nd)
-extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
+extern int path_lookup(const char *, unsigned, struct nameidata *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct nameidata *);
-extern void path_release(struct nameidata *);
-extern void path_release_on_umount(struct nameidata *);
extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 047d432bde5..7128a02f1d3 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -322,7 +322,7 @@ enum
NAPI_STATE_DISABLE, /* Disable pending */
};
-extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+extern void __napi_schedule(struct napi_struct *n);
static inline int napi_disable_pending(struct napi_struct *n)
{
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 3a168725136..5431512b275 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -84,9 +84,8 @@ struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
int ex_flags;
- struct vfsmount * ex_mnt;
- struct dentry * ex_dentry;
- char * ex_path;
+ struct path ex_path;
+ char *ex_pathname;
uid_t ex_anon_uid;
gid_t ex_anon_gid;
int ex_fsid;
@@ -107,8 +106,7 @@ struct svc_expkey {
int ek_fsidtype;
u32 ek_fsid[6];
- struct vfsmount * ek_mnt;
- struct dentry * ek_dentry;
+ struct path ek_path;
};
#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b62a105622..d2fca802f80 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -156,10 +156,10 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
-extern void FASTCALL(__lock_page(struct page *page));
-extern int FASTCALL(__lock_page_killable(struct page *page));
-extern void FASTCALL(__lock_page_nosync(struct page *page));
-extern void FASTCALL(unlock_page(struct page *page));
+extern void __lock_page(struct page *page);
+extern int __lock_page_killable(struct page *page);
+extern void __lock_page_nosync(struct page *page);
+extern void unlock_page(struct page *page);
/*
* lock_page may only be called if we have the page's inode pinned.
@@ -199,7 +199,7 @@ static inline void lock_page_nosync(struct page *page)
* This is exported only for wait_on_page_locked/wait_on_page_writeback.
* Never use this directly!
*/
-extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));
+extern void wait_on_page_bit(struct page *page, int bit_nr);
/*
* Wait for a page to be unlocked.
diff --git a/include/linux/path.h b/include/linux/path.h
new file mode 100644
index 00000000000..915e0c382a5
--- /dev/null
+++ b/include/linux/path.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_PATH_H
+#define _LINUX_PATH_H
+
+struct dentry;
+struct vfsmount;
+
+struct path {
+ struct vfsmount *mnt;
+ struct dentry *dentry;
+};
+
+extern void path_get(struct path *);
+extern void path_put(struct path *);
+
+#endif /* _LINUX_PATH_H */
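A hypothetical helper (not part of the patch) showing the intended usage of the new struct path and its path_get()/path_put() pair, together with the struct-path-based fs_struct from this series; the example_* names are invented:

#include <linux/path.h>
#include <linux/fs_struct.h>
#include <linux/sched.h>

/* Take a pinned copy of the calling task's root directory. */
static void example_grab_root(struct path *root)
{
	read_lock(&current->fs->lock);
	*root = current->fs->root;
	path_get(root);			/* pins both mnt and dentry */
	read_unlock(&current->fs->lock);
}

/* Drop both references in one call when done. */
static void example_drop_root(struct path *root)
{
	path_put(root);
}
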
diff --git a/include/linux/pid.h b/include/linux/pid.h
index f84d532b5d2..c7980810eb0 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -79,10 +79,9 @@ static inline struct pid *get_pid(struct pid *pid)
return pid;
}
-extern void FASTCALL(put_pid(struct pid *pid));
-extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type));
-extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid,
- enum pid_type));
+extern void put_pid(struct pid *pid);
+extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
@@ -90,11 +89,11 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
* attach_pid() and detach_pid() must be called with the tasklist_lock
* write-held.
*/
-extern int FASTCALL(attach_pid(struct task_struct *task,
- enum pid_type type, struct pid *pid));
-extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));
-extern void FASTCALL(transfer_pid(struct task_struct *old,
- struct task_struct *new, enum pid_type));
+extern int attach_pid(struct task_struct *task, enum pid_type type,
+ struct pid *pid);
+extern void detach_pid(struct task_struct *task, enum pid_type);
+extern void transfer_pid(struct task_struct *old, struct task_struct *new,
+ enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
@@ -109,7 +108,7 @@ extern struct pid_namespace init_pid_ns;
*
* see also find_task_by_pid() set in include/linux/sched.h
*/
-extern struct pid *FASTCALL(find_pid_ns(int nr, struct pid_namespace *ns));
+extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);
extern struct pid *find_pid(int nr);
@@ -121,7 +120,7 @@ extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
int next_pidmap(struct pid_namespace *pid_ns, int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
-extern void FASTCALL(free_pid(struct pid *pid));
+extern void free_pid(struct pid *pid);
/*
* the helpers to get the pid's id seen from different namespaces
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index d6a4f69bdc9..d9a9e718ad1 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -269,7 +269,7 @@ extern void kclist_add(struct kcore_list *, void *, size_t);
#endif
union proc_op {
- int (*proc_get_link)(struct inode *, struct dentry **, struct vfsmount **);
+ int (*proc_get_link)(struct inode *, struct path *);
int (*proc_read)(struct task_struct *task, char *page);
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 813cee13da0..6c3c0f6c261 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -60,14 +60,14 @@ do { \
__init_rwsem((sem), #sem, &__key); \
} while (0)
-extern void FASTCALL(__down_read(struct rw_semaphore *sem));
-extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
-extern void FASTCALL(__down_write(struct rw_semaphore *sem));
-extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
-extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
-extern void FASTCALL(__up_read(struct rw_semaphore *sem));
-extern void FASTCALL(__up_write(struct rw_semaphore *sem));
-extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
+extern void __down_read(struct rw_semaphore *sem);
+extern int __down_read_trylock(struct rw_semaphore *sem);
+extern void __down_write(struct rw_semaphore *sem);
+extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __down_write_trylock(struct rw_semaphore *sem);
+extern void __up_read(struct rw_semaphore *sem);
+extern void __up_write(struct rw_semaphore *sem);
+extern void __downgrade_write(struct rw_semaphore *sem);
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 00e14411732..e217d188a10 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -323,7 +323,7 @@ extern char __sched_text_start[], __sched_text_end[];
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
-extern signed long FASTCALL(schedule_timeout(signed long timeout));
+extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
@@ -590,7 +590,7 @@ struct user_struct {
struct hlist_node uidhash_node;
uid_t uid;
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
struct task_group *tg;
#ifdef CONFIG_SYSFS
struct kobject kobj;
@@ -973,7 +973,7 @@ struct sched_rt_entity {
unsigned long timeout;
int nr_cpus_allowed;
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
/* rq on which this entity is (to be) queued: */
struct rt_rq *rt_rq;
@@ -1541,8 +1541,6 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_rt_period;
-extern unsigned int sysctl_sched_rt_ratio;
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
extern unsigned int sysctl_sched_min_bal_int_shares;
extern unsigned int sysctl_sched_max_bal_int_shares;
@@ -1552,6 +1550,8 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
loff_t *ppos);
#endif
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
extern unsigned int sysctl_sched_compat_yield;
@@ -1648,10 +1648,10 @@ extern void release_uids(struct user_namespace *ns);
extern void do_timer(unsigned long ticks);
-extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
-extern int FASTCALL(wake_up_process(struct task_struct * tsk));
-extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
- unsigned long clone_flags));
+extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+extern int wake_up_process(struct task_struct *tsk);
+extern void wake_up_new_task(struct task_struct *tsk,
+ unsigned long clone_flags);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
@@ -1741,7 +1741,7 @@ static inline int sas_ss_flags(unsigned long sp)
extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
-extern void FASTCALL(__mmdrop(struct mm_struct *));
+extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
@@ -1925,7 +1925,7 @@ static inline int signal_pending(struct task_struct *p)
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
-extern int FASTCALL(__fatal_signal_pending(struct task_struct *p));
+extern int __fatal_signal_pending(struct task_struct *p);
static inline int fatal_signal_pending(struct task_struct *p)
{
@@ -2027,16 +2027,22 @@ extern int sched_mc_power_savings, sched_smt_power_savings;
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
extern struct task_group init_task_group;
extern struct task_group *sched_create_group(void);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
+#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
-
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+extern int sched_group_set_rt_runtime(struct task_group *tg,
+ long rt_runtime_us);
+extern long sched_group_rt_runtime(struct task_group *tg);
+#endif
#endif
#ifdef CONFIG_TASK_XACCT
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 648dfeb444d..67c2563961f 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -8,8 +8,7 @@
struct seq_operations;
struct file;
-struct vfsmount;
-struct dentry;
+struct path;
struct inode;
struct seq_file {
@@ -42,7 +41,7 @@ int seq_puts(struct seq_file *m, const char *s);
int seq_printf(struct seq_file *, const char *, ...)
__attribute__ ((format (printf,2,3)));
-int seq_path(struct seq_file *, struct vfsmount *, struct dentry *, char *);
+int seq_path(struct seq_file *, struct path *, char *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_release(struct inode *, struct file *);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1a0b6cf83ff..289942fc665 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -149,6 +149,8 @@
/* Freescale ColdFire */
#define PORT_MCF 78
+#define PORT_SC26XX 79
+
/* MN10300 on-chip UART numbers */
#define PORT_MN10300 80
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5..57deecc79d5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -71,6 +71,7 @@ struct kmem_cache {
/* Allocation and freeing of slabs */
int objects; /* Number of objects in slab */
+ gfp_t allocflags; /* gfp flags to use on each alloc */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(struct kmem_cache *, void *);
int inuse; /* Offset to metadata */
@@ -110,7 +111,7 @@ struct kmem_cache {
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -188,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
- if (size > PAGE_SIZE / 2)
- return (void *)__get_free_pages(flags | __GFP_COMP,
- get_order(size));
+ if (size > PAGE_SIZE)
+ return kmalloc_large(size, flags);
if (!(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
@@ -214,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) &&
- size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+ size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
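With the constant-folding threshold raised from PAGE_SIZE/2 to PAGE_SIZE, compile-time-constant allocations in that range are now served from the kmalloc caches (hence the extra kmalloc_caches[PAGE_SHIFT + 1] slot), and only sizes above one page are compiled directly into kmalloc_large(), i.e. the page allocator. A minimal sketch of the effect, assuming ordinary kernel context; the variable names are illustrative and not part of this patch:

	void *a = kmalloc(PAGE_SIZE / 2 + 64, GFP_KERNEL);	/* now a slab-cache allocation */
	void *b = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);		/* inlined as kmalloc_large() */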
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 64c77105618..64c97552964 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -409,16 +409,13 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t);
* for all cases without actually generating the checksum, so we just use a
* static value.
*/
-static inline void
-svc_reserve_auth(struct svc_rqst *rqstp, int space)
+static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
- int added_space = 0;
+ int added_space = 0;
- switch(rqstp->rq_authop->flavour) {
- case RPC_AUTH_GSS:
- added_space = RPC_MAX_AUTH_SIZE;
- }
- return svc_reserve(rqstp, space + added_space);
+ if (rqstp->rq_authop->flavour)
+ added_space = RPC_MAX_AUTH_SIZE;
+ svc_reserve(rqstp, space + added_space);
}
#endif /* SUNRPC_SVC_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3ca5c4bd6d3..878459ae045 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -171,10 +171,10 @@ extern unsigned int nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
-extern void FASTCALL(lru_cache_add(struct page *));
-extern void FASTCALL(lru_cache_add_active(struct page *));
-extern void FASTCALL(activate_page(struct page *));
-extern void FASTCALL(mark_page_accessed(struct page *));
+extern void lru_cache_add(struct page *);
+extern void lru_cache_add_active(struct page *);
+extern void activate_page(struct page *);
+extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
extern int rotate_reclaimable_page(struct page *page);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 33a2aa9e02f..0081147a9fe 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -117,9 +117,9 @@ static inline int waitqueue_active(wait_queue_head_t *q)
*/
#define is_sync_wait(wait) (!(wait) || ((wait)->private))
-extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
-extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
+extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
@@ -141,16 +141,16 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
list_del(&old->task_list);
}
-void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
-extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
-extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
-void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
-int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
-int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
-void FASTCALL(wake_up_bit(void *, int));
-int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
-int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
-wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_bit(wait_queue_head_t *, void *, int);
+int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+void wake_up_bit(void *, int);
+int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -437,11 +437,9 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
-void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
- wait_queue_t *wait, int state));
-void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait, int state));
-void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
+void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 7f28c32d9ac..542526c6e8e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -178,18 +178,17 @@ __create_workqueue_key(const char *name, int singlethread,
extern void destroy_workqueue(struct workqueue_struct *wq);
-extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
- struct delayed_work *work, unsigned long delay));
+extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern int queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
-extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
+extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
-extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
- unsigned long delay));
+extern int schedule_work(struct work_struct *work);
+extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index d1299e99972..530ff4c553f 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <scsi/scsi.h>
struct request_queue;
struct block_device;
@@ -25,12 +26,15 @@ struct blk_queue_tags;
* NONE: Self evident. Host adapter is not capable of scatter-gather.
* ALL: Means that the host adapter module can do scatter-gather,
* and that there is no limit to the size of the table to which
- * we scatter/gather data.
+ * we scatter/gather data. The value we set here is the maximum
+ * single element sglist. To use chained sglists, the adapter
+ * has to set a value beyond ALL (and correctly use the chain
+ *			  handling API).
* Anything else: Indicates the maximum number of chains that can be
* used in one scatter-gather request.
*/
#define SG_NONE 0
-#define SG_ALL 0xff
+#define SG_ALL SCSI_MAX_SG_SEGMENTS
#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
diff --git a/init/Kconfig b/init/Kconfig
index 824d48cb67b..dcef8b55011 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -311,25 +311,36 @@ config CPUSETS
Say N if unsure.
-config FAIR_GROUP_SCHED
- bool "Fair group CPU scheduler"
+config GROUP_SCHED
+ bool "Group CPU scheduler"
default y
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups.
+config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on GROUP_SCHED
+ default y
+
+config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on GROUP_SCHED
+ default n
+
choice
- depends on FAIR_GROUP_SCHED
+ depends on GROUP_SCHED
prompt "Basis for grouping tasks"
- default FAIR_USER_SCHED
+ default USER_SCHED
-config FAIR_USER_SCHED
+config USER_SCHED
bool "user id"
help
This option will choose userid as the basis for grouping
tasks, thus providing equal CPU bandwidth to each user.
-config FAIR_CGROUP_SCHED
+config CGROUP_SCHED
bool "Control groups"
depends on CGROUPS
help
diff --git a/init/Makefile b/init/Makefile
index c5f157ce293..4a243df426f 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -27,6 +27,7 @@ $(obj)/version.o: include/linux/compile.h
# mkcompile_h will make sure to only update the
# actual file if its content has changed.
+ chk_compile.h = :
quiet_chk_compile.h = echo ' CHK $@'
silent_chk_compile.h = :
include/linux/compile.h: FORCE
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f86573126f8..3885e70e775 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -193,10 +193,10 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
return err;
sys_chdir("/root");
- ROOT_DEV = current->fs->pwdmnt->mnt_sb->s_dev;
+ ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
printk("VFS: Mounted root (%s filesystem)%s.\n",
- current->fs->pwdmnt->mnt_sb->s_type->name,
- current->fs->pwdmnt->mnt_sb->s_flags & MS_RDONLY ?
+ current->fs->pwd.mnt->mnt_sb->s_type->name,
+ current->fs->pwd.mnt->mnt_sb->s_flags & MS_RDONLY ?
" readonly" : "");
return 0;
}
diff --git a/kernel/audit.c b/kernel/audit.c
index c8555b18021..2eeea9a1424 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1312,26 +1312,26 @@ void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
/* This is a helper-function to print the escaped d_path */
void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
- struct dentry *dentry, struct vfsmount *vfsmnt)
+ struct path *path)
{
- char *p, *path;
+ char *p, *pathname;
if (prefix)
audit_log_format(ab, " %s", prefix);
/* We will allow 11 spaces for ' (deleted)' to be appended */
- path = kmalloc(PATH_MAX+11, ab->gfp_mask);
- if (!path) {
+ pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
+ if (!pathname) {
audit_log_format(ab, "<no memory>");
return;
}
- p = d_path(dentry, vfsmnt, path, PATH_MAX+11);
+ p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_format(ab, "<too long>");
} else
audit_log_untrustedstring(ab, p);
- kfree(path);
+ kfree(pathname);
}
/**
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index f4fcf58f20f..9ef5e0aacc3 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -549,8 +549,8 @@ void audit_trim_trees(void)
if (err)
goto skip_it;
- root_mnt = collect_mounts(nd.mnt, nd.dentry);
- path_release(&nd);
+ root_mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
+ path_put(&nd.path);
if (!root_mnt)
goto skip_it;
@@ -583,17 +583,17 @@ skip_it:
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
struct nameidata *nd)
{
- if (mnt != nd->mnt) {
+ if (mnt != nd->path.mnt) {
for (;;) {
if (mnt->mnt_parent == mnt)
return 0;
- if (mnt->mnt_parent == nd->mnt)
+ if (mnt->mnt_parent == nd->path.mnt)
break;
mnt = mnt->mnt_parent;
}
dentry = mnt->mnt_mountpoint;
}
- return is_subdir(dentry, nd->dentry);
+ return is_subdir(dentry, nd->path.dentry);
}
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
@@ -641,8 +641,8 @@ int audit_add_tree_rule(struct audit_krule *rule)
err = path_lookup(tree->pathname, 0, &nd);
if (err)
goto Err;
- mnt = collect_mounts(nd.mnt, nd.dentry);
- path_release(&nd);
+ mnt = collect_mounts(nd.path.mnt, nd.path.dentry);
+ path_put(&nd.path);
if (!mnt) {
err = -ENOMEM;
goto Err;
@@ -701,8 +701,8 @@ int audit_tag_tree(char *old, char *new)
err = path_lookup(new, 0, &nd);
if (err)
return err;
- tagged = collect_mounts(nd.mnt, nd.dentry);
- path_release(&nd);
+ tagged = collect_mounts(nd.path.mnt, nd.path.dentry);
+ path_put(&nd.path);
if (!tagged)
return -ENOMEM;
@@ -711,9 +711,9 @@ int audit_tag_tree(char *old, char *new)
drop_collected_mounts(tagged);
return err;
}
- mnt = mntget(nd.mnt);
- dentry = dget(nd.dentry);
- path_release(&nd);
+ mnt = mntget(nd.path.mnt);
+ dentry = dget(nd.path.dentry);
+ path_put(&nd.path);
if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
follow_up(&mnt, &dentry);
@@ -744,13 +744,13 @@ int audit_tag_tree(char *old, char *new)
spin_lock(&vfsmount_lock);
if (!is_under(mnt, dentry, &nd)) {
spin_unlock(&vfsmount_lock);
- path_release(&nd);
+ path_put(&nd.path);
put_tree(tree);
mutex_lock(&audit_filter_mutex);
continue;
}
spin_unlock(&vfsmount_lock);
- path_release(&nd);
+ path_put(&nd.path);
list_for_each_entry(p, &list, mnt_list) {
failed = tag_chunk(p->mnt_root->d_inode, tree);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 6f19fd477aa..2f2914b7cc3 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -169,8 +169,8 @@ static struct audit_parent *audit_init_parent(struct nameidata *ndp)
inotify_init_watch(&parent->wdata);
/* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
get_inotify_watch(&parent->wdata);
- wd = inotify_add_watch(audit_ih, &parent->wdata, ndp->dentry->d_inode,
- AUDIT_IN_WATCH);
+ wd = inotify_add_watch(audit_ih, &parent->wdata,
+ ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
if (wd < 0) {
audit_free_parent(&parent->wdata);
return ERR_PTR(wd);
@@ -1161,11 +1161,11 @@ static int audit_get_nd(char *path, struct nameidata **ndp,
static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
{
if (ndp) {
- path_release(ndp);
+ path_put(&ndp->path);
kfree(ndp);
}
if (ndw) {
- path_release(ndw);
+ path_put(&ndw->path);
kfree(ndw);
}
}
@@ -1214,8 +1214,8 @@ static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp,
/* update watch filter fields */
if (ndw) {
- watch->dev = ndw->dentry->d_inode->i_sb->s_dev;
- watch->ino = ndw->dentry->d_inode->i_ino;
+ watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
+ watch->ino = ndw->path.dentry->d_inode->i_ino;
}
/* The audit_filter_mutex must not be held during inotify calls because
@@ -1225,7 +1225,8 @@ static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp,
*/
mutex_unlock(&audit_filter_mutex);
- if (inotify_find_watch(audit_ih, ndp->dentry->d_inode, &i_watch) < 0) {
+ if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
+ &i_watch) < 0) {
parent = audit_init_parent(ndp);
if (IS_ERR(parent)) {
/* caller expects mutex locked */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 1c06ecf38d7..ac6d9b23b01 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -208,8 +208,7 @@ struct audit_context {
int name_count;
struct audit_names names[AUDIT_NAMES];
char * filterkey; /* key for rule that triggered record */
- struct dentry * pwd;
- struct vfsmount * pwdmnt;
+ struct path pwd;
struct audit_context *previous; /* For nested syscalls */
struct audit_aux_data *aux;
struct audit_aux_data *aux_pids;
@@ -786,12 +785,9 @@ static inline void audit_free_names(struct audit_context *context)
__putname(context->names[i].name);
}
context->name_count = 0;
- if (context->pwd)
- dput(context->pwd);
- if (context->pwdmnt)
- mntput(context->pwdmnt);
- context->pwd = NULL;
- context->pwdmnt = NULL;
+ path_put(&context->pwd);
+ context->pwd.dentry = NULL;
+ context->pwd.mnt = NULL;
}
static inline void audit_free_aux(struct audit_context *context)
@@ -930,8 +926,7 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk
if ((vma->vm_flags & VM_EXECUTABLE) &&
vma->vm_file) {
audit_log_d_path(ab, "exe=",
- vma->vm_file->f_path.dentry,
- vma->vm_file->f_path.mnt);
+ &vma->vm_file->f_path);
break;
}
vma = vma->vm_next;
@@ -1341,10 +1336,10 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
context->target_sid, context->target_comm))
call_panic = 1;
- if (context->pwd && context->pwdmnt) {
+ if (context->pwd.dentry && context->pwd.mnt) {
ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
if (ab) {
- audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt);
+ audit_log_d_path(ab, "cwd=", &context->pwd);
audit_log_end(ab);
}
}
@@ -1367,8 +1362,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case 0:
/* name was specified as a relative path and the
* directory component is the cwd */
- audit_log_d_path(ab, " name=", context->pwd,
- context->pwdmnt);
+ audit_log_d_path(ab, " name=", &context->pwd);
break;
default:
/* log the name's directory component */
@@ -1695,10 +1689,10 @@ void __audit_getname(const char *name)
context->names[context->name_count].ino = (unsigned long)-1;
context->names[context->name_count].osid = 0;
++context->name_count;
- if (!context->pwd) {
+ if (!context->pwd.dentry) {
read_lock(&current->fs->lock);
- context->pwd = dget(current->fs->pwd);
- context->pwdmnt = mntget(current->fs->pwdmnt);
+ context->pwd = current->fs->pwd;
+ path_get(&current->fs->pwd);
read_unlock(&current->fs->lock);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 3b893e78ce6..506a957b665 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -512,14 +512,10 @@ static void __put_fs_struct(struct fs_struct *fs)
{
/* No need to hold fs->lock if we are killing it */
if (atomic_dec_and_test(&fs->count)) {
- dput(fs->root);
- mntput(fs->rootmnt);
- dput(fs->pwd);
- mntput(fs->pwdmnt);
- if (fs->altroot) {
- dput(fs->altroot);
- mntput(fs->altrootmnt);
- }
+ path_put(&fs->root);
+ path_put(&fs->pwd);
+ if (fs->altroot.dentry)
+ path_put(&fs->altroot);
kmem_cache_free(fs_cachep, fs);
}
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 4363a4eb84e..dd249c37b3a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -600,16 +600,16 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
rwlock_init(&fs->lock);
fs->umask = old->umask;
read_lock(&old->lock);
- fs->rootmnt = mntget(old->rootmnt);
- fs->root = dget(old->root);
- fs->pwdmnt = mntget(old->pwdmnt);
- fs->pwd = dget(old->pwd);
- if (old->altroot) {
- fs->altrootmnt = mntget(old->altrootmnt);
- fs->altroot = dget(old->altroot);
+ fs->root = old->root;
+ path_get(&old->root);
+ fs->pwd = old->pwd;
+ path_get(&old->pwd);
+ if (old->altroot.dentry) {
+ fs->altroot = old->altroot;
+ path_get(&old->altroot);
} else {
- fs->altrootmnt = NULL;
- fs->altroot = NULL;
+ fs->altroot.mnt = NULL;
+ fs->altroot.dentry = NULL;
}
read_unlock(&old->lock);
}
diff --git a/kernel/futex.c b/kernel/futex.c
index a6baaec44b8..221f2128a43 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2116,7 +2116,7 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
- t = ktime_add(ktime_get(), t);
+ t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
/*
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 133d558db45..7d5e4b016f3 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -176,7 +176,7 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
t = timespec_to_ktime(ts);
if (cmd == FUTEX_WAIT)
- t = ktime_add(ktime_get(), t);
+ t = ktime_add_safe(ktime_get(), t);
tp = &t;
}
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 3f4a57c7895..98bee013f71 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -326,6 +326,23 @@ u64 ktime_divns(const ktime_t kt, s64 div)
#endif /* BITS_PER_LONG >= 64 */
/*
+ * Add two ktime values and do a safety check for overflow:
+ */
+ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
+{
+ ktime_t res = ktime_add(lhs, rhs);
+
+ /*
+ * We use KTIME_SEC_MAX here, the maximum timeout which we can
+ * return to user space in a timespec:
+ */
+ if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
+ res = ktime_set(KTIME_SEC_MAX, 0);
+
+ return res;
+}
+
+/*
* Check, whether the timer is on the callback pending list
*/
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
@@ -425,6 +442,8 @@ static int hrtimer_reprogram(struct hrtimer *timer,
ktime_t expires = ktime_sub(timer->expires, base->offset);
int res;
+ WARN_ON_ONCE(timer->expires.tv64 < 0);
+
/*
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
@@ -435,6 +454,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
if (hrtimer_callback_running(timer))
return 0;
+ /*
+ * CLOCK_REALTIME timer might be requested with an absolute
+ * expiry time which is less than base->offset. Nothing wrong
+	 * about that, just avoid calling into the tick code, which
+	 * now objects to negative expiry values.
+ */
+ if (expires.tv64 < 0)
+ return -ETIME;
+
if (expires.tv64 >= expires_next->tv64)
return 0;
@@ -682,13 +710,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
*/
orun++;
}
- timer->expires = ktime_add(timer->expires, interval);
- /*
- * Make sure, that the result did not wrap with a very large
- * interval.
- */
- if (timer->expires.tv64 < 0)
- timer->expires = ktime_set(KTIME_SEC_MAX, 0);
+ timer->expires = ktime_add_safe(timer->expires, interval);
return orun;
}
@@ -839,7 +861,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
new_base = switch_hrtimer_base(timer, base);
if (mode == HRTIMER_MODE_REL) {
- tim = ktime_add(tim, new_base->get_time());
+ tim = ktime_add_safe(tim, new_base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
* to signal that they simply return xtime in
@@ -848,16 +870,8 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
* timeouts. This will go away with the GTOD framework.
*/
#ifdef CONFIG_TIME_LOW_RES
- tim = ktime_add(tim, base->resolution);
+ tim = ktime_add_safe(tim, base->resolution);
#endif
- /*
- * Careful here: User space might have asked for a
- * very long sleep, so the add above might result in a
- * negative number, which enqueues the timer in front
- * of the queue.
- */
- if (tim.tv64 < 0)
- tim.tv64 = KTIME_MAX;
}
timer->expires = tim;
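ktime_add_safe() replaces the overflow checks that were previously open-coded at each call site (hrtimer_forward(), hrtimer_start(), and the futex and posix-timer paths below): a large relative timeout from user space can no longer wrap into a negative expiry that would enqueue the timer at the front of the queue. A minimal usage sketch, where ts is a hypothetical timespec copied in from user space:

	ktime_t expires = ktime_add_safe(ktime_get(), timespec_to_ktime(ts));
	/* on overflow the sum saturates at KTIME_SEC_MAX instead of going negative */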
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bb7df2a28bd..22be3ff3f36 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -173,10 +173,7 @@ static int ____call_usermodehelper(void *data)
*/
set_user_nice(current, 0);
- retval = -EPERM;
- if (current->fs->root)
- retval = kernel_execve(sub_info->path,
- sub_info->argv, sub_info->envp);
+ retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
/* Exec failed? */
sub_info->retval = retval;
diff --git a/kernel/marker.c b/kernel/marker.c
index 5323cfaedbc..c4c2cd8b61f 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -27,35 +27,42 @@
extern struct marker __start___markers[];
extern struct marker __stop___markers[];
+/* Set to 1 to enable marker debug output */
+const int marker_debug;
+
/*
* markers_mutex nests inside module_mutex. Markers mutex protects the builtin
- * and module markers, the hash table and deferred_sync.
+ * and module markers and the hash table.
*/
static DEFINE_MUTEX(markers_mutex);
/*
- * Marker deferred synchronization.
- * Upon marker probe_unregister, we delay call to synchronize_sched() to
- * accelerate mass unregistration (only when there is no more reference to a
- * given module do we call synchronize_sched()). However, we need to make sure
- * every critical region has ended before we re-arm a marker that has been
- * unregistered and then registered back with a different probe data.
- */
-static int deferred_sync;
-
-/*
* Marker hash table, containing the active markers.
* Protected by module_mutex.
*/
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+/*
+ * Note about RCU:
+ * It is used to make sure every handler has finished using its private data
+ * between two consecutive operations (add or remove) on a given marker. It is
+ * also used to delay the freeing of a multiple-probe array until a quiescent
+ * state is reached.
+ * Marker entry modifications are protected by the markers_mutex.
+ */
struct marker_entry {
struct hlist_node hlist;
char *format;
- marker_probe_func *probe;
- void *private;
+ void (*call)(const struct marker *mdata, /* Probe wrapper */
+ void *call_private, const char *fmt, ...);
+ struct marker_probe_closure single;
+ struct marker_probe_closure *multi;
int refcount; /* Number of times armed. 0 if disarmed. */
+ struct rcu_head rcu;
+ void *oldptr;
+ char rcu_pending:1;
+ char ptype:1;
char name[0]; /* Contains name'\0'format'\0' */
};
@@ -63,7 +70,8 @@ static struct hlist_head marker_table[MARKER_TABLE_SIZE];
/**
* __mark_empty_function - Empty probe callback
- * @mdata: pointer of type const struct marker
+ * @probe_private: probe private data
+ * @call_private: call site private data
* @fmt: format string
* @...: variable argument list
*
@@ -72,13 +80,267 @@ static struct hlist_head marker_table[MARKER_TABLE_SIZE];
* though the function pointer change and the marker enabling are two distinct
* operations that modifies the execution flow of preemptible code.
*/
-void __mark_empty_function(const struct marker *mdata, void *private,
- const char *fmt, ...)
+void __mark_empty_function(void *probe_private, void *call_private,
+ const char *fmt, va_list *args)
{
}
EXPORT_SYMBOL_GPL(__mark_empty_function);
/*
+ * marker_probe_cb - Callback that prepares the variable argument list for probes.
+ * @mdata: pointer of type struct marker
+ * @call_private: caller site private data
+ * @fmt: format string
+ * @...: Variable argument list.
+ *
+ * Since we do not use "typical" pointer based RCU in the 1 argument case, we
+ * need to put a full smp_rmb() in this branch. This is why we do not use
+ * rcu_dereference() for the pointer read.
+ */
+void marker_probe_cb(const struct marker *mdata, void *call_private,
+ const char *fmt, ...)
+{
+ va_list args;
+ char ptype;
+
+ /*
+	 * Disable preemption to make sure the teardown of the callbacks can
+	 * be done correctly when they are in modules, and to ensure RCU read
+	 * coherency.
+ */
+ preempt_disable();
+ ptype = ACCESS_ONCE(mdata->ptype);
+ if (likely(!ptype)) {
+ marker_probe_func *func;
+		/* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func = ACCESS_ONCE(mdata->single.func);
+ /* Must read the ptr before private data. They are not data
+		 * dependent, so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ va_start(args, fmt);
+ func(mdata->single.probe_private, call_private, fmt, &args);
+ va_end(args);
+ } else {
+ struct marker_probe_closure *multi;
+ int i;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+		 * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full smp_rmb()
+ * in the fast path, so put the explicit barrier here.
+ */
+ smp_read_barrier_depends();
+ multi = ACCESS_ONCE(mdata->multi);
+ for (i = 0; multi[i].func; i++) {
+ va_start(args, fmt);
+ multi[i].func(multi[i].probe_private, call_private, fmt,
+ &args);
+ va_end(args);
+ }
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(marker_probe_cb);
+
+/*
+ * marker_probe_cb_noarg - Callback that does not prepare the variable argument list.
+ * @mdata: pointer of type struct marker
+ * @call_private: caller site private data
+ * @fmt: format string
+ * @...: Variable argument list.
+ *
+ * Should be connected to markers "MARK_NOARGS".
+ */
+void marker_probe_cb_noarg(const struct marker *mdata,
+ void *call_private, const char *fmt, ...)
+{
+ va_list args; /* not initialized */
+ char ptype;
+
+ preempt_disable();
+ ptype = ACCESS_ONCE(mdata->ptype);
+ if (likely(!ptype)) {
+ marker_probe_func *func;
+		/* Must read the ptype before ptr. They are not data dependent,
+ * so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func = ACCESS_ONCE(mdata->single.func);
+ /* Must read the ptr before private data. They are not data
+		 * dependent, so we put an explicit smp_rmb() here. */
+ smp_rmb();
+ func(mdata->single.probe_private, call_private, fmt, &args);
+ } else {
+ struct marker_probe_closure *multi;
+ int i;
+ /*
+ * multi points to an array, therefore accessing the array
+ * depends on reading multi. However, even in this case,
+		 * we must ensure that the pointer is read _before_ the array
+ * data. Same as rcu_dereference, but we need a full smp_rmb()
+ * in the fast path, so put the explicit barrier here.
+ */
+ smp_read_barrier_depends();
+ multi = ACCESS_ONCE(mdata->multi);
+ for (i = 0; multi[i].func; i++)
+ multi[i].func(multi[i].probe_private, call_private, fmt,
+ &args);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
+
+static void free_old_closure(struct rcu_head *head)
+{
+ struct marker_entry *entry = container_of(head,
+ struct marker_entry, rcu);
+ kfree(entry->oldptr);
+ /* Make sure we free the data before setting the pending flag to 0 */
+ smp_wmb();
+ entry->rcu_pending = 0;
+}
+
+static void debug_print_probes(struct marker_entry *entry)
+{
+ int i;
+
+ if (!marker_debug)
+ return;
+
+ if (!entry->ptype) {
+ printk(KERN_DEBUG "Single probe : %p %p\n",
+ entry->single.func,
+ entry->single.probe_private);
+ } else {
+ for (i = 0; entry->multi[i].func; i++)
+ printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
+ entry->multi[i].func,
+ entry->multi[i].probe_private);
+ }
+}
+
+static struct marker_probe_closure *
+marker_entry_add_probe(struct marker_entry *entry,
+ marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0;
+ struct marker_probe_closure *old, *new;
+
+ WARN_ON(!probe);
+
+ debug_print_probes(entry);
+ old = entry->multi;
+ if (!entry->ptype) {
+ if (entry->single.func == probe &&
+ entry->single.probe_private == probe_private)
+ return ERR_PTR(-EBUSY);
+ if (entry->single.func == __mark_empty_function) {
+ /* 0 -> 1 probes */
+ entry->single.func = probe;
+ entry->single.probe_private = probe_private;
+ entry->refcount = 1;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* 1 -> 2 probes */
+ nr_probes = 1;
+ old = NULL;
+ }
+ } else {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+ if (old[nr_probes].func == probe
+ && old[nr_probes].probe_private
+ == probe_private)
+ return ERR_PTR(-EBUSY);
+ }
+ /* + 2 : one for new probe, one for NULL func */
+ new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
+ GFP_KERNEL);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (!old)
+ new[0] = entry->single;
+ else
+ memcpy(new, old,
+ nr_probes * sizeof(struct marker_probe_closure));
+ new[nr_probes].func = probe;
+ new[nr_probes].probe_private = probe_private;
+ entry->refcount = nr_probes + 1;
+ entry->multi = new;
+ entry->ptype = 1;
+ debug_print_probes(entry);
+ return old;
+}
+
+static struct marker_probe_closure *
+marker_entry_remove_probe(struct marker_entry *entry,
+ marker_probe_func *probe, void *probe_private)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ struct marker_probe_closure *old, *new;
+
+ old = entry->multi;
+
+ debug_print_probes(entry);
+ if (!entry->ptype) {
+ /* 0 -> N is an error */
+ WARN_ON(entry->single.func == __mark_empty_function);
+ /* 1 -> 0 probes */
+ WARN_ON(probe && entry->single.func != probe);
+ WARN_ON(entry->single.probe_private != probe_private);
+ entry->single.func = __mark_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ debug_print_probes(entry);
+ return NULL;
+ } else {
+ /* (N -> M), (N > 1, M >= 0) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if ((!probe || old[nr_probes].func == probe)
+ && old[nr_probes].probe_private
+ == probe_private)
+ nr_del++;
+ }
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->single.func = __mark_empty_function;
+ entry->refcount = 0;
+ entry->ptype = 0;
+ } else if (nr_probes - nr_del == 1) {
+ /* N -> 1, (N > 1) */
+ for (i = 0; old[i].func; i++)
+ if ((probe && old[i].func != probe) ||
+ old[i].probe_private != probe_private)
+ entry->single = old[i];
+ entry->refcount = 1;
+ entry->ptype = 0;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 1) */
+ /* + 1 for NULL */
+ new = kzalloc((nr_probes - nr_del + 1)
+ * sizeof(struct marker_probe_closure), GFP_KERNEL);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old[i].func; i++)
+ if ((probe && old[i].func != probe) ||
+ old[i].probe_private != probe_private)
+ new[j++] = old[i];
+ entry->refcount = nr_probes - nr_del;
+ entry->ptype = 1;
+ entry->multi = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
+
+/*
* Get marker if the marker is present in the marker hash table.
* Must be called with markers_mutex held.
* Returns NULL if not present.
@@ -102,8 +364,7 @@ static struct marker_entry *get_marker(const char *name)
* Add the marker to the marker hash table. Must be called with markers_mutex
* held.
*/
-static int add_marker(const char *name, const char *format,
- marker_probe_func *probe, void *private)
+static struct marker_entry *add_marker(const char *name, const char *format)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -118,9 +379,8 @@ static int add_marker(const char *name, const char *format,
hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(name, e->name)) {
printk(KERN_NOTICE
- "Marker %s busy, probe %p already installed\n",
- name, e->probe);
- return -EBUSY; /* Already there */
+ "Marker %s busy\n", name);
+ return ERR_PTR(-EBUSY); /* Already there */
}
}
/*
@@ -130,34 +390,42 @@ static int add_marker(const char *name, const char *format,
e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
GFP_KERNEL);
if (!e)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memcpy(&e->name[0], name, name_len);
if (format) {
e->format = &e->name[name_len];
memcpy(e->format, format, format_len);
+ if (strcmp(e->format, MARK_NOARGS) == 0)
+ e->call = marker_probe_cb_noarg;
+ else
+ e->call = marker_probe_cb;
trace_mark(core_marker_format, "name %s format %s",
e->name, e->format);
- } else
+ } else {
e->format = NULL;
- e->probe = probe;
- e->private = private;
+ e->call = marker_probe_cb;
+ }
+ e->single.func = __mark_empty_function;
+ e->single.probe_private = NULL;
+ e->multi = NULL;
+ e->ptype = 0;
e->refcount = 0;
+ e->rcu_pending = 0;
hlist_add_head(&e->hlist, head);
- return 0;
+ return e;
}
/*
* Remove the marker from the marker hash table. Must be called with mutex_lock
* held.
*/
-static void *remove_marker(const char *name)
+static int remove_marker(const char *name)
{
struct hlist_head *head;
struct hlist_node *node;
struct marker_entry *e;
int found = 0;
size_t len = strlen(name) + 1;
- void *private = NULL;
u32 hash = jhash(name, len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
@@ -167,12 +435,16 @@ static void *remove_marker(const char *name)
break;
}
}
- if (found) {
- private = e->private;
- hlist_del(&e->hlist);
- kfree(e);
- }
- return private;
+ if (!found)
+ return -ENOENT;
+ if (e->single.func != __mark_empty_function)
+ return -EBUSY;
+ hlist_del(&e->hlist);
+ /* Make sure the call_rcu has been executed */
+ if (e->rcu_pending)
+ rcu_barrier();
+ kfree(e);
+ return 0;
}
/*
@@ -184,6 +456,7 @@ static int marker_set_format(struct marker_entry **entry, const char *format)
size_t name_len = strlen((*entry)->name) + 1;
size_t format_len = strlen(format) + 1;
+
e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
GFP_KERNEL);
if (!e)
@@ -191,11 +464,20 @@ static int marker_set_format(struct marker_entry **entry, const char *format)
memcpy(&e->name[0], (*entry)->name, name_len);
e->format = &e->name[name_len];
memcpy(e->format, format, format_len);
- e->probe = (*entry)->probe;
- e->private = (*entry)->private;
+ if (strcmp(e->format, MARK_NOARGS) == 0)
+ e->call = marker_probe_cb_noarg;
+ else
+ e->call = marker_probe_cb;
+ e->single = (*entry)->single;
+ e->multi = (*entry)->multi;
+ e->ptype = (*entry)->ptype;
e->refcount = (*entry)->refcount;
+ e->rcu_pending = 0;
hlist_add_before(&e->hlist, &(*entry)->hlist);
hlist_del(&(*entry)->hlist);
+ /* Make sure the call_rcu has been executed */
+ if ((*entry)->rcu_pending)
+ rcu_barrier();
kfree(*entry);
*entry = e;
trace_mark(core_marker_format, "name %s format %s",
@@ -206,7 +488,8 @@ static int marker_set_format(struct marker_entry **entry, const char *format)
/*
* Sets the probe callback corresponding to one marker.
*/
-static int set_marker(struct marker_entry **entry, struct marker *elem)
+static int set_marker(struct marker_entry **entry, struct marker *elem,
+ int active)
{
int ret;
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
@@ -226,9 +509,43 @@ static int set_marker(struct marker_entry **entry, struct marker *elem)
if (ret)
return ret;
}
- elem->call = (*entry)->probe;
- elem->private = (*entry)->private;
- elem->state = 1;
+
+ /*
+ * probe_cb setup (statically known) is done here. It is
+ * asynchronous with the rest of execution, therefore we only
+ * pass from a "safe" callback (with argument) to an "unsafe"
+ * callback (does not set arguments).
+ */
+ elem->call = (*entry)->call;
+ /*
+ * Sanity check :
+ * We only update the single probe private data when the ptr is
+ * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
+ */
+ WARN_ON(elem->single.func != __mark_empty_function
+ && elem->single.probe_private
+ != (*entry)->single.probe_private &&
+ !elem->ptype);
+ elem->single.probe_private = (*entry)->single.probe_private;
+ /*
+ * Make sure the private data is valid when we update the
+ * single probe ptr.
+ */
+ smp_wmb();
+ elem->single.func = (*entry)->single.func;
+ /*
+ * We also make sure that the new probe callbacks array is consistent
+ * before setting a pointer to it.
+ */
+ rcu_assign_pointer(elem->multi, (*entry)->multi);
+ /*
+ * Update the function or multi probe array pointer before setting the
+ * ptype.
+ */
+ smp_wmb();
+ elem->ptype = (*entry)->ptype;
+ elem->state = active;
+
return 0;
}
@@ -240,8 +557,12 @@ static int set_marker(struct marker_entry **entry, struct marker *elem)
*/
static void disable_marker(struct marker *elem)
{
+ /* leave "call" as is. It is known statically. */
elem->state = 0;
- elem->call = __mark_empty_function;
+ elem->single.func = __mark_empty_function;
+ /* Update the function before setting the ptype */
+ smp_wmb();
+ elem->ptype = 0; /* single probe */
/*
* Leave the private data and id there, because removal is racy and
* should be done only after a synchronize_sched(). These are never used
@@ -253,14 +574,11 @@ static void disable_marker(struct marker *elem)
* marker_update_probe_range - Update a probe range
* @begin: beginning of the range
* @end: end of the range
- * @probe_module: module address of the probe being updated
- * @refcount: number of references left to the given probe_module (out)
*
* Updates the probe callback corresponding to a range of markers.
*/
void marker_update_probe_range(struct marker *begin,
- struct marker *end, struct module *probe_module,
- int *refcount)
+ struct marker *end)
{
struct marker *iter;
struct marker_entry *mark_entry;
@@ -268,15 +586,12 @@ void marker_update_probe_range(struct marker *begin,
mutex_lock(&markers_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_marker(iter->name);
- if (mark_entry && mark_entry->refcount) {
- set_marker(&mark_entry, iter);
+ if (mark_entry) {
+ set_marker(&mark_entry, iter,
+ !!mark_entry->refcount);
/*
* ignore error, continue
*/
- if (probe_module)
- if (probe_module ==
- __module_text_address((unsigned long)mark_entry->probe))
- (*refcount)++;
} else {
disable_marker(iter);
}
@@ -289,20 +604,27 @@ void marker_update_probe_range(struct marker *begin,
* Issues a synchronize_sched() when no reference to the module passed
* as parameter is found in the probes so the probe module can be
* safely unloaded from now on.
+ *
+ * The internal callback is only changed before the first probe is connected to it.
+ * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
+ * transitions. All other transitions will leave the old private data valid.
+ * This makes the non-atomicity of the callback/private data updates valid.
+ *
+ * "special case" updates:
+ * 0 -> 1 callback
+ * 1 -> 0 callback
+ * 1 -> 2 callbacks
+ * 2 -> 1 callbacks
+ * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
+ * Side effect: marker_set_format may delete the marker entry (creating a
+ * replacement).
*/
-static void marker_update_probes(struct module *probe_module)
+static void marker_update_probes(void)
{
- int refcount = 0;
-
/* Core kernel markers */
- marker_update_probe_range(__start___markers,
- __stop___markers, probe_module, &refcount);
+ marker_update_probe_range(__start___markers, __stop___markers);
/* Markers in modules. */
- module_update_markers(probe_module, &refcount);
- if (probe_module && refcount == 0) {
- synchronize_sched();
- deferred_sync = 0;
- }
+ module_update_markers();
}
/**
@@ -310,33 +632,49 @@ static void marker_update_probes(struct module *probe_module)
* @name: marker name
* @format: format string
* @probe: probe handler
- * @private: probe private data
+ * @probe_private: probe private data
*
* private data must be a valid allocated memory address, or NULL.
* Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
*/
int marker_probe_register(const char *name, const char *format,
- marker_probe_func *probe, void *private)
+ marker_probe_func *probe, void *probe_private)
{
struct marker_entry *entry;
int ret = 0;
+ struct marker_probe_closure *old;
mutex_lock(&markers_mutex);
entry = get_marker(name);
- if (entry && entry->refcount) {
- ret = -EBUSY;
- goto end;
- }
- if (deferred_sync) {
- synchronize_sched();
- deferred_sync = 0;
+ if (!entry) {
+ entry = add_marker(name, format);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto end;
+ }
}
- ret = add_marker(name, format, probe, private);
- if (ret)
+ /*
+ * If we detect that a call_rcu is pending for this marker,
+ * make sure it's executed now.
+ */
+ if (entry->rcu_pending)
+ rcu_barrier();
+ old = marker_entry_add_probe(entry, probe, probe_private);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
goto end;
+ }
mutex_unlock(&markers_mutex);
- marker_update_probes(NULL);
- return ret;
+ marker_update_probes(); /* may update entry */
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ WARN_ON(!entry);
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu(&entry->rcu, free_old_closure);
end:
mutex_unlock(&markers_mutex);
return ret;
@@ -346,171 +684,166 @@ EXPORT_SYMBOL_GPL(marker_probe_register);
/**
* marker_probe_unregister - Disconnect a probe from a marker
* @name: marker name
+ * @probe: probe function pointer
+ * @probe_private: probe private data
*
 * Returns 0 on success or an error value.
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled section
+ * has finished.
*/
-void *marker_probe_unregister(const char *name)
+int marker_probe_unregister(const char *name,
+ marker_probe_func *probe, void *probe_private)
{
- struct module *probe_module;
struct marker_entry *entry;
- void *private;
+ struct marker_probe_closure *old;
+ int ret = 0;
mutex_lock(&markers_mutex);
entry = get_marker(name);
if (!entry) {
- private = ERR_PTR(-ENOENT);
+ ret = -ENOENT;
goto end;
}
- entry->refcount = 0;
- /* In what module is the probe handler ? */
- probe_module = __module_text_address((unsigned long)entry->probe);
- private = remove_marker(name);
- deferred_sync = 1;
+ if (entry->rcu_pending)
+ rcu_barrier();
+ old = marker_entry_remove_probe(entry, probe, probe_private);
mutex_unlock(&markers_mutex);
- marker_update_probes(probe_module);
- return private;
+ marker_update_probes(); /* may update entry */
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu(&entry->rcu, free_old_closure);
+ remove_marker(name); /* Ignore busy error message */
end:
mutex_unlock(&markers_mutex);
- return private;
+ return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister);
-/**
- * marker_probe_unregister_private_data - Disconnect a probe from a marker
- * @private: probe private data
- *
- * Unregister a marker by providing the registered private data.
- * Returns the private data given to marker_probe_register, or an ERR_PTR().
- */
-void *marker_probe_unregister_private_data(void *private)
+static struct marker_entry *
+get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
{
- struct module *probe_module;
- struct hlist_head *head;
- struct hlist_node *node;
struct marker_entry *entry;
- int found = 0;
unsigned int i;
+ struct hlist_head *head;
+ struct hlist_node *node;
- mutex_lock(&markers_mutex);
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
head = &marker_table[i];
hlist_for_each_entry(entry, node, head, hlist) {
- if (entry->private == private) {
- found = 1;
- goto iter_end;
+ if (!entry->ptype) {
+ if (entry->single.func == probe
+ && entry->single.probe_private
+ == probe_private)
+ return entry;
+ } else {
+ struct marker_probe_closure *closure;
+ closure = entry->multi;
+ for (i = 0; closure[i].func; i++) {
+ if (closure[i].func == probe &&
+ closure[i].probe_private
+ == probe_private)
+ return entry;
+ }
}
}
}
-iter_end:
- if (!found) {
- private = ERR_PTR(-ENOENT);
- goto end;
- }
- entry->refcount = 0;
- /* In what module is the probe handler ? */
- probe_module = __module_text_address((unsigned long)entry->probe);
- private = remove_marker(entry->name);
- deferred_sync = 1;
- mutex_unlock(&markers_mutex);
- marker_update_probes(probe_module);
- return private;
-end:
- mutex_unlock(&markers_mutex);
- return private;
+ return NULL;
}
-EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
/**
- * marker_arm - Arm a marker
- * @name: marker name
+ * marker_probe_unregister_private_data - Disconnect a probe from a marker
+ * @probe: probe function
+ * @probe_private: probe private data
*
- * Activate a marker. It keeps a reference count of the number of
- * arming/disarming done.
- * Returns 0 if ok, error value on error.
+ * Unregister a probe by providing the registered private data.
+ * Only removes the first marker found in the hash table.
+ * Returns 0 on success or an error value.
+ * We do not need to call a synchronize_sched to make sure the probes have
+ * finished running before doing a module unload, because the module unload
+ * itself uses stop_machine(), which ensures that every preempt-disabled section
+ * has finished.
*/
-int marker_arm(const char *name)
+int marker_probe_unregister_private_data(marker_probe_func *probe,
+ void *probe_private)
{
struct marker_entry *entry;
int ret = 0;
+ struct marker_probe_closure *old;
mutex_lock(&markers_mutex);
- entry = get_marker(name);
+ entry = get_marker_from_private_data(probe, probe_private);
if (!entry) {
ret = -ENOENT;
goto end;
}
- /*
- * Only need to update probes when refcount passes from 0 to 1.
- */
- if (entry->refcount++)
- goto end;
-end:
+ if (entry->rcu_pending)
+ rcu_barrier();
+ old = marker_entry_remove_probe(entry, NULL, probe_private);
mutex_unlock(&markers_mutex);
- marker_update_probes(NULL);
- return ret;
-}
-EXPORT_SYMBOL_GPL(marker_arm);
-
-/**
- * marker_disarm - Disarm a marker
- * @name: marker name
- *
- * Disarm a marker. It keeps a reference count of the number of arming/disarming
- * done.
- * Returns 0 if ok, error value on error.
- */
-int marker_disarm(const char *name)
-{
- struct marker_entry *entry;
- int ret = 0;
-
+ marker_update_probes(); /* may update entry */
mutex_lock(&markers_mutex);
- entry = get_marker(name);
- if (!entry) {
- ret = -ENOENT;
- goto end;
- }
- /*
- * Only permit decrement refcount if higher than 0.
- * Do probe update only on 1 -> 0 transition.
- */
- if (entry->refcount) {
- if (--entry->refcount)
- goto end;
- } else {
- ret = -EPERM;
- goto end;
- }
+ entry = get_marker_from_private_data(probe, probe_private);
+ WARN_ON(!entry);
+ entry->oldptr = old;
+ entry->rcu_pending = 1;
+ /* write rcu_pending before calling the RCU callback */
+ smp_wmb();
+ call_rcu(&entry->rcu, free_old_closure);
+ remove_marker(entry->name); /* Ignore busy error message */
end:
mutex_unlock(&markers_mutex);
- marker_update_probes(NULL);
return ret;
}
-EXPORT_SYMBOL_GPL(marker_disarm);
+EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
/**
* marker_get_private_data - Get a marker's probe private data
* @name: marker name
+ * @probe: probe to match
+ * @num: get the nth matching probe's private data
*
+ * Returns the nth private data pointer (starting from 0) matching, or an
+ * ERR_PTR.
* Returns the private data pointer, or an ERR_PTR.
* The private data pointer should _only_ be dereferenced if the caller is the
* owner of the data, or its content could vanish. This is mostly used to
* confirm that a caller is the owner of a registered probe.
*/
-void *marker_get_private_data(const char *name)
+void *marker_get_private_data(const char *name, marker_probe_func *probe,
+ int num)
{
struct hlist_head *head;
struct hlist_node *node;
struct marker_entry *e;
size_t name_len = strlen(name) + 1;
u32 hash = jhash(name, name_len-1, 0);
- int found = 0;
+ int i;
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(name, e->name)) {
- found = 1;
- return e->private;
+ if (!e->ptype) {
+ if (num == 0 && e->single.func == probe)
+ return e->single.probe_private;
+ else
+ break;
+ } else {
+ struct marker_probe_closure *closure;
+ int match = 0;
+ closure = e->multi;
+ for (i = 0; closure[i].func; i++) {
+ if (closure[i].func != probe)
+ continue;
+ if (match++ == num)
+ return closure[i].probe_private;
+ }
+ }
}
}
return ERR_PTR(-ENOENT);
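With the reworked interface a single marker can carry several probes, each identified by its (probe function, probe_private) pair, so both registration and unregistration name that pair explicitly. A minimal sketch against the prototypes above; my_probe and its NULL private cookie are illustrative, and the name/format pair is the core_marker_format marker used elsewhere in this file:

	static void my_probe(void *probe_private, void *call_private,
			     const char *fmt, va_list *args)
	{
		/* consume the marker arguments from *args if needed */
	}

	static int __init my_init(void)
	{
		/* returns 0 or a negative errno; a second probe with different
		 * private data may later be added to the same marker */
		return marker_probe_register("core_marker_format",
					     "name %s format %s", my_probe, NULL);
	}

	static void __exit my_exit(void)
	{
		/* removes exactly this (function, private data) pair */
		marker_probe_unregister("core_marker_format", my_probe, NULL);
	}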
diff --git a/kernel/module.c b/kernel/module.c
index 4202da97a1d..92595bad381 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2038,7 +2038,7 @@ static struct module *load_module(void __user *umod,
#ifdef CONFIG_MARKERS
if (!mod->taints)
marker_update_probe_range(mod->markers,
- mod->markers + mod->num_markers, NULL, NULL);
+ mod->markers + mod->num_markers);
#endif
err = module_finalize(hdr, sechdrs, mod);
if (err < 0)
@@ -2564,7 +2564,7 @@ EXPORT_SYMBOL(struct_module);
#endif
#ifdef CONFIG_MARKERS
-void module_update_markers(struct module *probe_module, int *refcount)
+void module_update_markers(void)
{
struct module *mod;
@@ -2572,8 +2572,7 @@ void module_update_markers(struct module *probe_module, int *refcount)
list_for_each_entry(mod, &modules, list)
if (!mod->taints)
marker_update_probe_range(mod->markers,
- mod->markers + mod->num_markers,
- probe_module, refcount);
+ mod->markers + mod->num_markers);
mutex_unlock(&module_mutex);
}
#endif
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 022c9c3cee6..a9b04203a66 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -767,9 +767,11 @@ common_timer_set(struct k_itimer *timr, int flags,
/* SIGEV_NONE timers are not queued ! See common_timer_get */
if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
/* Setup correct expiry time for relative timers */
- if (mode == HRTIMER_MODE_REL)
- timer->expires = ktime_add(timer->expires,
- timer->base->get_time());
+ if (mode == HRTIMER_MODE_REL) {
+ timer->expires =
+ ktime_add_safe(timer->expires,
+ timer->base->get_time());
+ }
return 0;
}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 760dfc233a0..c09605f8d16 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -56,7 +56,10 @@ static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
-/* Because of FASTCALL declaration of complete, we use this wrapper */
+/*
+ * Awaken the corresponding synchronize_rcu() instance now that a
+ * grace period has elapsed.
+ */
static void wakeme_after_rcu(struct rcu_head *head)
{
struct rcu_synchronize *rcu;
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 0deef71ff8d..6522ae5b14a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -630,9 +630,12 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
set_current_state(state);
/* Setup the timer, when timeout != NULL */
- if (unlikely(timeout))
+ if (unlikely(timeout)) {
hrtimer_start(&timeout->timer, timeout->timer.expires,
HRTIMER_MODE_ABS);
+ if (!hrtimer_active(&timeout->timer))
+ timeout->task = NULL;
+ }
for (;;) {
/* Try to acquire the lock: */
diff --git a/kernel/sched.c b/kernel/sched.c
index 3eedd526090..f28f19e65b5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -155,7 +155,7 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
#include <linux/cgroup.h>
@@ -165,19 +165,16 @@ static LIST_HEAD(task_groups);
/* task group related information */
struct task_group {
-#ifdef CONFIG_FAIR_CGROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
struct cgroup_subsys_state css;
#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
- struct sched_rt_entity **rt_se;
- struct rt_rq **rt_rq;
-
- unsigned int rt_ratio;
-
/*
* shares assigned to a task group governs how much of cpu bandwidth
* is allocated to the group. The more shares a group has, the more is
@@ -213,33 +210,46 @@ struct task_group {
*
*/
unsigned long shares;
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity **rt_se;
+ struct rt_rq **rt_rq;
+
+ u64 rt_runtime;
+#endif
struct rcu_head rcu;
struct list_head list;
};
+#ifdef CONFIG_FAIR_GROUP_SCHED
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-
static struct sched_entity *init_sched_entity_p[NR_CPUS];
static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
+static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS];
static struct rt_rq *init_rt_rq_p[NR_CPUS];
+#endif
-/* task_group_mutex serializes add/remove of task groups and also changes to
+/* task_group_lock serializes add/remove of task groups and also changes to
* a task group's cpu shares.
*/
-static DEFINE_MUTEX(task_group_mutex);
+static DEFINE_SPINLOCK(task_group_lock);
/* doms_cur_mutex serializes access to doms_cur[] array */
static DEFINE_MUTEX(doms_cur_mutex);
+#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
/* kernel thread that runs rebalance_shares() periodically */
static struct task_struct *lb_monitor_task;
@@ -248,35 +258,40 @@ static int load_balance_monitor(void *unused);
static void set_se_shares(struct sched_entity *se, unsigned long shares);
+#ifdef CONFIG_USER_SCHED
+# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
+#else
+# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
+#endif
+
+#define MIN_GROUP_SHARES 2
+
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+#endif
+
/* Default task group.
* Every task in system belong to this group at bootup.
*/
struct task_group init_task_group = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
.se = init_sched_entity_p,
.cfs_rq = init_cfs_rq_p,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
.rt_se = init_sched_rt_entity_p,
.rt_rq = init_rt_rq_p,
-};
-
-#ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
-#else
-# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-
-#define MIN_GROUP_SHARES 2
-
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+};
/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
{
struct task_group *tg;
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
tg = p->user->tg;
-#elif defined(CONFIG_FAIR_CGROUP_SCHED)
+#elif defined(CONFIG_CGROUP_SCHED)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
@@ -288,21 +303,15 @@ static inline struct task_group *task_group(struct task_struct *p)
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
+#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
p->se.parent = task_group(p)->se[cpu];
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
p->rt.rt_rq = task_group(p)->rt_rq[cpu];
p->rt.parent = task_group(p)->rt_se[cpu];
-}
-
-static inline void lock_task_group_list(void)
-{
- mutex_lock(&task_group_mutex);
-}
-
-static inline void unlock_task_group_list(void)
-{
- mutex_unlock(&task_group_mutex);
+#endif
}
static inline void lock_doms_cur(void)
@@ -318,12 +327,10 @@ static inline void unlock_doms_cur(void)
#else
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline void lock_task_group_list(void) { }
-static inline void unlock_task_group_list(void) { }
static inline void lock_doms_cur(void) { }
static inline void unlock_doms_cur(void) { }
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* CONFIG_GROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
@@ -363,7 +370,7 @@ struct cfs_rq {
struct rt_rq {
struct rt_prio_array active;
unsigned long rt_nr_running;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
int highest_prio; /* highest queued rt task prio */
#endif
#ifdef CONFIG_SMP
@@ -373,7 +380,9 @@ struct rt_rq {
int rt_throttled;
u64 rt_time;
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
+ unsigned long rt_nr_boosted;
+
struct rq *rq;
struct list_head leaf_rt_rq_list;
struct task_group *tg;
@@ -447,6 +456,8 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
struct list_head leaf_rt_rq_list;
#endif
@@ -652,19 +663,21 @@ const_debug unsigned int sysctl_sched_features =
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
- * period over which we measure -rt task cpu usage in ms.
+ * period over which we measure -rt task cpu usage in us.
* default: 1s
*/
-const_debug unsigned int sysctl_sched_rt_period = 1000;
+unsigned int sysctl_sched_rt_period = 1000000;
-#define SCHED_RT_FRAC_SHIFT 16
-#define SCHED_RT_FRAC (1UL << SCHED_RT_FRAC_SHIFT)
+/*
+ * part of the period that we allow rt tasks to run in us.
+ * default: 0.95s
+ */
+int sysctl_sched_rt_runtime = 950000;
/*
- * ratio of time -rt tasks may consume.
- * default: 95%
+ * single value that denotes runtime == period, ie unlimited time.
*/
-const_debug unsigned int sysctl_sched_rt_ratio = 62259;
+#define RUNTIME_INF ((u64)~0ULL)
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -4571,6 +4584,15 @@ recheck:
return -EPERM;
}
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Do not allow realtime tasks into groups that have no runtime
+ * assigned.
+ */
+ if (rt_policy(policy) && task_group(p)->rt_runtime == 0)
+ return -EPERM;
+#endif
+
retval = security_task_setscheduler(p, policy, param);
if (retval)
return retval;
@@ -7112,7 +7134,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
@@ -7123,7 +7145,8 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
+ rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
#endif
}
@@ -7146,7 +7169,9 @@ static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg,
se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
se->parent = NULL;
}
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
static void init_tg_rt_entry(struct rq *rq, struct task_group *tg,
struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
int cpu, int add)
@@ -7175,7 +7200,7 @@ void __init sched_init(void)
init_defrootdomain();
#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
list_add(&init_task_group.list, &task_groups);
#endif
@@ -7196,7 +7221,10 @@ void __init sched_init(void)
&per_cpu(init_cfs_rq, i),
&per_cpu(init_sched_entity, i), i, 1);
- init_task_group.rt_ratio = sysctl_sched_rt_ratio; /* XXX */
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ init_task_group.rt_runtime =
+ sysctl_sched_rt_runtime * NSEC_PER_USEC;
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
init_tg_rt_entry(rq, &init_task_group,
&per_cpu(init_rt_rq, i),
@@ -7303,7 +7331,7 @@ void normalize_rt_tasks(void)
unsigned long flags;
struct rq *rq;
- read_lock_irq(&tasklist_lock);
+ read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
/*
* Only normalize user tasks:
@@ -7329,16 +7357,16 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock_irqsave(&p->pi_lock, flags);
+ spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
- read_unlock_irq(&tasklist_lock);
+ read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
@@ -7387,9 +7415,9 @@ void set_curr_task(int cpu, struct task_struct *p)
#endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED
-#ifdef CONFIG_SMP
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
/*
* distribute shares of all task groups among their schedulable entities,
* to reflect load distribution across cpus.
@@ -7540,7 +7568,8 @@ static int load_balance_monitor(void *unused)
}
#endif /* CONFIG_SMP */
-static void free_sched_group(struct task_group *tg)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void free_fair_sched_group(struct task_group *tg)
{
int i;
@@ -7549,49 +7578,27 @@ static void free_sched_group(struct task_group *tg)
kfree(tg->cfs_rq[i]);
if (tg->se)
kfree(tg->se[i]);
- if (tg->rt_rq)
- kfree(tg->rt_rq[i]);
- if (tg->rt_se)
- kfree(tg->rt_se[i]);
}
kfree(tg->cfs_rq);
kfree(tg->se);
- kfree(tg->rt_rq);
- kfree(tg->rt_se);
- kfree(tg);
}
-/* allocate runqueue etc for a new task group */
-struct task_group *sched_create_group(void)
+static int alloc_fair_sched_group(struct task_group *tg)
{
- struct task_group *tg;
struct cfs_rq *cfs_rq;
struct sched_entity *se;
- struct rt_rq *rt_rq;
- struct sched_rt_entity *rt_se;
struct rq *rq;
int i;
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
-
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
if (!tg->se)
goto err;
- tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
- if (!tg->rt_rq)
- goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
- if (!tg->rt_se)
- goto err;
tg->shares = NICE_0_LOAD;
- tg->rt_ratio = 0; /* XXX */
for_each_possible_cpu(i) {
rq = cpu_rq(i);
@@ -7606,6 +7613,79 @@ struct task_group *sched_create_group(void)
if (!se)
goto err;
+ init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
+ }
+
+ return 1;
+
+ err:
+ return 0;
+}
+
+static inline void register_fair_sched_group(struct task_group *tg, int cpu)
+{
+ list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
+ &cpu_rq(cpu)->leaf_cfs_rq_list);
+}
+
+static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
+{
+ list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
+}
+#else
+static inline void free_fair_sched_group(struct task_group *tg)
+{
+}
+
+static inline int alloc_fair_sched_group(struct task_group *tg)
+{
+ return 1;
+}
+
+static inline void register_fair_sched_group(struct task_group *tg, int cpu)
+{
+}
+
+static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
+{
+}
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (tg->rt_rq)
+ kfree(tg->rt_rq[i]);
+ if (tg->rt_se)
+ kfree(tg->rt_se[i]);
+ }
+
+ kfree(tg->rt_rq);
+ kfree(tg->rt_se);
+}
+
+static int alloc_rt_sched_group(struct task_group *tg)
+{
+ struct rt_rq *rt_rq;
+ struct sched_rt_entity *rt_se;
+ struct rq *rq;
+ int i;
+
+ tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL);
+ if (!tg->rt_rq)
+ goto err;
+ tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL);
+ if (!tg->rt_se)
+ goto err;
+
+ tg->rt_runtime = 0;
+
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
rt_rq = kmalloc_node(sizeof(struct rt_rq),
GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
if (!rt_rq)
@@ -7616,20 +7696,75 @@ struct task_group *sched_create_group(void)
if (!rt_se)
goto err;
- init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0);
init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0);
}
- lock_task_group_list();
+ return 1;
+
+ err:
+ return 0;
+}
+
+static inline void register_rt_sched_group(struct task_group *tg, int cpu)
+{
+ list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
+ &cpu_rq(cpu)->leaf_rt_rq_list);
+}
+
+static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
+{
+ list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
+}
+#else
+static inline void free_rt_sched_group(struct task_group *tg)
+{
+}
+
+static inline int alloc_rt_sched_group(struct task_group *tg)
+{
+ return 1;
+}
+
+static inline void register_rt_sched_group(struct task_group *tg, int cpu)
+{
+}
+
+static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
+{
+}
+#endif
+
+static void free_sched_group(struct task_group *tg)
+{
+ free_fair_sched_group(tg);
+ free_rt_sched_group(tg);
+ kfree(tg);
+}
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(void)
+{
+ struct task_group *tg;
+ unsigned long flags;
+ int i;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ if (!alloc_fair_sched_group(tg))
+ goto err;
+
+ if (!alloc_rt_sched_group(tg))
+ goto err;
+
+ spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i) {
- rq = cpu_rq(i);
- cfs_rq = tg->cfs_rq[i];
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
- rt_rq = tg->rt_rq[i];
- list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
+ register_fair_sched_group(tg, i);
+ register_rt_sched_group(tg, i);
}
list_add_rcu(&tg->list, &task_groups);
- unlock_task_group_list();
+ spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
@@ -7648,21 +7783,16 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
- struct cfs_rq *cfs_rq = NULL;
- struct rt_rq *rt_rq = NULL;
+ unsigned long flags;
int i;
- lock_task_group_list();
+ spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i) {
- cfs_rq = tg->cfs_rq[i];
- list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
- rt_rq = tg->rt_rq[i];
- list_del_rcu(&rt_rq->leaf_rt_rq_list);
+ unregister_fair_sched_group(tg, i);
+ unregister_rt_sched_group(tg, i);
}
list_del_rcu(&tg->list);
- unlock_task_group_list();
-
- BUG_ON(!cfs_rq);
+ spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs complete */
call_rcu(&tg->rcu, free_sched_group_rcu);
@@ -7703,6 +7833,7 @@ void sched_move_task(struct task_struct *tsk)
task_rq_unlock(rq, &flags);
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
/* rq->lock to be locked by caller */
static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
@@ -7728,13 +7859,14 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
}
}
+static DEFINE_MUTEX(shares_mutex);
+
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
- struct cfs_rq *cfs_rq;
- struct rq *rq;
+ unsigned long flags;
- lock_task_group_list();
+ mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
@@ -7746,10 +7878,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
* load_balance_fair) from referring to this group first,
* by taking it off the rq->leaf_cfs_rq_list on each cpu.
*/
- for_each_possible_cpu(i) {
- cfs_rq = tg->cfs_rq[i];
- list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
- }
+ spin_lock_irqsave(&task_group_lock, flags);
+ for_each_possible_cpu(i)
+ unregister_fair_sched_group(tg, i);
+ spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for any ongoing reference to this group to finish */
synchronize_sched();
@@ -7769,13 +7901,12 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
* Enable load balance activity on this group, by inserting it back on
* each cpu's rq->leaf_cfs_rq_list.
*/
- for_each_possible_cpu(i) {
- rq = cpu_rq(i);
- cfs_rq = tg->cfs_rq[i];
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
- }
+ spin_lock_irqsave(&task_group_lock, flags);
+ for_each_possible_cpu(i)
+ register_fair_sched_group(tg, i);
+ spin_unlock_irqrestore(&task_group_lock, flags);
done:
- unlock_task_group_list();
+ mutex_unlock(&shares_mutex);
return 0;
}
@@ -7783,35 +7914,84 @@ unsigned long sched_group_shares(struct task_group *tg)
{
return tg->shares;
}
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
/*
- * Ensure the total rt_ratio <= sysctl_sched_rt_ratio
+ * Ensure that the real time constraints are schedulable.
*/
-int sched_group_set_rt_ratio(struct task_group *tg, unsigned long rt_ratio)
+static DEFINE_MUTEX(rt_constraints_mutex);
+
+static unsigned long to_ratio(u64 period, u64 runtime)
+{
+ if (runtime == RUNTIME_INF)
+ return 1ULL << 16;
+
+ runtime *= (1ULL << 16);
+ div64_64(runtime, period);
+ return runtime;
+}
+
+static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
struct task_group *tgi;
unsigned long total = 0;
+ unsigned long global_ratio =
+ to_ratio(sysctl_sched_rt_period,
+ sysctl_sched_rt_runtime < 0 ?
+ RUNTIME_INF : sysctl_sched_rt_runtime);
rcu_read_lock();
- list_for_each_entry_rcu(tgi, &task_groups, list)
- total += tgi->rt_ratio;
- rcu_read_unlock();
+ list_for_each_entry_rcu(tgi, &task_groups, list) {
+ if (tgi == tg)
+ continue;
- if (total + rt_ratio - tg->rt_ratio > sysctl_sched_rt_ratio)
- return -EINVAL;
+ total += to_ratio(period, tgi->rt_runtime);
+ }
+ rcu_read_unlock();
- tg->rt_ratio = rt_ratio;
- return 0;
+ return total + to_ratio(period, runtime) < global_ratio;
}
-unsigned long sched_group_rt_ratio(struct task_group *tg)
+int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
- return tg->rt_ratio;
+ u64 rt_runtime, rt_period;
+ int err = 0;
+
+ rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
+ if (rt_runtime_us == -1)
+ rt_runtime = rt_period;
+
+ mutex_lock(&rt_constraints_mutex);
+ if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ if (rt_runtime_us == -1)
+ rt_runtime = RUNTIME_INF;
+ tg->rt_runtime = rt_runtime;
+ unlock:
+ mutex_unlock(&rt_constraints_mutex);
+
+ return err;
}
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+long sched_group_rt_runtime(struct task_group *tg)
+{
+ u64 rt_runtime_us;
+
+ if (tg->rt_runtime == RUNTIME_INF)
+ return -1;
+
+ rt_runtime_us = tg->rt_runtime;
+ do_div(rt_runtime_us, NSEC_PER_USEC);
+ return rt_runtime_us;
+}
+#endif
+#endif /* CONFIG_GROUP_SCHED */
-#ifdef CONFIG_FAIR_CGROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
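
The admission test works in 16-bit fixed point: to_ratio() is meant to express runtime/period as runtime * 65536 / period (note that, as quoted above, the quotient from div64_64() is never assigned back to runtime, which looks like an oversight), and __rt_schedulable() accepts a new setting only if the sum of these ratios over the other groups plus the requested one stays below the global ratio. With the defaults introduced earlier (a 1,000,000 us period and a 950,000 us runtime) the global ratio is 950000 * 65536 / 1000000, about 62259, exactly the value the removed sched_rt_ratio knob used to default to.
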
@@ -7857,9 +8037,15 @@ static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk)
{
+#ifdef CONFIG_RT_GROUP_SCHED
+ /* Don't accept realtime tasks when there is no way for them to run */
+ if (rt_task(tsk) && cgroup_tg(cgrp)->rt_runtime == 0)
+ return -EINVAL;
+#else
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
return -EINVAL;
+#endif
return 0;
}
@@ -7871,6 +8057,7 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
sched_move_task(tsk);
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
u64 shareval)
{
@@ -7883,31 +8070,70 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares;
}
+#endif
-static int cpu_rt_ratio_write_uint(struct cgroup *cgrp, struct cftype *cftype,
- u64 rt_ratio_val)
+#ifdef CONFIG_RT_GROUP_SCHED
+static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
+ struct file *file,
+ const char __user *userbuf,
+ size_t nbytes, loff_t *unused_ppos)
{
- return sched_group_set_rt_ratio(cgroup_tg(cgrp), rt_ratio_val);
+ char buffer[64];
+ int retval = 0;
+ s64 val;
+ char *end;
+
+ if (!nbytes)
+ return -EINVAL;
+ if (nbytes >= sizeof(buffer))
+ return -E2BIG;
+ if (copy_from_user(buffer, userbuf, nbytes))
+ return -EFAULT;
+
+ buffer[nbytes] = 0; /* nul-terminate */
+
+ /* strip newline if necessary */
+ if (nbytes && (buffer[nbytes-1] == '\n'))
+ buffer[nbytes-1] = 0;
+ val = simple_strtoll(buffer, &end, 0);
+ if (*end)
+ return -EINVAL;
+
+ /* Pass to subsystem */
+ retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+ if (!retval)
+ retval = nbytes;
+ return retval;
}
-static u64 cpu_rt_ratio_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
+ struct file *file,
+ char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ char tmp[64];
+ long val = sched_group_rt_runtime(cgroup_tg(cgrp));
+ int len = sprintf(tmp, "%ld\n", val);
- return (u64) tg->rt_ratio;
+ return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
+#endif
static struct cftype cpu_files[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
.read_uint = cpu_shares_read_uint,
.write_uint = cpu_shares_write_uint,
},
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
{
- .name = "rt_ratio",
- .read_uint = cpu_rt_ratio_read_uint,
- .write_uint = cpu_rt_ratio_write_uint,
+ .name = "rt_runtime_us",
+ .read = cpu_rt_runtime_read,
+ .write = cpu_rt_runtime_write,
},
+#endif
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -7926,7 +8152,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
.early_init = 1,
};
-#endif /* CONFIG_FAIR_CGROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT
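
Under CONFIG_CGROUP_SCHED the budget is exposed as a per-cgroup file rather than a single sysctl; given the cftype name above and the usual "cpu." subsystem prefix, a group would be tuned through its cpu.rt_runtime_us file, with -1 mapping to RUNTIME_INF. A hypothetical userspace sketch, assuming the cpu controller is mounted at /cgroup:

    #include <stdio.h>

    int main(void)
    {
            /* allow tasks in "rtgroup" 100 ms of RT execution per period */
            FILE *f = fopen("/cgroup/rtgroup/cpu.rt_runtime_us", "w");

            if (!f)
                    return 1;
            fprintf(f, "100000\n");         /* writing -1 would lift the limit */
            return fclose(f) ? 1 : 0;
    }
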
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 274b40d7bef..f54792b175b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,14 +55,14 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
return !list_empty(&rt_se->run_list);
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
- return SCHED_RT_FRAC;
+ return RUNTIME_INF;
- return rt_rq->tg->rt_ratio;
+ return rt_rq->tg->rt_runtime;
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -89,7 +89,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se = rt_rq->rt_se;
@@ -102,7 +102,7 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
}
}
-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se = rt_rq->rt_se;
@@ -110,11 +110,31 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
dequeue_rt_entity(rt_se);
}
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+ struct rt_rq *rt_rq = group_rt_rq(rt_se);
+ struct task_struct *p;
+
+ if (rt_rq)
+ return !!rt_rq->rt_nr_boosted;
+
+ p = rt_task_of(rt_se);
+ return p->prio != p->normal_prio;
+}
+
#else
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
- return sysctl_sched_rt_ratio;
+ if (sysctl_sched_rt_runtime == -1)
+ return RUNTIME_INF;
+
+ return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -141,19 +161,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
return NULL;
}
-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}
-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+ return rt_rq->rt_throttled;
+}
#endif
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
@@ -163,28 +187,26 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
return rt_task_of(rt_se)->prio;
}
-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
- unsigned int rt_ratio = sched_rt_ratio(rt_rq);
- u64 period, ratio;
+ u64 runtime = sched_rt_runtime(rt_rq);
- if (rt_ratio == SCHED_RT_FRAC)
+ if (runtime == RUNTIME_INF)
return 0;
if (rt_rq->rt_throttled)
- return 1;
-
- period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
- ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+ return rt_rq_throttled(rt_rq);
- if (rt_rq->rt_time > ratio) {
+ if (rt_rq->rt_time > runtime) {
struct rq *rq = rq_of_rt_rq(rt_rq);
rq->rt_throttled = 1;
rt_rq->rt_throttled = 1;
- sched_rt_ratio_dequeue(rt_rq);
- return 1;
+ if (rt_rq_throttled(rt_rq)) {
+ sched_rt_rq_dequeue(rt_rq);
+ return 1;
+ }
}
return 0;
@@ -196,17 +218,16 @@ static void update_sched_rt_period(struct rq *rq)
u64 period;
while (rq->clock > rq->rt_period_expire) {
- period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+ period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
rq->rt_period_expire += period;
for_each_leaf_rt_rq(rt_rq, rq) {
- unsigned long rt_ratio = sched_rt_ratio(rt_rq);
- u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+ u64 runtime = sched_rt_runtime(rt_rq);
- rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
- if (rt_rq->rt_throttled) {
+ rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
+ if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
- sched_rt_ratio_enqueue(rt_rq);
+ sched_rt_rq_enqueue(rt_rq);
}
}
@@ -239,12 +260,7 @@ static void update_curr_rt(struct rq *rq)
cpuacct_charge(curr, delta_exec);
rt_rq->rt_time += delta_exec;
- /*
- * might make it a tad more accurate:
- *
- * update_sched_rt_period(rq);
- */
- if (sched_rt_ratio_exceeded(rt_rq))
+ if (sched_rt_runtime_exceeded(rt_rq))
resched_task(curr);
}
@@ -253,7 +269,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
if (rt_se_prio(rt_se) < rt_rq->highest_prio)
rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
@@ -265,6 +281,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ if (rt_se_boosted(rt_se))
+ rt_rq->rt_nr_boosted++;
+#endif
}
static inline
@@ -273,7 +293,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
if (rt_rq->rt_nr_running) {
struct rt_prio_array *array;
@@ -295,6 +315,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+ if (rt_se_boosted(rt_se))
+ rt_rq->rt_nr_boosted--;
+
+ WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -303,7 +329,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
- if (group_rq && group_rq->rt_throttled)
+ if (group_rq && rt_rq_throttled(group_rq))
return;
list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -496,7 +522,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
if (unlikely(!rt_rq->rt_nr_running))
return NULL;
- if (sched_rt_ratio_exceeded(rt_rq))
+ if (rt_rq_throttled(rt_rq))
return NULL;
do {
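
Throttling now works on absolute budgets: once a runqueue's accumulated rt_time exceeds its runtime it is dequeued (unless it still holds priority-boosted tasks), and at each period boundary the budget is replenished by subtracting min(rt_time, runtime). For example, with the default 950 ms runtime in a 1 s period, a group that has accumulated 1.2 s of RT execution is throttled; after the next period boundary its rt_time drops to 250 ms, which is below the runtime again, so the group is re-enqueued.
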
diff --git a/kernel/signal.c b/kernel/signal.c
index 2c1f08defac..84917fe507f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -972,7 +972,7 @@ void zap_other_threads(struct task_struct *p)
}
}
-int fastcall __fatal_signal_pending(struct task_struct *tsk)
+int __fatal_signal_pending(struct task_struct *tsk)
{
return sigismember(&tsk->pending.signal, SIGKILL);
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d41ef6b4cf7..8b7e9541179 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -311,22 +311,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sched_rt_period_ms",
- .data = &sysctl_sched_rt_period,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sched_rt_ratio",
- .data = &sysctl_sched_rt_ratio,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
{
.ctl_name = CTL_UNNUMBERED,
@@ -348,6 +332,22 @@ static struct ctl_table kern_table[] = {
#endif
{
.ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_rt_period_us",
+ .data = &sysctl_sched_rt_period,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_rt_runtime_us",
+ .data = &sysctl_sched_rt_runtime,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
.procname = "sched_compat_yield",
.data = &sysctl_sched_compat_yield,
.maxlen = sizeof(unsigned int),
@@ -978,8 +978,8 @@ static struct ctl_table vm_table[] = {
{
.ctl_name = CTL_UNNUMBERED,
.procname = "nr_overcommit_hugepages",
- .data = &nr_overcommit_huge_pages,
- .maxlen = sizeof(nr_overcommit_huge_pages),
+ .data = &sysctl_overcommit_huge_pages,
+ .maxlen = sizeof(sysctl_overcommit_huge_pages),
.mode = 0644,
.proc_handler = &hugetlb_overcommit_handler,
},
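
Both knobs therefore surface in microseconds as /proc/sys/kernel/sched_rt_period_us and /proc/sys/kernel/sched_rt_runtime_us, replacing the old millisecond period and fixed-point ratio. Writing -1 to sched_rt_runtime_us disables throttling entirely, since the scheduler maps a negative runtime to RUNTIME_INF.
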
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
index 62b1287932e..41468035473 100644
--- a/kernel/timeconst.pl
+++ b/kernel/timeconst.pl
@@ -339,7 +339,7 @@ sub output($@)
print "\n";
foreach $pfx ('HZ_TO_MSEC','MSEC_TO_HZ',
- 'USEC_TO_HZ','HZ_TO_USEC') {
+ 'HZ_TO_USEC','USEC_TO_HZ') {
foreach $bit (32, 64) {
foreach $suf ('MUL', 'ADJ', 'SHR') {
printf "#define %-23s %s\n",
diff --git a/kernel/user.c b/kernel/user.c
index 7d7900c5a1f..7132022a040 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -57,7 +57,7 @@ struct user_struct root_user = {
.uid_keyring = &root_user_keyring,
.session_keyring = &root_session_keyring,
#endif
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
.tg = &init_task_group,
#endif
};
@@ -90,7 +90,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
return NULL;
}
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
static void sched_destroy_user(struct user_struct *up)
{
@@ -113,15 +113,15 @@ static void sched_switch_user(struct task_struct *p)
sched_move_task(p);
}
-#else /* CONFIG_FAIR_USER_SCHED */
+#else /* CONFIG_USER_SCHED */
static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }
-#endif /* CONFIG_FAIR_USER_SCHED */
+#endif /* CONFIG_USER_SCHED */
-#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
@@ -137,6 +137,7 @@ static inline void uids_mutex_unlock(void)
}
/* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -163,10 +164,45 @@ static ssize_t cpu_shares_store(struct kobject *kobj,
static struct kobj_attribute cpu_share_attr =
__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+ return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
+}
+
+static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+ unsigned long rt_runtime;
+ int rc;
+
+ sscanf(buf, "%lu", &rt_runtime);
+
+ rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
+
+ return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_runtime_attr =
+ __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+#endif
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
&cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+ &cpu_rt_runtime_attr.attr,
+#endif
NULL
};
@@ -269,7 +305,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
schedule_work(&up->work);
}
-#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
+#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
@@ -373,7 +409,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- /* This case is not possible when CONFIG_FAIR_USER_SCHED
+ /* This case is not possible when CONFIG_USER_SCHED
* is defined, since we serialize alloc_uid() using
* uids_mutex. Hence no need to call
* sched_destroy_user() or remove_user_sysfs_dir().
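
With CONFIG_USER_SCHED and sysfs enabled, every per-uid directory under /sys/kernel/uids/ now carries a cpu_rt_runtime attribute next to cpu_share, so a user's real-time budget can be tuned per uid in the same way as its CFS share.
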
diff --git a/mm/filemap.c b/mm/filemap.c
index b7b1be6dbd8..5c74b68935a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -604,7 +604,7 @@ void __lock_page(struct page *page)
}
EXPORT_SYMBOL(__lock_page);
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9a38031246..cb1b3a7ecdf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,14 +24,15 @@
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
-unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;
/*
@@ -609,8 +610,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer,
size_t *length, loff_t *ppos)
{
- spin_lock(&hugetlb_lock);
proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+ spin_lock(&hugetlb_lock);
+ nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
spin_unlock(&hugetlb_lock);
return 0;
}
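
The reordering matters because proc_doulongvec_minmax() copies the value from user space and may therefore fault and sleep, which is not allowed while the hugetlb_lock spinlock is held; parsing into the separate sysctl_overcommit_huge_pages first and only publishing the result to nr_overcommit_huge_pages under the lock keeps the sleeping work outside the critical section.
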
diff --git a/mm/memory.c b/mm/memory.c
index e5628a5fd67..ce3c9e4492d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -989,6 +989,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int i;
unsigned int vm_flags;
+ if (len <= 0)
+ return 0;
/*
* Require read or write permissions.
* If 'force' is set, we only require the "MAY" flags.
@@ -2709,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ /*
+ * Do not print if we are in atomic
+ * contexts (in exception stacks, etc.):
+ */
+ if (preempt_count())
+ return;
+
down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
@@ -2717,7 +2726,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
if (buf) {
char *p, *s;
- p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
+ p = d_path(&f->f_path, buf, PAGE_SIZE);
if (IS_ERR(p))
p = "?";
s = strrchr(p, '/');
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 83c69f8a64c..6c7ba1a63d2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -116,22 +116,51 @@ static void mpol_rebind_policy(struct mempolicy *pol,
/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
- int empty = nodes_empty(*nodes);
+ int was_empty, is_empty;
+
+ if (!nodes)
+ return 0;
+
+ /*
+ * "Contextualize" the in-coming nodemast for cpusets:
+ * Remember whether in-coming nodemask was empty, If not,
+ * restrict the nodes to the allowed nodes in the cpuset.
+ * This is guaranteed to be a subset of nodes with memory.
+ */
+ cpuset_update_task_memory_state();
+ is_empty = was_empty = nodes_empty(*nodes);
+ if (!was_empty) {
+ nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
+ is_empty = nodes_empty(*nodes); /* after "contextualization" */
+ }
switch (mode) {
case MPOL_DEFAULT:
- if (!empty)
+ /*
+ * require caller to specify an empty nodemask
+ * before "contextualization"
+ */
+ if (!was_empty)
return -EINVAL;
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
- /* Preferred will only use the first bit, but allow
- more for now. */
- if (empty)
+ /*
+ * require at least 1 valid node after "contextualization"
+ */
+ if (is_empty)
+ return -EINVAL;
+ break;
+ case MPOL_PREFERRED:
+ /*
+ * Did caller specify invalid nodes?
+ * Don't silently accept this as "local allocation".
+ */
+ if (!was_empty && is_empty)
return -EINVAL;
break;
}
- return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
+ return 0;
}
/* Generate a custom zonelist for the BIND policy. */
@@ -188,8 +217,6 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
switch (mode) {
case MPOL_INTERLEAVE:
policy->v.nodes = *nodes;
- nodes_and(policy->v.nodes, policy->v.nodes,
- node_states[N_HIGH_MEMORY]);
if (nodes_weight(policy->v.nodes) == 0) {
kmem_cache_free(policy_cache, policy);
return ERR_PTR(-EINVAL);
@@ -421,18 +448,6 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
return err;
}
-static int contextualize_policy(int mode, nodemask_t *nodes)
-{
- if (!nodes)
- return 0;
-
- cpuset_update_task_memory_state();
- if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
- return -EINVAL;
- return mpol_check_policy(mode, nodes);
-}
-
-
/*
* Update task->flags PF_MEMPOLICY bit: set iff non-default
* mempolicy. Allows more rapid checking of this (combined perhaps
@@ -468,7 +483,7 @@ static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
struct mempolicy *new;
- if (contextualize_policy(mode, nodes))
+ if (mpol_check_policy(mode, nodes))
return -EINVAL;
new = mpol_new(mode, nodes);
if (IS_ERR(new))
@@ -915,10 +930,6 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
err = get_nodes(&nodes, nmask, maxnode);
if (err)
return err;
-#ifdef CONFIG_CPUSETS
- /* Restrict the nodes to the allowed nodes in the cpuset */
- nodes_and(nodes, nodes, current->mems_allowed);
-#endif
return do_mbind(start, len, mode, &nodes, flags);
}
@@ -1985,7 +1996,7 @@ int show_numa_map(struct seq_file *m, void *v)
if (file) {
seq_printf(m, " file=");
- seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
+ seq_path(m, &file->f_path, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_printf(m, " heap");
} else if (vma->vm_start <= mm->start_stack &&
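
In practice the "contextualization" tightens the error behaviour: the incoming mask is intersected with cpuset_current_mems_allowed up front, an MPOL_BIND or MPOL_INTERLEAVE request whose nodes all fall outside the caller's cpuset now fails with -EINVAL, and MPOL_PREFERRED with only invalid nodes is rejected instead of quietly degrading to local allocation. This also removes the need for the explicit nodes_and() that sys_mbind() used to perform under CONFIG_CPUSETS.
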
diff --git a/mm/slab.c b/mm/slab.c
index 40c00dacbe4..473e6c2eaef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
slabp->nodeid = nodeid;
+ slabp->free = 0;
return slabp;
}
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[i - 1] = BUFCTL_END;
- slabp->free = 0;
}
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
if (!slabp)
goto opps1;
- slabp->nodeid = nodeid;
slab_map_pages(cachep, slabp, objp);
cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b..4b3895cb90e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
/* Not all arches define cache_line_size */
#ifndef cache_line_size
@@ -308,7 +310,7 @@ static inline int is_end(void *addr)
return (unsigned long)addr & PAGE_MAPPING_ANON;
}
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
{
return page->end - PAGE_MAPPING_ANON;
}
@@ -1078,14 +1080,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
int pages = 1 << s->order;
- if (s->order)
- flags |= __GFP_COMP;
-
- if (s->flags & SLAB_CACHE_DMA)
- flags |= SLUB_DMA;
-
- if (s->flags & SLAB_RECLAIM_ACCOUNT)
- flags |= __GFP_RECLAIMABLE;
+ flags |= s->allocflags;
if (node == -1)
page = alloc_pages(flags, s->order);
@@ -1546,7 +1541,6 @@ load_freelist:
unlock_out:
slab_unlock(c->page);
stat(c, ALLOC_SLOWPATH);
-out:
#ifdef SLUB_FASTPATH
local_irq_restore(flags);
#endif
@@ -1581,8 +1575,24 @@ new_slab:
c->page = new;
goto load_freelist;
}
- object = NULL;
- goto out;
+#ifdef SLUB_FASTPATH
+ local_irq_restore(flags);
+#endif
+ /*
+ * No memory available.
+ *
+ * If the slab uses higher order allocs but the object is
+ * smaller than a page size then we can fallback in emergencies
+ * to the page allocator via kmalloc_large. The page allocator may
+ * have failed to obtain a higher order page and we can try to
+ * allocate a single page if the object fits into a single page.
+ * That is only possible if certain conditions are met that are being
+ * checked when a slab is created.
+ */
+ if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+ return kmalloc_large(s->objsize, gfpflags);
+
+ return NULL;
debug:
object = c->page->freelist;
if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2329,10 +2339,33 @@ static int calculate_sizes(struct kmem_cache *s)
size = ALIGN(size, align);
s->size = size;
- s->order = calculate_order(size);
+ if ((flags & __KMALLOC_CACHE) &&
+ PAGE_SIZE / size < slub_min_objects) {
+ /*
+ * Kmalloc cache that would not have enough objects in
+ * an order 0 page. Kmalloc slabs can fallback to
+ * page allocator order 0 allocs so take a reasonably large
+ * order that will allow us a good number of objects.
+ */
+ s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+ s->flags |= __PAGE_ALLOC_FALLBACK;
+ s->allocflags |= __GFP_NOWARN;
+ } else
+ s->order = calculate_order(size);
+
if (s->order < 0)
return 0;
+ s->allocflags = 0;
+ if (s->order)
+ s->allocflags |= __GFP_COMP;
+
+ if (s->flags & SLAB_CACHE_DMA)
+ s->allocflags |= SLUB_DMA;
+
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ s->allocflags |= __GFP_RECLAIMABLE;
+
/*
* Determine the number of objects per slab
*/
@@ -2484,11 +2517,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
#endif
static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL))
+ flags | __KMALLOC_CACHE, NULL))
goto panic;
list_add(&s->list, &slab_caches);
@@ -2670,9 +2703,8 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(flags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -2688,9 +2720,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(flags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, flags);
s = get_slab(size, flags);
@@ -3001,7 +3032,7 @@ void __init kmem_cache_init(void)
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
@@ -3028,7 +3059,7 @@ void __init kmem_cache_init(void)
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
kmalloc_caches[i]. name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3057,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
+ if ((s->flags & __PAGE_ALLOC_FALLBACK))
+ return 1;
+
if (s->ctor)
return 1;
@@ -3218,9 +3252,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(gfpflags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, gfpflags);
+
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3268,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE / 2))
- return (void *)__get_free_pages(gfpflags | __GFP_COMP,
- get_order(size));
+ if (unlikely(size > PAGE_SIZE))
+ return kmalloc_large(size, gfpflags);
+
s = get_slab(size, gfpflags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
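
The net effect is a cleaner size split: requests above PAGE_SIZE bypass the slab layer and go to the page allocator through kmalloc_large(), while the kmalloc cache array now runs up to and including the PAGE_SHIFT order, so on 4 KB pages kmalloc(4096) is served by the new kmalloc-4096 cache and kmalloc(5000) returns compound pages instead. Kmalloc caches that would hold too few objects in an order-0 slab are additionally flagged __PAGE_ALLOC_FALLBACK, letting an allocation that cannot obtain a higher-order page fall back to a single page when the object fits, rather than failing outright.
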
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 02ccab5ad9d..2da149cfc9a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1394,7 +1394,7 @@ static int swap_show(struct seq_file *swap, void *v)
}
file = ptr->swap_file;
- len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
+ len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index d3e4e1877e6..0c2c93735e9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -465,7 +465,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
return len;
}
-void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
@@ -476,7 +476,7 @@ void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
rfcomm_schedule(RFCOMM_SCHED_TX);
}
-void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
diff --git a/net/core/dev.c b/net/core/dev.c
index 9549417250b..b3e19ae57f9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2143,7 +2143,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
*
* The entry's receive function will be scheduled to run
*/
-void fastcall __napi_schedule(struct napi_struct *n)
+void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
@@ -3038,8 +3038,7 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
EXPORT_SYMBOL(dev_unicast_sync);
/**
- * dev_unicast_unsync - Remove synchronized addresses from the destination
- * device
+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
* @to: destination device
* @from: source device
*
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4e354221ec2..cfc07dac636 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1907,11 +1907,11 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
- * Note: The size of each block of data returned can be arbitary,
+ * Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy seqeuental
* reads of potentially non linear data.
*
- * Note: Fragment lists within fragments are not implemented
+ * Note 2: Fragment lists within fragments are not implemented
* at the moment, state->root_skb could be replaced with
* a stack for this purpose.
*/
diff --git a/net/core/sock.c b/net/core/sock.c
index 433715fb141..09cb3a74de7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1731,7 +1731,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
atomic_set(&sk->sk_drops, 0);
}
-void fastcall lock_sock_nested(struct sock *sk, int subclass)
+void lock_sock_nested(struct sock *sk, int subclass)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
@@ -1748,7 +1748,7 @@ void fastcall lock_sock_nested(struct sock *sk, int subclass)
EXPORT_SYMBOL(lock_sock_nested);
-void fastcall release_sock(struct sock *sk)
+void release_sock(struct sock *sk)
{
/*
* The sk_lock has mutex_unlock() semantics:
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 0998e6d0966..8c6a7f1a25e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -464,9 +464,9 @@ rpc_release_client(struct rpc_clnt *clnt)
/**
* rpc_bind_new_program - bind a new RPC program to an existing client
- * @old - old rpc_client
- * @program - rpc program to set
- * @vers - rpc program version
+ * @old: old rpc_client
+ * @program: rpc program to set
+ * @vers: rpc program version
*
* Clones the rpc client and sets up a new RPC program. This is mainly
* of use for enabling different RPC programs to share the same transport.
@@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(rpc_call_sync);
* @clnt: pointer to RPC client
* @msg: RPC call parameters
* @flags: RPC call flags
- * @ops: RPC call ops
+ * @tk_ops: RPC call ops
* @data: user call data
*/
int
@@ -610,7 +610,7 @@ EXPORT_SYMBOL_GPL(rpc_call_start);
* rpc_peeraddr - extract remote peer address from clnt's xprt
* @clnt: RPC client structure
* @buf: target buffer
- * @size: length of target buffer
+ * @bufsize: length of target buffer
*
* Returns the number of bytes that are actually in the stored address.
*/
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7e197168a24..1b395a41a8b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -495,7 +495,7 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
static void
rpc_release_path(struct nameidata *nd)
{
- path_release(nd);
+ path_put(&nd->path);
rpc_put_mount();
}
@@ -668,7 +668,8 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
if ((error = rpc_lookup_parent(path, nd)) != 0)
return ERR_PTR(error);
- dentry = rpc_lookup_create(nd->dentry, nd->last.name, nd->last.len, 1);
+ dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
+ 1);
if (IS_ERR(dentry))
rpc_release_path(nd);
return dentry;
@@ -677,7 +678,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
/**
* rpc_mkdir - Create a new directory in rpc_pipefs
* @path: path from the rpc_pipefs root to the new directory
- * @rpc_clnt: rpc client to associate with this directory
+ * @rpc_client: rpc client to associate with this directory
*
* This creates a directory at the given @path associated with
* @rpc_clnt, which will contain a file named "info" with some basic
@@ -695,7 +696,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
dentry = rpc_lookup_negative(path, &nd);
if (IS_ERR(dentry))
return dentry;
- dir = nd.dentry->d_inode;
+ dir = nd.path.dentry->d_inode;
if ((error = __rpc_mkdir(dir, dentry)) != 0)
goto err_dput;
RPC_I(dentry->d_inode)->private = rpc_client;
@@ -748,6 +749,7 @@ rpc_rmdir(struct dentry *dentry)
* @private: private data to associate with the pipe, for the caller's use
* @ops: operations defining the behavior of the pipe: upcall, downcall,
* release_pipe, and destroy_msg.
+ * @flags: rpc_inode flags
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index cfcade906a5..d5553b8179f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(xprt_register_transport);
/**
* xprt_unregister_transport - unregister a transport implementation
- * transport: transport to unregister
+ * @transport: transport to unregister
*
* Returns:
* 0: transport successfully unregistered
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 3e321949e1d..0598b229c11 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -159,7 +159,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
BUG_ON(sge_count >= 32);
dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
"write_len=%d, xdr_sge=%p, sge_count=%d\n",
- rmr, to, xdr_off, write_len, xdr_sge, sge_count);
+ rmr, (unsigned long long)to, xdr_off,
+ write_len, xdr_sge, sge_count);
ctxt = svc_rdma_get_context(xprt);
ctxt->count = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index eea75888805..b8788fd5e3c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -718,16 +718,16 @@ static struct sock *unix_find_other(struct net *net,
goto put_fail;
err = -ECONNREFUSED;
- if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
+ if (!S_ISSOCK(nd.path.dentry->d_inode->i_mode))
goto put_fail;
- u=unix_find_socket_byinode(net, nd.dentry->d_inode);
+ u = unix_find_socket_byinode(net, nd.path.dentry->d_inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
- touch_atime(nd.mnt, nd.dentry);
+ touch_atime(nd.path.mnt, nd.path.dentry);
- path_release(&nd);
+ path_put(&nd.path);
err=-EPROTOTYPE;
if (u->sk_type != type) {
@@ -748,7 +748,7 @@ static struct sock *unix_find_other(struct net *net,
return u;
put_fail:
- path_release(&nd);
+ path_put(&nd.path);
fail:
*error=err;
return NULL;
@@ -819,12 +819,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
*/
mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current->fs->umask);
- err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+ err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- dput(nd.dentry);
- nd.dentry = dentry;
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(nd.path.dentry);
+ nd.path.dentry = dentry;
addr->hash = UNIX_HASH_SIZE;
}
@@ -842,8 +842,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
list = &unix_socket_table[addr->hash];
} else {
list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
- u->dentry = nd.dentry;
- u->mnt = nd.mnt;
+ u->dentry = nd.path.dentry;
+ u->mnt = nd.path.mnt;
}
err = 0;
@@ -861,8 +861,8 @@ out:
out_mknod_dput:
dput(dentry);
out_mknod_unlock:
- mutex_unlock(&nd.dentry->d_inode->i_mutex);
- path_release(&nd);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
out_mknod_parent:
if (err==-EEXIST)
err=-EADDRINUSE;
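
The nameidata churn in the sunrpc and af_unix hunks tracks the VFS consolidation of (dentry, vfsmount) pairs into struct path: nd.dentry and nd.mnt become nd.path.dentry and nd.path.mnt, path_release(nd) becomes path_put(&nd.path), and helpers such as d_path() and seq_path() now take a struct path pointer directly.
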
diff --git a/samples/markers/probe-example.c b/samples/markers/probe-example.c
index a3679753561..c8e099d4d1f 100644
--- a/samples/markers/probe-example.c
+++ b/samples/markers/probe-example.c
@@ -20,31 +20,27 @@ struct probe_data {
marker_probe_func *probe_func;
};
-void probe_subsystem_event(const struct marker *mdata, void *private,
- const char *format, ...)
+void probe_subsystem_event(void *probe_data, void *call_data,
+ const char *format, va_list *args)
{
- va_list ap;
/* Declare args */
unsigned int value;
const char *mystr;
/* Assign args */
- va_start(ap, format);
- value = va_arg(ap, typeof(value));
- mystr = va_arg(ap, typeof(mystr));
+ value = va_arg(*args, typeof(value));
+ mystr = va_arg(*args, typeof(mystr));
/* Call printk */
- printk(KERN_DEBUG "Value %u, string %s\n", value, mystr);
+ printk(KERN_INFO "Value %u, string %s\n", value, mystr);
/* or count, check rights, serialize data in a buffer */
-
- va_end(ap);
}
atomic_t eventb_count = ATOMIC_INIT(0);
-void probe_subsystem_eventb(const struct marker *mdata, void *private,
- const char *format, ...)
+void probe_subsystem_eventb(void *probe_data, void *call_data,
+ const char *format, va_list *args)
{
/* Increment counter */
atomic_inc(&eventb_count);
@@ -72,10 +68,6 @@ static int __init probe_init(void)
if (result)
printk(KERN_INFO "Unable to register probe %s\n",
probe_array[i].name);
- result = marker_arm(probe_array[i].name);
- if (result)
- printk(KERN_INFO "Unable to arm probe %s\n",
- probe_array[i].name);
}
return 0;
}
@@ -85,7 +77,8 @@ static void __exit probe_fini(void)
int i;
for (i = 0; i < ARRAY_SIZE(probe_array); i++)
- marker_probe_unregister(probe_array[i].name);
+ marker_probe_unregister(probe_array[i].name,
+ probe_array[i].probe_func, &probe_array[i]);
printk(KERN_INFO "Number of event b : %u\n",
atomic_read(&eventb_count));
}
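
The sample now matches the reworked marker probe API: a probe receives its private data and a va_list pointer instead of declaring its own varargs, marker_arm() is gone because registration arms the marker, and unregistration identifies the probe by the same (name, probe function, private data) triple. A condensed sketch of the registration side, assuming the marker_probe_register() prototype of this series (the format string is illustrative):

        static struct probe_data probe_array[] = {
                { .name = "subsystem_event",
                  .format = "integer %d string %s",
                  .probe_func = probe_subsystem_event },
        };

        static int __init probe_init(void)
        {
                /* A successful registration also arms the marker; the same
                 * triple is later passed to marker_probe_unregister(). */
                return marker_probe_register(probe_array[0].name,
                                             probe_array[0].format,
                                             probe_array[0].probe_func,
                                             &probe_array[0]);
        }
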
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index da3559ea92e..d64e6badc94 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -39,10 +39,13 @@ escsq = $(subst $(squote),'\$(squote)',$1)
# - If they are equal no change, and no timestamp update
# - stdin is piped in from the first prerequisite ($<) so one has
# to specify a valid file as first prerequisite (often the kbuild file)
+ chk_filechk = :
quiet_chk_filechk = echo ' CHK $@'
silent_chk_filechk = :
+ upd_filechk = :
quiet_upd_filechk = echo ' UPD $@'
silent_upd_filechk = :
+
define filechk
$(Q)set -e; \
$($(quiet)chk_filechk); \
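
The two bare definitions cover verbose builds: $($(quiet)chk_filechk) expands to $(quiet_chk_filechk) by default, to $(silent_chk_filechk) under make -s, and to plain $(chk_filechk) when V=1 leaves $(quiet) empty, so without "chk_filechk = :" and "upd_filechk = :" the filechk recipe would presumably leave an empty command between two semicolons and trip up the shell in verbose mode.
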
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 65e707e1ffc..cfc004e0441 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -13,6 +13,7 @@
# 2) modpost is then used to
# 3) create one <module>.mod.c file pr. module
# 4) create one Module.symvers file with CRC for all exported symbols
+# 4a) [CONFIG_MARKERS] create one Module.markers file listing defined markers
# 5) compile all <module>.mod.c files
# 6) final link of the module to a <module.ko> file
@@ -45,6 +46,10 @@ include scripts/Makefile.lib
kernelsymfile := $(objtree)/Module.symvers
modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
+kernelmarkersfile := $(objtree)/Module.markers
+modulemarkersfile := $(firstword $(KBUILD_EXTMOD))/Module.markers
+
+markersfile = $(if $(KBUILD_EXTMOD),$(modulemarkersfile),$(kernelmarkersfile))
# Step 1), find all modules listed in $(MODVERDIR)/
__modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
@@ -63,6 +68,8 @@ modpost = scripts/mod/modpost \
$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
+ $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
+ $(if $(CONFIG_MARKERS),-M $(markersfile)) \
$(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
@@ -82,6 +89,10 @@ vmlinux.o: FORCE
$(symverfile): __modpost ;
$(modules:.ko=.mod.c): __modpost ;
+ifdef CONFIG_MARKERS
+$(markersfile): __modpost ;
+endif
+
# Step 5), compile all *.mod.c files
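
Net effect of this hunk: with CONFIG_MARKERS set, the modpost command line gains -K $(objtree)/Module.markers (an existing marker list to read) and -M pointing at the file to write, which is $(KBUILD_EXTMOD)/Module.markers for external module builds and the kernel's own Module.markers otherwise; the markers file is also declared as a by-product of the __modpost rule so it is regenerated together with the other modpost outputs.
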
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 6c18a14386a..26146cbaa50 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1624,7 +1624,6 @@ sub dump_function($$) {
$prototype =~ s/^static +//;
$prototype =~ s/^extern +//;
- $prototype =~ s/^fastcall +//;
$prototype =~ s/^asmlinkage +//;
$prototype =~ s/^inline +//;
$prototype =~ s/^__inline__ +//;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index dbe1fb5e8cc..61742771c65 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -11,6 +11,8 @@
* Usage: modpost vmlinux module1.o module2.o ...
*/
+#define _GNU_SOURCE
+#include <stdio.h>
#include <ctype.h>
#include "modpost.h"
#include "../../include/linux/license.h"
@@ -435,6 +437,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
+ else if (strcmp(secname, "__markers_strings") == 0)
+ info->markers_strings_sec = i;
if (sechdrs[i].sh_type != SHT_SYMTAB)
continue;
@@ -1470,6 +1474,62 @@ static void check_sec_ref(struct module *mod, const char *modname,
}
}
+static void get_markers(struct elf_info *info, struct module *mod)
+{
+ const Elf_Shdr *sh = &info->sechdrs[info->markers_strings_sec];
+ const char *strings = (const char *) info->hdr + sh->sh_offset;
+ const Elf_Sym *sym, *first_sym, *last_sym;
+ size_t n;
+
+ if (!info->markers_strings_sec)
+ return;
+
+ /*
+ * First count the strings. We look for all the symbols defined
+ * in the __markers_strings section named __mstrtab_*. For
+ * these local names, the compiler puts a random .NNN suffix on,
+ * so the names don't correspond exactly.
+ */
+ first_sym = last_sym = NULL;
+ n = 0;
+ for (sym = info->symtab_start; sym < info->symtab_stop; sym++)
+ if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT &&
+ sym->st_shndx == info->markers_strings_sec &&
+ !strncmp(info->strtab + sym->st_name,
+ "__mstrtab_", sizeof "__mstrtab_" - 1)) {
+ if (first_sym == NULL)
+ first_sym = sym;
+ last_sym = sym;
+ ++n;
+ }
+
+ if (n == 0)
+ return;
+
+ /*
+ * Now collect each name and format into a line for the output.
+ * Lines look like:
+ * marker_name vmlinux marker %s format %d
+ * The format string after the second \t can use whitespace.
+ */
+ mod->markers = NOFAIL(malloc(sizeof mod->markers[0] * n));
+ mod->nmarkers = n;
+
+ n = 0;
+ for (sym = first_sym; sym <= last_sym; sym++)
+ if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT &&
+ sym->st_shndx == info->markers_strings_sec &&
+ !strncmp(info->strtab + sym->st_name,
+ "__mstrtab_", sizeof "__mstrtab_" - 1)) {
+ const char *name = strings + sym->st_value;
+ const char *fmt = strchr(name, '\0') + 1;
+ char *line = NULL;
+ asprintf(&line, "%s\t%s\t%s\n", name, mod->name, fmt);
+ NOFAIL(line);
+ mod->markers[n++] = line;
+ }
+}
+
static void read_symbols(char *modname)
{
const char *symname;
@@ -1521,6 +1581,8 @@ static void read_symbols(char *modname)
get_src_version(modname, mod->srcversion,
sizeof(mod->srcversion)-1);
+ get_markers(&info, mod);
+
parse_elf_finish(&info);
/* Our trick to get versioning for struct_module - it's
@@ -1867,16 +1929,104 @@ static void write_dump(const char *fname)
write_if_changed(&buf, fname);
}
+static void add_marker(struct module *mod, const char *name, const char *fmt)
+{
+ char *line = NULL;
+ asprintf(&line, "%s\t%s\t%s\n", name, mod->name, fmt);
+ NOFAIL(line);
+
+ mod->markers = NOFAIL(realloc(mod->markers, ((mod->nmarkers + 1) *
+ sizeof mod->markers[0])));
+ mod->markers[mod->nmarkers++] = line;
+}
+
+static void read_markers(const char *fname)
+{
+ unsigned long size, pos = 0;
+ void *file = grab_file(fname, &size);
+ char *line;
+
+ if (!file) /* No old markers, silently ignore */
+ return;
+
+ while ((line = get_next_line(&pos, file, size))) {
+ char *marker, *modname, *fmt;
+ struct module *mod;
+
+ marker = line;
+ modname = strchr(marker, '\t');
+ if (!modname)
+ goto fail;
+ *modname++ = '\0';
+ fmt = strchr(modname, '\t');
+ if (!fmt)
+ goto fail;
+ *fmt++ = '\0';
+ if (*marker == '\0' || *modname == '\0')
+ goto fail;
+
+ mod = find_module(modname);
+ if (!mod) {
+ if (is_vmlinux(modname))
+ have_vmlinux = 1;
+ mod = new_module(NOFAIL(strdup(modname)));
+ mod->skip = 1;
+ }
+
+ add_marker(mod, marker, fmt);
+ }
+ return;
+fail:
+ fatal("parse error in markers list file\n");
+}
+
+static int compare_strings(const void *a, const void *b)
+{
+ return strcmp(*(const char **) a, *(const char **) b);
+}
+
+static void write_markers(const char *fname)
+{
+ struct buffer buf = { };
+ struct module *mod;
+ size_t i;
+
+ for (mod = modules; mod; mod = mod->next)
+ if ((!external_module || !mod->skip) && mod->markers != NULL) {
+ /*
+ * Sort the strings so we can skip duplicates when
+ * we write them out.
+ */
+ qsort(mod->markers, mod->nmarkers,
+ sizeof mod->markers[0], &compare_strings);
+ for (i = 0; i < mod->nmarkers; ++i) {
+ char *line = mod->markers[i];
+ buf_write(&buf, line, strlen(line));
+ while (i + 1 < mod->nmarkers &&
+ !strcmp(mod->markers[i],
+ mod->markers[i + 1]))
+ free(mod->markers[i++]);
+ free(mod->markers[i]);
+ }
+ free(mod->markers);
+ mod->markers = NULL;
+ }
+
+ write_if_changed(&buf, fname);
+}
+
int main(int argc, char **argv)
{
struct module *mod;
struct buffer buf = { };
char *kernel_read = NULL, *module_read = NULL;
char *dump_write = NULL;
+ char *markers_read = NULL;
+ char *markers_write = NULL;
int opt;
int err;
- while ((opt = getopt(argc, argv, "i:I:msSo:aw")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:msSo:awM:K:")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
@@ -1903,6 +2053,12 @@ int main(int argc, char **argv)
case 'w':
warn_unresolved = 1;
break;
+ case 'M':
+ markers_write = optarg;
+ break;
+ case 'K':
+ markers_read = optarg;
+ break;
default:
exit(1);
}
@@ -1950,5 +2106,11 @@ int main(int argc, char **argv)
"'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
sec_mismatch_count);
+ if (markers_read)
+ read_markers(markers_read);
+
+ if (markers_write)
+ write_markers(markers_write);
+
return err;
}
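
Taken together, the modpost side collects each (name, format) string pair that the compiler emitted as __mstrtab_* symbols in a module's __markers_strings section, merges in any pre-existing list read via -K, and writes one tab-separated line per marker via -M. Per the comment in get_markers(), a line has the shape (marker name and format purely illustrative, fields separated by tabs):

        marker_name     vmlinux     marker %s format %d

Duplicates within a module are skipped by the qsort()/compare_strings() pass in write_markers() before write_if_changed() touches the output file.
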
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 999f15e0e00..565c5872407 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -112,6 +112,8 @@ struct module {
int has_init;
int has_cleanup;
struct buffer dev_table_buf;
+ char **markers;
+ size_t nmarkers;
char srcversion[25];
};
@@ -126,6 +128,7 @@ struct elf_info {
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
+ Elf_Section markers_strings_sec;
const char *strtab;
char *modinfo;
unsigned int modinfo_len;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e8529e2f51e..187964e88af 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -568,10 +568,11 @@ void avc_audit(u32 ssid, u32 tsid,
audit_log_format(ab, " capability=%d", a->u.cap);
break;
case AVC_AUDIT_DATA_FS:
- if (a->u.fs.dentry) {
- struct dentry *dentry = a->u.fs.dentry;
- if (a->u.fs.mnt) {
- audit_log_d_path(ab, "path=", dentry, a->u.fs.mnt);
+ if (a->u.fs.path.dentry) {
+ struct dentry *dentry = a->u.fs.path.dentry;
+ if (a->u.fs.path.mnt) {
+ audit_log_d_path(ab, "path=",
+ &a->u.fs.path);
} else {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, dentry->d_name.name);
@@ -626,8 +627,12 @@ void avc_audit(u32 ssid, u32 tsid,
case AF_UNIX:
u = unix_sk(sk);
if (u->dentry) {
+ struct path path = {
+ .dentry = u->dentry,
+ .mnt = u->mnt
+ };
audit_log_d_path(ab, "path=",
- u->dentry, u->mnt);
+ &path);
break;
}
if (!u->addr)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e5ed0751030..75c2e99bfb8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1272,12 +1272,18 @@ static int task_has_perm(struct task_struct *tsk1,
SECCLASS_PROCESS, perms, NULL);
}
+#if CAP_LAST_CAP > 63
+#error Fix SELinux to handle capabilities > 63.
+#endif
+
/* Check whether a task is allowed to use a capability. */
static int task_has_capability(struct task_struct *tsk,
int cap)
{
struct task_security_struct *tsec;
struct avc_audit_data ad;
+ u16 sclass;
+ u32 av = CAP_TO_MASK(cap);
tsec = tsk->security;
@@ -1285,8 +1291,19 @@ static int task_has_capability(struct task_struct *tsk,
ad.tsk = tsk;
ad.u.cap = cap;
- return avc_has_perm(tsec->sid, tsec->sid,
- SECCLASS_CAPABILITY, CAP_TO_MASK(cap), &ad);
+ switch (CAP_TO_INDEX(cap)) {
+ case 0:
+ sclass = SECCLASS_CAPABILITY;
+ break;
+ case 1:
+ sclass = SECCLASS_CAPABILITY2;
+ break;
+ default:
+ printk(KERN_ERR
+ "SELinux: out of range capability %d\n", cap);
+ BUG();
+ }
+ return avc_has_perm(tsec->sid, tsec->sid, sclass, av, &ad);
}
/* Check whether a task is allowed to use a system operation. */
@@ -1339,8 +1356,8 @@ static inline int dentry_has_perm(struct task_struct *tsk,
struct inode *inode = dentry->d_inode;
struct avc_audit_data ad;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.mnt = mnt;
- ad.u.fs.dentry = dentry;
+ ad.u.fs.path.mnt = mnt;
+ ad.u.fs.path.dentry = dentry;
return inode_has_perm(tsk, inode, av, &ad);
}
@@ -1358,15 +1375,12 @@ static int file_has_perm(struct task_struct *tsk,
{
struct task_security_struct *tsec = tsk->security;
struct file_security_struct *fsec = file->f_security;
- struct vfsmount *mnt = file->f_path.mnt;
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
struct avc_audit_data ad;
int rc;
AVC_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.mnt = mnt;
- ad.u.fs.dentry = dentry;
+ ad.u.fs.path = file->f_path;
if (tsec->sid != fsec->sid) {
rc = avc_has_perm(tsec->sid, fsec->sid,
@@ -1401,7 +1415,7 @@ static int may_create(struct inode *dir,
sbsec = dir->i_sb->s_security;
AVC_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.dentry = dentry;
+ ad.u.fs.path.dentry = dentry;
rc = avc_has_perm(tsec->sid, dsec->sid, SECCLASS_DIR,
DIR__ADD_NAME | DIR__SEARCH,
@@ -1459,7 +1473,7 @@ static int may_link(struct inode *dir,
isec = dentry->d_inode->i_security;
AVC_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.dentry = dentry;
+ ad.u.fs.path.dentry = dentry;
av = DIR__SEARCH;
av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
@@ -1506,7 +1520,7 @@ static inline int may_rename(struct inode *old_dir,
AVC_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.dentry = old_dentry;
+ ad.u.fs.path.dentry = old_dentry;
rc = avc_has_perm(tsec->sid, old_dsec->sid, SECCLASS_DIR,
DIR__REMOVE_NAME | DIR__SEARCH, &ad);
if (rc)
@@ -1522,7 +1536,7 @@ static inline int may_rename(struct inode *old_dir,
return rc;
}
- ad.u.fs.dentry = new_dentry;
+ ad.u.fs.path.dentry = new_dentry;
av = DIR__ADD_NAME | DIR__SEARCH;
if (new_dentry->d_inode)
av |= DIR__REMOVE_NAME;
@@ -1901,8 +1915,7 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm)
}
AVC_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.mnt = bprm->file->f_path.mnt;
- ad.u.fs.dentry = bprm->file->f_path.dentry;
+ ad.u.fs.path = bprm->file->f_path;
if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
newsid = tsec->sid;
@@ -2298,7 +2311,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, void *data)
return rc;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.dentry = sb->s_root;
+ ad.u.fs.path.dentry = sb->s_root;
return superblock_has_perm(current, sb, FILESYSTEM__MOUNT, &ad);
}
@@ -2307,7 +2320,7 @@ static int selinux_sb_statfs(struct dentry *dentry)
struct avc_audit_data ad;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.dentry = dentry->d_sb->s_root;
+ ad.u.fs.path.dentry = dentry->d_sb->s_root;
return superblock_has_perm(current, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
@@ -2324,10 +2337,10 @@ static int selinux_mount(char * dev_name,
return rc;
if (flags & MS_REMOUNT)
- return superblock_has_perm(current, nd->mnt->mnt_sb,
+ return superblock_has_perm(current, nd->path.mnt->mnt_sb,
FILESYSTEM__REMOUNT, NULL);
else
- return dentry_has_perm(current, nd->mnt, nd->dentry,
+ return dentry_has_perm(current, nd->path.mnt, nd->path.dentry,
FILE__MOUNTON);
}
@@ -2570,7 +2583,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value
return -EPERM;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.dentry = dentry;
+ ad.u.fs.path.dentry = dentry;
rc = avc_has_perm(tsec->sid, isec->sid, isec->sclass,
FILE__RELABELFROM, &ad);
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index 399f868c5c8..d5696690d3a 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -132,6 +132,9 @@
S_(SECCLASS_CAPABILITY, CAPABILITY__LEASE, "lease")
S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_WRITE, "audit_write")
S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_CONTROL, "audit_control")
+ S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
+ S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
+ S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index 84c9abc8097..75b41311ab8 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -533,6 +533,9 @@
#define CAPABILITY__LEASE 0x10000000UL
#define CAPABILITY__AUDIT_WRITE 0x20000000UL
#define CAPABILITY__AUDIT_CONTROL 0x40000000UL
+#define CAPABILITY__SETFCAP 0x80000000UL
+#define CAPABILITY2__MAC_OVERRIDE 0x00000001UL
+#define CAPABILITY2__MAC_ADMIN 0x00000002UL
#define NETLINK_ROUTE_SOCKET__IOCTL 0x00000001UL
#define NETLINK_ROUTE_SOCKET__READ 0x00000002UL
#define NETLINK_ROUTE_SOCKET__WRITE 0x00000004UL
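
A second capability class is needed because capabilities 32-63 no longer fit in the 32-bit access vector of SECCLASS_CAPABILITY. A worked example of the mapping done in task_has_capability() above, assuming the 2.6.25 definitions CAP_TO_INDEX(x) == ((x) >> 5), CAP_TO_MASK(x) == (1 << ((x) & 31)) and CAP_MAC_ADMIN == 33:

        /* cap == CAP_MAC_ADMIN (33):
         *   CAP_TO_INDEX(33) == 1  ->  sclass = SECCLASS_CAPABILITY2
         *   CAP_TO_MASK(33)  == 1 << (33 & 31) == 0x00000002
         *                    == CAPABILITY2__MAC_ADMIN above
         * cap == CAP_CHOWN (0):
         *   CAP_TO_INDEX(0)  == 0  ->  sclass = SECCLASS_CAPABILITY
         *   CAP_TO_MASK(0)   == 0x00000001 == CAPABILITY__CHOWN
         */
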
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 80c28fa6621..8e23d7a873a 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/in6.h>
+#include <linux/path.h>
#include <asm/system.h>
#include "flask.h"
#include "av_permissions.h"
@@ -30,8 +31,6 @@ extern int selinux_enforcing;
struct avc_entry;
struct task_struct;
-struct vfsmount;
-struct dentry;
struct inode;
struct sock;
struct sk_buff;
@@ -46,8 +45,7 @@ struct avc_audit_data {
struct task_struct *tsk;
union {
struct {
- struct vfsmount *mnt;
- struct dentry *dentry;
+ struct path path;
struct inode *inode;
} fs;
struct {
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
index b1b0d1d8f95..bd813c366e3 100644
--- a/security/selinux/include/class_to_string.h
+++ b/security/selinux/include/class_to_string.h
@@ -71,3 +71,4 @@
S_(NULL)
S_(NULL)
S_("peer")
+ S_("capability2")
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
index 09e9dd23ee1..febf8868e85 100644
--- a/security/selinux/include/flask.h
+++ b/security/selinux/include/flask.h
@@ -51,6 +51,7 @@
#define SECCLASS_DCCP_SOCKET 60
#define SECCLASS_MEMPROTECT 61
#define SECCLASS_PEER 68
+#define SECCLASS_CAPABILITY2 69
/*
* Security identifier indices for initial entities
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1c11e424585..2b5d6f72f67 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -325,7 +325,7 @@ static int smack_sb_statfs(struct dentry *dentry)
static int smack_sb_mount(char *dev_name, struct nameidata *nd,
char *type, unsigned long flags, void *data)
{
- struct superblock_smack *sbp = nd->mnt->mnt_sb->s_security;
+ struct superblock_smack *sbp = nd->path.mnt->mnt_sb->s_security;
return smk_curacc(sbp->smk_floor, MAY_WRITE);
}
@@ -701,7 +701,7 @@ static int smack_inode_getsecurity(const struct inode *inode,
return -EOPNOTSUPP;
sock = SOCKET_I(ip);
- if (sock == NULL)
+ if (sock == NULL || sock->sk == NULL)
return -EOPNOTSUPP;
ssp = sock->sk->sk_security;
@@ -1280,10 +1280,11 @@ static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
*/
static int smack_netlabel(struct sock *sk)
{
- struct socket_smack *ssp = sk->sk_security;
+ struct socket_smack *ssp;
struct netlbl_lsm_secattr secattr;
int rc = 0;
+ ssp = sk->sk_security;
netlbl_secattr_init(&secattr);
smack_to_secattr(ssp->smk_out, &secattr);
if (secattr.flags != NETLBL_SECATTR_NONE)
@@ -1331,7 +1332,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
return -EOPNOTSUPP;
sock = SOCKET_I(inode);
- if (sock == NULL)
+ if (sock == NULL || sock->sk == NULL)
return -EOPNOTSUPP;
ssp = sock->sk->sk_security;
@@ -1362,7 +1363,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
static int smack_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
- if (family != PF_INET)
+ if (family != PF_INET || sock->sk == NULL)
return 0;
/*
* Set the outbound netlbl.
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index f97c1ba43a2..47cfa5186e3 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -149,13 +149,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
}
spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_KMOD
- if (!in_interrupt() && current->fs->root) {
+ if (!in_interrupt()) {
static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
static char card_requested[SNDRV_CARDS];
if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
int idx;
- if (! client_requested[clientid] && current->fs->root) {
+ if (!client_requested[clientid]) {
client_requested[clientid] = 1;
for (idx = 0; idx < 15; idx++) {
if (seq_client_load[idx] < 0)
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
index 155dc7da472..2f00ad28a2b 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq/seq_device.c
@@ -149,9 +149,6 @@ void snd_seq_device_load_drivers(void)
if (snd_seq_in_init)
return;
- if (! current->fs->root)
- return;
-
mutex_lock(&ops_mutex);
list_for_each_entry(ops, &opslist, list) {
if (! (ops->driver & DRIVER_LOADED) &&
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 00cca4d6e56..812f91b3de5 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -71,8 +71,6 @@ static DEFINE_MUTEX(sound_mutex);
*/
void snd_request_card(int card)
{
- if (! current->fs->root)
- return;
if (snd_card_locked(card))
return;
if (card < 0 || card >= cards_limit)
@@ -86,8 +84,6 @@ static void snd_request_other(int minor)
{
char *str;
- if (! current->fs->root)
- return;
switch (minor) {
case SNDRV_MINOR_SEQUENCER: str = "snd-seq"; break;
case SNDRV_MINOR_TIMER: str = "snd-timer"; break;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index aece465934b..9d8184a2c2d 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -150,8 +150,6 @@ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
static void snd_timer_request(struct snd_timer_id *tid)
{
- if (! current->fs->root)
- return;
switch (tid->dev_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
if (tid->device < timer_limit)
diff --git a/sound/ppc/daca.c b/sound/ppc/daca.c
index 8432c16cd6f..ca9452901a5 100644
--- a/sound/ppc/daca.c
+++ b/sound/ppc/daca.c
@@ -250,9 +250,8 @@ int __init snd_pmac_daca_init(struct snd_pmac *chip)
struct pmac_daca *mix;
#ifdef CONFIG_KMOD
- if (current->fs->root)
- request_module("i2c-powermac");
-#endif /* CONFIG_KMOD */
+ request_module("i2c-powermac");
+#endif /* CONFIG_KMOD */
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
if (! mix)
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 71a7a976542..3f8d7164cef 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1351,9 +1351,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip)
char *chipname;
#ifdef CONFIG_KMOD
- if (current->fs->root)
- request_module("i2c-powermac");
-#endif /* CONFIG_KMOD */
+ request_module("i2c-powermac");
+#endif /* CONFIG_KMOD */
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
if (! mix)