-rw-r--r--Documentation/DocBook/kernel-api.tmpl4
-rw-r--r--Documentation/filesystems/ecryptfs.txt (renamed from Documentation/ecryptfs.txt)0
-rw-r--r--Documentation/filesystems/proc.txt15
-rw-r--r--Documentation/kernel-parameters.txt26
-rw-r--r--Documentation/oops-tracing.txt2
-rw-r--r--Documentation/power/freezing-of-tasks.txt160
-rw-r--r--Documentation/power/kernel_threads.txt40
-rw-r--r--Documentation/power/swsusp.txt18
-rw-r--r--Documentation/rtc.txt2
-rw-r--r--Documentation/spi/spi-lm70llp69
-rw-r--r--Documentation/sysctl/vm.txt3
-rw-r--r--Documentation/vm/slub.txt137
-rw-r--r--arch/alpha/kernel/ptrace.c4
-rw-r--r--arch/alpha/kernel/smp.c6
-rw-r--r--arch/alpha/kernel/traps.c1
-rw-r--r--arch/alpha/lib/checksum.c1
-rw-r--r--arch/arm/kernel/ptrace.c15
-rw-r--r--arch/arm/kernel/traps.c1
-rw-r--r--arch/arm/mach-at91/board-csb337.c10
-rw-r--r--arch/arm/mach-iop32x/n2100.c10
-rw-r--r--arch/arm26/kernel/ptrace.c15
-rw-r--r--arch/arm26/kernel/traps.c1
-rw-r--r--arch/avr32/kernel/ptrace.c13
-rw-r--r--arch/avr32/kernel/traps.c1
-rw-r--r--arch/cris/arch-v10/kernel/ptrace.c21
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c7
-rw-r--r--arch/frv/kernel/ptrace.c16
-rw-r--r--arch/h8300/kernel/ptrace.c5
-rw-r--r--arch/i386/Kconfig4
-rw-r--r--arch/i386/Makefile1
-rw-r--r--arch/i386/kernel/apm.c2
-rw-r--r--arch/i386/kernel/cpu/mcheck/therm_throt.c6
-rw-r--r--arch/i386/kernel/efi.c2
-rw-r--r--arch/i386/kernel/io_apic.c1
-rw-r--r--arch/i386/kernel/nmi.c8
-rw-r--r--arch/i386/kernel/ptrace.c17
-rw-r--r--arch/i386/kernel/smpcommon.c8
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/video/Makefile1
-rw-r--r--arch/i386/video/fbdev.c32
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c20
-rw-r--r--arch/ia64/hp/sim/boot/fw-emu.c5
-rw-r--r--arch/ia64/hp/sim/simserial.c4
-rw-r--r--arch/ia64/kernel/efi.c1
-rw-r--r--arch/ia64/kernel/fsys.S4
-rw-r--r--arch/ia64/kernel/traps.c1
-rw-r--r--arch/ia64/lib/checksum.c1
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c3
-rw-r--r--arch/m32r/kernel/ptrace.c19
-rw-r--r--arch/m68k/kernel/ptrace.c8
-rw-r--r--arch/m68k/kernel/traps.c1
-rw-r--r--arch/m68k/lib/checksum.c1
-rw-r--r--arch/m68knommu/kernel/ptrace.c17
-rw-r--r--arch/m68knommu/kernel/traps.c1
-rw-r--r--arch/mips/kernel/ptrace.c18
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c1
-rw-r--r--arch/mips/sibyte/sb1250/setup.c1
-rw-r--r--arch/parisc/kernel/ptrace.c13
-rw-r--r--arch/parisc/kernel/traps.c1
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/ptrace.c18
-rw-r--r--arch/powerpc/kernel/traps.c1
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c125
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c15
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c4
-rw-r--r--arch/powerpc/platforms/ps3/spu.c6
-rw-r--r--arch/ppc/kernel/traps.c1
-rw-r--r--arch/ppc/syslib/virtex_devices.h7
-rw-r--r--arch/s390/kernel/ptrace.c11
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/sh/kernel/ptrace.c18
-rw-r--r--arch/sh/kernel/traps.c1
-rw-r--r--arch/sh64/kernel/ptrace.c17
-rw-r--r--arch/sh64/lib/c-checksum.c1
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/kernel/traps.c1
-rw-r--r--arch/sparc64/defconfig155
-rw-r--r--arch/sparc64/kernel/hvtramp.S3
-rw-r--r--arch/sparc64/kernel/signal.c15
-rw-r--r--arch/sparc64/kernel/traps.c1
-rw-r--r--arch/um/drivers/pcap_user.c2
-rw-r--r--arch/um/kernel/ptrace.c18
-rw-r--r--arch/v850/kernel/ptrace.c14
-rw-r--r--arch/x86_64/Kconfig4
-rw-r--r--arch/x86_64/kernel/nmi.c8
-rw-r--r--arch/x86_64/kernel/ptrace.c17
-rw-r--r--arch/x86_64/kernel/smp.c12
-rw-r--r--arch/x86_64/kernel/traps.c2
-rw-r--r--arch/xtensa/kernel/ptrace.c17
-rw-r--r--arch/xtensa/kernel/traps.c1
-rw-r--r--block/as-iosched.c3
-rw-r--r--block/cfq-iosched.c18
-rw-r--r--block/deadline-iosched.c3
-rw-r--r--block/elevator.c3
-rw-r--r--block/genhd.c20
-rw-r--r--block/ll_rw_blk.c4
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/atm/ambassador.c4
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/block/Kconfig6
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/xsysace.c1164
-rw-r--r--drivers/block/z2ram.c4
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/apm-emulation.c12
-rw-r--r--drivers/char/cyclades.c367
-rw-r--r--drivers/char/drm/drm_stub.c2
-rw-r--r--drivers/char/drm/sis_mm.c2
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/isicom.c93
-rw-r--r--drivers/char/istallion.c9
-rw-r--r--drivers/char/moxa.c37
-rw-r--r--drivers/char/riscom8.c12
-rw-r--r--drivers/char/specialix.c16
-rw-r--r--drivers/char/stallion.c5
-rw-r--r--drivers/char/vt.c35
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/hwmon/lm70.c4
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/chips/Kconfig10
-rw-r--r--drivers/i2c/chips/Makefile1
-rw-r--r--drivers/i2c/chips/menelaus.c1281
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ieee1394/ieee1394_core.c3
-rw-r--r--drivers/ieee1394/nodemgr.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/serio/serio.c1
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c1
-rw-r--r--drivers/isdn/Kconfig15
-rw-r--r--drivers/isdn/capi/Kconfig7
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/isdn/capi/kcapi_proc.c28
-rw-r--r--drivers/isdn/hardware/Kconfig1
-rw-r--r--drivers/isdn/hardware/avm/Kconfig23
-rw-r--r--drivers/isdn/hardware/eicon/Kconfig22
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c1
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c108
-rw-r--r--drivers/isdn/hisax/config.c243
-rw-r--r--drivers/isdn/hisax/enternow_pci.c165
-rw-r--r--drivers/isdn/hisax/hfc_pci.c191
-rw-r--r--drivers/isdn/hisax/nj_s.c194
-rw-r--r--drivers/isdn/hisax/nj_u.c167
-rw-r--r--drivers/isdn/hisax/sedlbauer.c8
-rw-r--r--drivers/isdn/i4l/Kconfig7
-rw-r--r--drivers/kvm/Kconfig9
-rw-r--r--drivers/kvm/kvm.h116
-rw-r--r--drivers/kvm/kvm_main.c456
-rw-r--r--drivers/kvm/mmu.c292
-rw-r--r--drivers/kvm/paging_tmpl.h273
-rw-r--r--drivers/kvm/svm.c59
-rw-r--r--drivers/kvm/svm.h3
-rw-r--r--drivers/kvm/vmx.c652
-rw-r--r--drivers/kvm/x86_emulate.c44
-rw-r--r--drivers/macintosh/therm_adt746x.c1
-rw-r--r--drivers/macintosh/windfarm_core.c1
-rw-r--r--drivers/md/Kconfig15
-rw-r--r--drivers/md/bitmap.c169
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c71
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c1
-rw-r--r--drivers/media/video/msp3400-kthreads.c6
-rw-r--r--drivers/media/video/tvaudio.c2
-rw-r--r--drivers/media/video/video-buf-dvb.c1
-rw-r--r--drivers/media/video/vivi.c1
-rw-r--r--drivers/message/i2o/debug.c134
-rw-r--r--drivers/message/i2o/exec-osm.c6
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/message/i2o/i2o_config.c62
-rw-r--r--drivers/mfd/ucb1x00-ts.c1
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/ibmasm/command.c14
-rw-r--r--drivers/misc/ibmasm/dot_command.c10
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c8
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h10
-rw-r--r--drivers/misc/ibmasm/ibmasm.h70
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c24
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h16
-rw-r--r--drivers/misc/ibmasm/module.c10
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c10
-rw-r--r--drivers/misc/ibmasm/remote.c37
-rw-r--r--drivers/misc/ibmasm/remote.h8
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/mmc/card/queue.c7
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/ubi/eba.c4
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/atl1/atl1_main.c1
-rw-r--r--drivers/net/eepro100.c7
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c7
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/tokenring/smctr.c6
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/sbni.c7
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/libertas/main.c1
-rw-r--r--drivers/parisc/hppb.c1
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c548
-rw-r--r--drivers/pnp/pnpbios/core.c1
-rw-r--r--drivers/rtc/Kconfig64
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/rtc-at32ap700x.c317
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-ds1216.c226
-rw-r--r--drivers/rtc/rtc-ds1307.c300
-rw-r--r--drivers/rtc/rtc-m41t80.c917
-rw-r--r--drivers/rtc/rtc-m48t59.c491
-rw-r--r--drivers/rtc/rtc-rs5c372.c95
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/sbus/char/jsflash.c3
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/serial/Kconfig28
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/sb1250-duart.c972
-rw-r--r--drivers/spi/Kconfig45
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel_spi.c185
-rw-r--r--drivers/spi/au1550_spi.c9
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c9
-rw-r--r--drivers/spi/omap2_mcspi.c1081
-rw-r--r--drivers/spi/omap_uwire.c9
-rw-r--r--drivers/spi/pxa2xx_spi.c9
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/spi/spi_bitbang.c8
-rw-r--r--drivers/spi/spi_imx.c24
-rw-r--r--drivers/spi/spi_lm70llp.c361
-rw-r--r--drivers/spi/spi_mpc83xx.c47
-rw-r--r--drivers/spi/spi_s3c24xx.c8
-rw-r--r--drivers/spi/spi_txx9.c474
-rw-r--r--drivers/spi/spidev.c6
-rw-r--r--drivers/spi/tle62x0.c328
-rw-r--r--drivers/spi/xilinx_spi.c434
-rw-r--r--drivers/telephony/Kconfig1
-rw-r--r--drivers/telephony/ixj.c7
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/atm/ueagle-atm.c1
-rw-r--r--drivers/usb/core/hub.c1
-rw-r--r--drivers/usb/gadget/file_storage.c3
-rw-r--r--drivers/usb/misc/auerswald.c4
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/68328fb.c2
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/aty/ati_ids.h1
-rw-r--r--drivers/video/aty/atyfb_base.c2
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/console/Kconfig16
-rw-r--r--drivers/video/console/fbcon.c366
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/cyblafb.c21
-rw-r--r--drivers/video/epson1355fb.c21
-rw-r--r--drivers/video/fbmem.c299
-rw-r--r--drivers/video/fm2fb.c16
-rw-r--r--drivers/video/gbefb.c41
-rw-r--r--drivers/video/i810/i810.h2
-rw-r--r--drivers/video/intelfb/intelfb.h2
-rw-r--r--drivers/video/logo/Kconfig5
-rw-r--r--drivers/video/logo/Makefile2
-rw-r--r--drivers/video/logo/logo_spe_clut224.ppm283
-rw-r--r--drivers/video/macfb.c93
-rw-r--r--drivers/video/macmodes.c5
-rw-r--r--drivers/video/macmodes.h8
-rw-r--r--drivers/video/matrox/matroxfb_accel.c11
-rw-r--r--drivers/video/matrox/matroxfb_base.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c6
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.h2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c9
-rw-r--r--drivers/video/nvidia/nv_hw.c62
-rw-r--r--drivers/video/nvidia/nv_setup.c12
-rw-r--r--drivers/video/nvidia/nv_type.h1
-rw-r--r--drivers/video/nvidia/nvidia.c9
-rw-r--r--drivers/video/offb.c2
-rw-r--r--drivers/video/omap/Kconfig58
-rw-r--r--drivers/video/omap/Makefile29
-rw-r--r--drivers/video/omap/blizzard.c1568
-rw-r--r--drivers/video/omap/dispc.c1502
-rw-r--r--drivers/video/omap/dispc.h43
-rw-r--r--drivers/video/omap/hwa742.c1077
-rw-r--r--drivers/video/omap/lcd_h3.c141
-rw-r--r--drivers/video/omap/lcd_h4.c117
-rw-r--r--drivers/video/omap/lcd_inn1510.c124
-rw-r--r--drivers/video/omap/lcd_inn1610.c150
-rw-r--r--drivers/video/omap/lcd_osk.c144
-rw-r--r--drivers/video/omap/lcd_palmte.c123
-rw-r--r--drivers/video/omap/lcd_palmtt.c127
-rw-r--r--drivers/video/omap/lcd_palmz71.c123
-rw-r--r--drivers/video/omap/lcd_sx1.c334
-rw-r--r--drivers/video/omap/lcdc.c893
-rw-r--r--drivers/video/omap/lcdc.h7
-rw-r--r--drivers/video/omap/omapfb_main.c1941
-rw-r--r--drivers/video/omap/rfbi.c588
-rw-r--r--drivers/video/omap/sossi.c686
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pm2fb.c202
-rw-r--r--drivers/video/pm3fb.c270
-rw-r--r--drivers/video/ps3fb.c1
-rw-r--r--drivers/video/pvr2fb.c7
-rw-r--r--drivers/video/q40fb.c2
-rw-r--r--drivers/video/riva/riva_hw.c7
-rw-r--r--drivers/video/sgivwfb.c2
-rw-r--r--drivers/video/sis/sis.h2
-rw-r--r--drivers/video/sis/sis_main.c6
-rw-r--r--drivers/video/tgafb.c2
-rw-r--r--drivers/video/tridentfb.c30
-rw-r--r--drivers/video/tx3912fb.c2
-rw-r--r--drivers/video/vt8623fb.c42
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--fs/9p/v9fs.c2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/anon_inodes.c1
-rw-r--r--fs/attr.c4
-rw-r--r--fs/buffer.c58
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/connect.c1
-rw-r--r--fs/cifs/export.c1
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/dquot.c7
-rw-r--r--fs/ecryptfs/inode.c4
-rw-r--r--fs/efs/namei.c32
-rw-r--r--fs/efs/super.c2
-rw-r--r--fs/exportfs/expfs.c439
-rw-r--r--fs/ext2/acl.c2
-rw-r--r--fs/ext2/ioctl.c4
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext3/acl.c2
-rw-r--r--fs/ext3/ioctl.c6
-rw-r--r--fs/ext3/super.c1
-rw-r--r--fs/ext4/acl.c2
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/fat/inode.c1
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/generic_acl.c2
-rw-r--r--fs/gfs2/acl.c2
-rw-r--r--fs/gfs2/ops_export.c1
-rw-r--r--fs/hfsplus/ioctl.c2
-rw-r--r--fs/inode.c17
-rw-r--r--fs/isofs/isofs.h1
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/background.c1
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/namei.c32
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/lockd/svc.c29
-rw-r--r--fs/mbcache.c9
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/super.c10
-rw-r--r--fs/nfsd/auth.c18
-rw-r--r--fs/nfsd/export.c289
-rw-r--r--fs/nfsd/lockd.c1
-rw-r--r--fs/nfsd/nfs4acl.c12
-rw-r--r--fs/nfsd/nfs4callback.c2
-rw-r--r--fs/nfsd/nfs4idmap.c13
-rw-r--r--fs/nfsd/nfs4proc.c35
-rw-r--r--fs/nfsd/nfs4state.c46
-rw-r--r--fs/nfsd/nfs4xdr.c101
-rw-r--r--fs/nfsd/nfsctl.c3
-rw-r--r--fs/nfsd/nfsfh.c51
-rw-r--r--fs/nfsd/nfsproc.c3
-rw-r--r--fs/nfsd/nfssvc.c12
-rw-r--r--fs/nfsd/vfs.c110
-rw-r--r--fs/ntfs/namei.c1
-rw-r--r--fs/ocfs2/export.h2
-rw-r--r--fs/ocfs2/file.c3
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/reiserfs/inode.c1
-rw-r--r--fs/reiserfs/ioctl.c5
-rw-r--r--fs/reiserfs/super.c1
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/udf/super.c2
-rw-r--r--fs/utimes.c2
-rw-r--r--fs/xattr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c14
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h2
-rw-r--r--fs/xfs/quota/xfs_qm.c10
-rw-r--r--include/asm-alpha/fb.h13
-rw-r--r--include/asm-alpha/page.h3
-rw-r--r--include/asm-alpha/termios.h4
-rw-r--r--include/asm-arm/fb.h19
-rw-r--r--include/asm-arm/pgtable.h6
-rw-r--r--include/asm-arm26/fb.h12
-rw-r--r--include/asm-avr32/fb.h21
-rw-r--r--include/asm-blackfin/fb.h12
-rw-r--r--include/asm-cris/fb.h12
-rw-r--r--include/asm-cris/page.h3
-rw-r--r--include/asm-frv/fb.h12
-rw-r--r--include/asm-frv/pgtable.h8
-rw-r--r--include/asm-generic/bug.h2
-rw-r--r--include/asm-generic/pgtable.h44
-rw-r--r--include/asm-generic/unaligned.h16
-rw-r--r--include/asm-h8300/fb.h12
-rw-r--r--include/asm-h8300/page.h3
-rw-r--r--include/asm-i386/fb.h17
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/pgtable.h32
-rw-r--r--include/asm-ia64/fb.h23
-rw-r--r--include/asm-ia64/ioctls.h4
-rw-r--r--include/asm-ia64/page.h13
-rw-r--r--include/asm-ia64/pgtable.h23
-rw-r--r--include/asm-ia64/termbits.h5
-rw-r--r--include/asm-ia64/termios.h6
-rw-r--r--include/asm-m32r/fb.h19
-rw-r--r--include/asm-m32r/page.h3
-rw-r--r--include/asm-m32r/pgtable.h6
-rw-r--r--include/asm-m68k/fb.h34
-rw-r--r--include/asm-m68knommu/fb.h12
-rw-r--r--include/asm-m68knommu/page.h3
-rw-r--r--include/asm-mips/fb.h19
-rw-r--r--include/asm-mips/sibyte/bcm1480_regs.h30
-rw-r--r--include/asm-mips/sibyte/sb1250_regs.h76
-rw-r--r--include/asm-mips/sibyte/sb1250_uart.h7
-rw-r--r--include/asm-parisc/fb.h19
-rw-r--r--include/asm-parisc/pgtable.h16
-rw-r--r--include/asm-powerpc/fb.h21
-rw-r--r--include/asm-powerpc/kprobes.h4
-rw-r--r--include/asm-powerpc/pgtable-ppc32.h7
-rw-r--r--include/asm-powerpc/pgtable-ppc64.h31
-rw-r--r--include/asm-ppc/pgtable.h7
-rw-r--r--include/asm-s390/fb.h12
-rw-r--r--include/asm-s390/page.h3
-rw-r--r--include/asm-s390/pgtable.h58
-rw-r--r--include/asm-sh/fb.h19
-rw-r--r--include/asm-sh64/fb.h19
-rw-r--r--include/asm-sparc/fb.h12
-rw-r--r--include/asm-sparc64/fb.h18
-rw-r--r--include/asm-v850/fb.h12
-rw-r--r--include/asm-x86_64/fb.h19
-rw-r--r--include/asm-x86_64/page.h3
-rw-r--r--include/asm-x86_64/pgtable.h8
-rw-r--r--include/asm-xtensa/fb.h12
-rw-r--r--include/asm-xtensa/pgtable.h12
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/crc7.h14
-rw-r--r--include/linux/efs_fs.h1
-rw-r--r--include/linux/exportfs.h126
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/freezer.h14
-rw-r--r--include/linux/fs.h125
-rw-r--r--include/linux/fsl_devices.h2
-rw-r--r--include/linux/gfp.h19
-rw-r--r--include/linux/highmem.h51
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/kallsyms.h6
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kernelcapi.h2
-rw-r--r--include/linux/limits.h2
-rw-r--r--include/linux/linux_logo.h8
-rw-r--r--include/linux/lockd/bind.h9
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/mempolicy.h6
-rw-r--r--include/linux/mm.h39
-rw-r--r--include/linux/mmzone.h28
-rw-r--r--include/linux/nfsd/export.h41
-rw-r--r--include/linux/nfsd/interface.h13
-rw-r--r--include/linux/nfsd/nfsd.h9
-rw-r--r--include/linux/nfsd/state.h3
-rw-r--r--include/linux/nfsd/xdr4.h7
-rw-r--r--include/linux/notifier.h3
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/raid/bitmap.h6
-rw-r--r--include/linux/raid/md_k.h2
-rw-r--r--include/linux/rtc/m48t59.h57
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/slab.h90
-rw-r--r--include/linux/slab_def.h34
-rw-r--r--include/linux/slub_def.h29
-rw-r--r--include/linux/smp.h13
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/spi/tle62x0.h24
-rw-r--r--include/linux/sunrpc/gss_api.h1
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svcauth.h1
-rw-r--r--include/linux/sunrpc/svcauth_gss.h1
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/vmstat.h5
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/net/scm.h2
-rw-r--r--include/video/tgafb.h1
-rw-r--r--init/Kconfig2
-rw-r--r--init/do_mounts_initrd.c7
-rw-r--r--ipc/msg.c4
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/audit.c1
-rw-r--r--kernel/auditfilter.c12
-rw-r--r--kernel/cpu.c16
-rw-r--r--kernel/cpuset.c3
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/kallsyms.c16
-rw-r--r--kernel/lockdep.c4
-rw-r--r--kernel/module.c10
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/ptrace.c19
-rw-r--r--kernel/rcutorture.c4
-rw-r--r--kernel/rtmutex-tester.c1
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/softlockup.c2
-rw-r--r--kernel/sysctl.c12
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/time/timer_stats.c2
-rw-r--r--kernel/timer.c4
-rw-r--r--kernel/workqueue.c15
-rw-r--r--lib/Kconfig8
-rw-r--r--lib/Makefile1
-rw-r--r--lib/crc7.c68
-rw-r--r--lib/genalloc.c3
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/Makefile4
-rw-r--r--mm/allocpercpu.c9
-rw-r--r--mm/filemap.c72
-rw-r--r--mm/highmem.c7
-rw-r--r--mm/hugetlb.c25
-rw-r--r--mm/memory.c9
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mempool.c3
-rw-r--r--mm/migrate.c3
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c292
-rw-r--r--mm/pdflush.c1
-rw-r--r--mm/shmem.c8
-rw-r--r--mm/slab.c86
-rw-r--r--mm/slob.c56
-rw-r--r--mm/slub.c668
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/util.c48
-rw-r--r--mm/vmalloc.c6
-rw-r--r--mm/vmscan.c212
-rw-r--r--mm/vmstat.c2
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/cmtp/core.c2
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/sunrpc/auth.c11
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c1
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c14
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c1
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c32
-rw-r--r--net/sunrpc/svcauth_unix.c7
-rw-r--r--scripts/kallsyms.c4
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/oss/Kconfig77
-rw-r--r--sound/oss/trident.c367
-rw-r--r--sound/pci/mixart/mixart_hwdep.c1
571 files changed, 27007 insertions(+), 5479 deletions(-)
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 46bcff2849b..fd2ef4d29b6 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -139,8 +139,10 @@ X!Ilib/string.c
!Elib/cmdline.c
</sect1>
- <sect1><title>CRC Functions</title>
+ <sect1 id="crc"><title>CRC Functions</title>
+!Elib/crc7.c
!Elib/crc16.c
+!Elib/crc-itu-t.c
!Elib/crc32.c
!Elib/crc-ccitt.c
</sect1>
diff --git a/Documentation/ecryptfs.txt b/Documentation/filesystems/ecryptfs.txt
index 01d8a08351a..01d8a08351a 100644
--- a/Documentation/ecryptfs.txt
+++ b/Documentation/filesystems/ecryptfs.txt
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 460b892d089..ebffdffb3d9 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1348,6 +1348,21 @@ nr_hugepages configures number of hugetlb page reserved for the system.
hugetlb_shm_group contains group id that is allowed to create SysV shared
memory segment using hugetlb page.
+hugepages_treat_as_movable
+--------------------------
+
+This parameter is only useful when kernelcore= is specified at boot time to
+create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
+are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
+value written to hugepages_treat_as_movable allows huge pages to be allocated
+from ZONE_MOVABLE.
+
+Once enabled, ZONE_MOVABLE is treated as an area of memory that the huge
+pages pool can easily grow or shrink within. Assuming that no running
+applications mlock() a lot of memory, it is likely the huge pages pool
+can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
+into nr_hugepages and triggering page reclaim.
+
laptop_mode
-----------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8363ad3ba01..9a541486fb7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -817,6 +817,32 @@ and is between 256 and 4096 characters. It is defined in the file
js= [HW,JOY] Analog joystick
See Documentation/input/joystick.txt.
+ kernelcore=nn[KMG] [KNL,IA-32,IA-64,PPC,X86-64] This parameter
+ specifies the amount of memory usable by the kernel
+ for non-movable allocations. The requested amount is
+ spread evenly throughout all nodes in the system. The
+ remaining memory in each node is used for Movable
+ pages. In the event that a node is too small to have both
+ kernelcore and Movable pages, kernelcore pages will
+ take priority and other nodes will have a larger number
+ of kernelcore pages. The Movable zone is used for the
+ allocation of pages that may be reclaimed or moved
+ by the page migration subsystem. This means that
+ HugeTLB pages may not be allocated from this zone.
+ Note that allocations like PTEs-from-HighMem still
+ use the HighMem zone if it exists, and the Normal
+ zone if it does not.
+
+ movablecore=nn[KMG] [KNL,IA-32,IA-64,PPC,X86-64] This parameter
+ is similar to kernelcore except it specifies the
+ amount of memory used for migratable allocations.
+ If both kernelcore and movablecore are specified,
+ then kernelcore will be at *least* the specified
+ value but may be more. If movablecore on its own
+ is specified, the administrator must be careful
+ that the amount of memory usable for all allocations
+ is not too small.
+
keepinitrd [HW,ARM]
kstack=N [IA-32,X86-64] Print N words from the kernel stack
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index 23e6dde7eea..7f60dfe642c 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -251,6 +251,8 @@ characters, each representing a particular tainted value.
7: 'U' if a user or user application specifically requested that the
Tainted flag be set, ' ' otherwise.
+ 8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
+
The primary reason for the 'Tainted: ' string is to tell kernel
debuggers if this is a clean kernel or if anything unusual has
occurred. Tainting is permanent: even if an offending module is
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
new file mode 100644
index 00000000000..af1a282c71a
--- /dev/null
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -0,0 +1,160 @@
+Freezing of tasks
+ (C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL
+
+I. What is the freezing of tasks?
+
+The freezing of tasks is a mechanism by which user space processes and some
+kernel threads are controlled during hibernation or system-wide suspend (on some
+architectures).
+
+II. How does it work?
+
+There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
+PF_NOFREEZE unset (all user space processes and some kernel threads) are
+regarded as 'freezable' and treated in a special way before the system enters a
+suspend state as well as before a hibernation image is created (in what follows
+we only consider hibernation, but the description also applies to suspend).
+
+Namely, as the first step of the hibernation procedure the function
+freeze_processes() (defined in kernel/power/process.c) is called. It executes
+try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
+sends a fake signal to each of them. A task that receives such a signal and has
+TIF_FREEZE set should react to it by calling the refrigerator() function
+(defined in kernel/power/process.c), which sets the task's PF_FROZEN flag,
+changes its state to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is
+cleared for it. Then, we say that the task is 'frozen' and therefore the set of
+functions handling this mechanism is called 'the freezer' (these functions are
+defined in kernel/power/process.c and include/linux/freezer.h). User space
+processes are generally frozen before kernel threads.
+
+It is not recommended to call refrigerator() directly. Instead, it is
+recommended to use the try_to_freeze() function (defined in
+include/linux/freezer.h), which checks the task's TIF_FREEZE flag and makes the
+task enter refrigerator() if the flag is set.
+
+For user space processes try_to_freeze() is called automatically from the
+signal-handling code, but the freezable kernel threads need to call it
+explicitly in suitable places. The code to do this may look like the following:
+
+ do {
+ hub_events();
+ wait_event_interruptible(khubd_wait,
+ !list_empty(&hub_event_list));
+ try_to_freeze();
+ } while (!signal_pending(current));
+
+(from drivers/usb/core/hub.c::hub_thread()).
+
+If a freezable kernel thread fails to call try_to_freeze() after the freezer has
+set TIF_FREEZE for it, the freezing of tasks will fail and the entire
+hibernation operation will be cancelled. For this reason, freezable kernel
+threads must call try_to_freeze() somewhere.
+
+After the system memory state has been restored from a hibernation image and
+devices have been reinitialized, the function thaw_processes() is called in
+order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
+have been frozen leave refrigerator() and continue running.
+
+III. Which kernel threads are freezable?
+
+Kernel threads are not freezable by default. However, a kernel thread may clear
+PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
+directly is strongly discouraged). From this point it is regarded as freezable
+and must call try_to_freeze() in a suitable place.
+
+IV. Why do we do that?
+
+Generally speaking, there are a couple of reasons to use the freezing of tasks:
+
+1. The principal reason is to prevent filesystems from being damaged after
+hibernation. At the moment we have no simple means of checkpointing
+filesystems, so if there are any modifications made to filesystem data and/or
+metadata on disks, we cannot bring them back to the state from before the
+modifications. At the same time each hibernation image contains some
+filesystem-related information that must be consistent with the state of the
+on-disk data and metadata after the system memory state has been restored from
+the image (otherwise the filesystems will be damaged in a nasty way, usually
+making them almost impossible to repair). We therefore freeze tasks that might
+cause the on-disk filesystems' data and metadata to be modified after the
+hibernation image has been created and before the system is finally powered off.
+The majority of these are user space processes, but if any of the kernel threads
+may cause something like this to happen, they have to be freezable.
+
+2. The second reason is to prevent user space processes and some kernel threads
+from interfering with the suspending and resuming of devices. A user space
+process running on a second CPU while we are suspending devices may, for
+example, be troublesome and without the freezing of tasks we would need some
+safeguards against race conditions that might occur in such a case.
+
+Although Linus Torvalds doesn't like the freezing of tasks, he said this in one
+of the discussions on LKML (http://lkml.org/lkml/2007/4/27/608):
+
+"RJW:> Why we freeze tasks at all or why we freeze kernel threads?
+
+Linus: In many ways, 'at all'.
+
+I _do_ realize the IO request queue issues, and that we cannot actually do
+s2ram with some devices in the middle of a DMA. So we want to be able to
+avoid *that*, there's no question about that. And I suspect that stopping
+user threads and then waiting for a sync is practically one of the easier
+ways to do so.
+
+So in practice, the 'at all' may become a 'why freeze kernel threads?' and
+freezing user threads I don't find really objectionable."
+
+Still, there are kernel threads that may want to be freezable. For example, if
+a kernel thread that belongs to a device driver accesses the device directly, it
+in principle needs to know when the device is suspended, so that it doesn't try to
+access it at that time. However, if the kernel thread is freezable, it will be
+frozen before the driver's .suspend() callback is executed and it will be
+thawed after the driver's .resume() callback has run, so it won't be accessing
+the device while it's suspended.
+
+3. Another reason for freezing tasks is to prevent user space processes from
+realizing that a hibernation (or suspend) operation is taking place. Ideally, user
+space processes should not notice that such a system-wide operation has occurred
+and should continue running without any problems after the restore (or resume
+from suspend). Unfortunately, in the most general case this is quite difficult
+to achieve without the freezing of tasks. Consider, for example, a process
+that depends on all CPUs being online while it's running. Since we need to
+disable nonboot CPUs during the hibernation, if this process is not frozen, it
+may notice that the number of CPUs has changed and may start to work incorrectly
+because of that.
+
+V. Are there any problems related to the freezing of tasks?
+
+Yes, there are.
+
+First of all, the freezing of kernel threads may be tricky if they depend on
+one another. For example, if kernel thread A waits for a completion (in the
+TASK_UNINTERRUPTIBLE state) that needs to be done by freezable kernel thread B
+and B is frozen in the meantime, then A will be blocked until B is thawed, which
+may be undesirable. That's why kernel threads are not freezable by default.
+
+Second, there are the following two problems related to the freezing of user
+space processes:
+1. Putting processes into an uninterruptible sleep distorts the load average.
+2. Now that we have FUSE, plus the framework for doing device drivers in
+userspace, it gets even more complicated because some userspace processes are
+now doing the sorts of things that kernel threads do
+(https://lists.linux-foundation.org/pipermail/linux-pm/2007-May/012309.html).
+
+Problem 1 seems to be fixable, although it hasn't been fixed so far. The
+other one is more serious, but it seems that we can work around it by using
+hibernation (and suspend) notifiers (in that case, though, we won't be able to
+prevent user space processes from noticing that hibernation is taking
+place).
+
+There are also problems that the freezing of tasks tends to expose, although
+they are not directly related to it. For example, if request_firmware() is
+called from a device driver's .resume() routine, it will time out and eventually
+fail, because the user space process that should respond to the request is frozen
+at this point. So, seemingly, the failure is due to the freezing of tasks.
+Suppose, however, that the firmware file is located on a filesystem accessible
+only through another device that hasn't been resumed yet. In that case,
+request_firmware() will fail regardless of whether or not the freezing of tasks
+is used. Consequently, the problem is not really related to the freezing of
+tasks, since it generally exists anyway. [The solution to this particular
+problem is to keep the firmware in memory after it's loaded for the first time
+and upload it from memory to the device whenever necessary.]
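
As an illustration of sections II and III above (not part of the patch itself),
here is a minimal sketch of a freezable kernel thread that opts in with
set_freezable() and calls try_to_freeze() in its main loop; the wait queue,
list and work function names are hypothetical and invented for the example:

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/wait.h>
    #include <linux/list.h>

    static LIST_HEAD(example_list);                  /* hypothetical work list */
    static DECLARE_WAIT_QUEUE_HEAD(example_wait);    /* hypothetical wait queue */

    static void process_example_list(void)
    {
            /* drain example_list here; body omitted, illustration only */
    }

    static int example_thread(void *unused)
    {
            set_freezable();    /* clear PF_NOFREEZE so this thread may be frozen */

            do {
                    process_example_list();
                    wait_event_interruptible(example_wait,
                                    !list_empty(&example_list) ||
                                    kthread_should_stop());
                    try_to_freeze();    /* enters refrigerator() if TIF_FREEZE is set */
            } while (!kthread_should_stop());

            return 0;
    }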
diff --git a/Documentation/power/kernel_threads.txt b/Documentation/power/kernel_threads.txt
deleted file mode 100644
index fb57784986b..00000000000
--- a/Documentation/power/kernel_threads.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-KERNEL THREADS
-
-
-Freezer
-
-Upon entering a suspended state the system will freeze all
-tasks. This is done by delivering pseudosignals. This affects
-kernel threads, too. To successfully freeze a kernel thread
-the thread has to check for the pseudosignal and enter the
-refrigerator. Code to do this looks like this:
-
- do {
- hub_events();
- wait_event_interruptible(khubd_wait, !list_empty(&hub_event_list));
- try_to_freeze();
- } while (!signal_pending(current));
-
-from drivers/usb/core/hub.c::hub_thread()
-
-
-The Unfreezable
-
-Some kernel threads however, must not be frozen. The kernel must
-be able to finish pending IO operations and later on be able to
-write the memory image to disk. Kernel threads needed to do IO
-must stay awake. Such threads must mark themselves unfreezable
-like this:
-
- /*
- * This thread doesn't need any user-level access,
- * so get rid of all our resources.
- */
- daemonize("usb-storage");
-
- current->flags |= PF_NOFREEZE;
-
-from drivers/usb/storage/usb.c::usb_stor_control_thread()
-
-Such drivers are themselves responsible for staying quiet during
-the actual snapshotting.
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 152b510d1bb..aea7e920966 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -140,21 +140,11 @@ should be sent to the mailing list available through the suspend2
website, and not to the Linux Kernel Mailing List. We are working
toward merging suspend2 into the mainline kernel.
-Q: A kernel thread must voluntarily freeze itself (call 'refrigerator').
-I found some kernel threads that don't do it, and they don't freeze
-so the system can't sleep. Is this a known behavior?
-
-A: All such kernel threads need to be fixed, one by one. Select the
-place where the thread is safe to be frozen (no kernel semaphores
-should be held at that point and it must be safe to sleep there), and
-add:
-
- try_to_freeze();
-
-If the thread is needed for writing the image to storage, you should
-instead set the PF_NOFREEZE process flag when creating the thread (and
-be very careful).
+Q: What is the freezing of tasks and why are we using it?
+A: The freezing of tasks is a mechanism by which user space processes and some
+kernel threads are controlled during hibernation or system-wide suspend (on some
+architectures). See freezing-of-tasks.txt for details.
Q: What is the difference between "platform" and "shutdown"?
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 7c701b88d6d..c931d613f64 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -385,7 +385,7 @@ test_PIE:
/* not all RTCs support periodic IRQs */
if (errno == ENOTTY) {
fprintf(stderr, "\nNo periodic IRQ support\n");
- return 0;
+ goto done;
}
perror("RTC_IRQP_READ ioctl");
exit(errno);
diff --git a/Documentation/spi/spi-lm70llp b/Documentation/spi/spi-lm70llp
new file mode 100644
index 00000000000..154bd02220b
--- /dev/null
+++ b/Documentation/spi/spi-lm70llp
@@ -0,0 +1,69 @@
+spi_lm70llp : LM70-LLP parport-to-SPI adapter
+==============================================
+
+Supported board/chip:
+ * National Semiconductor LM70 LLP evaluation board
+ Datasheet: http://www.national.com/pf/LM/LM70.html
+
+Author:
+ Kaiwan N Billimoria <kaiwan@designergraphix.com>
+
+Description
+-----------
+This driver provides glue code connecting a National Semiconductor LM70 LLP
+temperature sensor evaluation board to the kernel's SPI core subsystem.
+
+In effect, this driver turns the parallel port interface on the eval board
+into a SPI bus with a single device, which will be driven by the generic
+LM70 driver (drivers/hwmon/lm70.c).
+
+The hardware interfacing on the LM70 LLP eval board is as follows:
+
+ Parallel LM70 LLP
+ Port Direction JP2 Header
+ ----------- --------- ----------------
+ D0 2 - -
+ D1 3 --> V+ 5
+ D2 4 --> V+ 5
+ D3 5 --> V+ 5
+ D4 6 --> V+ 5
+ D5 7 --> nCS 8
+ D6 8 --> SCLK 3
+ D7 9 --> SI/O 5
+ GND 25 - GND 7
+ Select 13 <-- SI/O 1
+ ----------- --------- ----------------
+
+Note that since the LM70 uses a "3-wire" variant of SPI, the SI/SO pin
+is connected to both pin D7 (as Master Out) and Select (as Master In)
+using an arrangement that lets either the parport or the LM70 pull the
+pin low. This can't be shared with true SPI devices, but other 3-wire
+devices might share the same SI/SO pin.
+
+The bitbanger routine in this driver (lm70_txrx) is called back from
+the bound "hwmon/lm70" protocol driver through its sysfs hook, using a
+spi_write_then_read() call. It performs Mode 0 (SPI/Microwire) bitbanging.
+The lm70 driver then interprets the resulting digital temperature value
+and exports it through sysfs.
+
+A "gotcha": National Semiconductor's LM70 LLP eval board circuit schematic
+shows that the SI/O line from the LM70 chip is connected to the base of a
+transistor Q1 (and also a pullup, and a zener diode to D7); while the
+collector is tied to VCC.
+
+Interpreting this circuit, when the LM70 SI/O line is High (or tristate
+and not grounded by the host via D7), the transistor conducts and switches
+the collector to zero, which is reflected on pin 13 of the DB25 parport
+connector. When SI/O is Low (driven by the LM70 or the host) on the other
+hand, the transistor is cut off and the voltage tied to its collector is
+reflected on pin 13 as a High level.
+
+So: the getmiso inline routine in this driver takes this fact into account,
+inverting the value read at pin 13.
+
+
+Thanks to
+---------
+o David Brownell for mentoring the SPI-side driver development.
+o Dr.Craig Hollabaugh for the (early) "manual" bitbanging driver version.
+o Nadir Billimoria for help interpreting the circuit schematic.
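
For reference, the inversion described above can be expressed in a few lines.
This is a sketch under stated assumptions (a struct parport handle and the
standard parport status accessors), not the literal code from
drivers/spi/spi_lm70llp.c:

    #include <linux/parport.h>

    /* Sketch only: sample MISO on the parport Select status line (DB25
     * pin 13) and invert it, because the transistor stage between the
     * LM70 SI/O pin and pin 13 inverts the level. */
    static inline int example_getmiso(struct parport *port)
    {
            return !(parport_read_status(port) & PARPORT_STATUS_SELECT);
    }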
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index df3ff2095f9..a0ccc5b6026 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -38,7 +38,8 @@ Currently, these files are in /proc/sys/vm:
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump, swap_token_timeout, drop-caches:
+block_dump, swap_token_timeout, drop-caches,
+hugepages_treat_as_movable:
See Documentation/filesystems/proc.txt
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index df812b03b65..d17f324db9f 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -127,13 +127,20 @@ SLUB Debug output
Here is a sample of slub debug output:
-*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58
- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
- Redzone 0xc90f6d28: 00 cc cc cc .
-FreePointer 0xc90f6d2c -> 0xc90f6d58
-Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554
-Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+====================================================================
+BUG kmalloc-8: Redzone overwritten
+--------------------------------------------------------------------
+
+INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58
+INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+
+Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
+ Redzone 0xc90f6d28: 00 cc cc cc .
+ Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+
[<c010523d>] dump_trace+0x63/0x1eb
[<c01053df>] show_trace_log_lvl+0x1a/0x2f
[<c010601d>] show_trace+0x12/0x14
@@ -155,74 +162,108 @@ Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
[<c0104112>] sysenter_past_esp+0x5f/0x99
[<b7f7b410>] 0xb7f7b410
=======================
-@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
+FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc
+If SLUB encounters a corrupted object (full detection requires the kernel
+to be booted with slub_debug) then the following output will be dumped
+into the syslog:
-If SLUB encounters a corrupted object then it will perform the following
-actions:
-
-1. Isolation and report of the issue
+1. Description of the problem encountered
This will be a message in the system log starting with
-*** SLUB <slab cache affected>: <What went wrong>@<object address>
-offset=<offset of object into slab> flags=<slabflags>
-inuse=<objects in use in this slab> freelist=<first free object in slab>
+===============================================
+BUG <slab cache affected>: <What went wrong>
+-----------------------------------------------
-2. Report on how the problem was dealt with in order to ensure the continued
-operation of the system.
+INFO: <corruption start>-<corruption_end> <more info>
+INFO: Slab <address> <slab information>
+INFO: Object <address> <object information>
+INFO: Allocated in <kernel function> age=<jiffies since alloc> cpu=<allocated by
+ cpu> pid=<pid of the process>
+INFO: Freed in <kernel function> age=<jiffies since free> cpu=<freed by cpu>
+ pid=<pid of the process>
-These are messages in the system log beginning with
-
-@@@ SLUB <slab cache affected>: <corrective action taken>
+(Object allocation / free information is only available if SLAB_STORE_USER is
+set for the slab. slub_debug sets that option)
+2. The object contents if an object was involved.
-In the above sample SLUB found that the Redzone of an active object has
-been overwritten. Here a string of 8 characters was written into a slab that
-has the length of 8 characters. However, a 8 character string needs a
-terminating 0. That zero has overwritten the first byte of the Redzone field.
-After reporting the details of the issue encountered the @@@ SLUB message
-tell us that SLUB has restored the redzone to its proper value and then
-system operations continue.
-
-Various types of lines can follow the @@@ SLUB line:
+Various types of lines can follow the BUG SLUB line:
Bytes b4 <address> : <bytes>
- Show a few bytes before the object where the problem was detected.
+ Shows a few bytes before the object where the problem was detected.
Can be useful if the corruption does not stop with the start of the
object.
Object <address> : <bytes>
The bytes of the object. If the object is inactive then the bytes
- typically contain poisoning values. Any non-poison value shows a
+ typically contain poison values. Any non-poison value shows a
corruption by a write after free.
Redzone <address> : <bytes>
- The redzone following the object. The redzone is used to detect
+ The Redzone following the object. The Redzone is used to detect
writes after the object. All bytes should always have the same
value. If there is any deviation then it is due to a write after
the object boundary.
-Freepointer
- The pointer to the next free object in the slab. May become
- corrupted if overwriting continues after the red zone.
-
-Last alloc:
-Last free:
- Shows the address from which the object was allocated/freed last.
- We note the pid, the time and the CPU that did so. This is usually
- the most useful information to figure out where things went wrong.
- Here get_modalias() did an kmalloc(8) instead of a kmalloc(9).
+ (Redzone information is only available if SLAB_RED_ZONE is set.
+ slub_debug sets that option)
-Filler <address> : <bytes>
+Padding <address> : <bytes>
Unused data to fill up the space in order to get the next object
properly aligned. In the debug case we make sure that there are
- at least 4 bytes of filler. This allow for the detection of writes
+ at least 4 bytes of padding. This allows the detection of writes
before the object.
-Following the filler will be a stackdump. That stackdump describes the
-location where the error was detected. The cause of the corruption is more
-likely to be found by looking at the information about the last alloc / free.
+3. A stackdump
+
+The stackdump describes the location where the error was detected. The cause
+of the corruption is more likely to be found by looking at the function that
+allocated or freed the object.
+
+4. Report on how the problem was dealt with in order to ensure the continued
+operation of the system.
+
+These are messages in the system log beginning with
+
+FIX <slab cache affected>: <corrective action taken>
+
+In the above sample, SLUB found that the Redzone of an active object has
+been overwritten. Here a string of 8 characters was written into a slab that
+has the length of 8 characters. However, an 8-character string needs a
+terminating 0. That zero has overwritten the first byte of the Redzone field.
+After reporting the details of the issue encountered, the FIX SLUB message
+tells us that SLUB has restored the Redzone to its proper value and then
+system operations continue.
+
+Emergency operations:
+---------------------
+
+Minimal debugging (sanity checks alone) can be enabled by booting with
+
+ slub_debug=F
+
+This will generally be enough to enable the resiliency features of slub
+which will keep the system running even if a bad kernel component keeps
+corrupting objects. This may be important for production systems.
+Performance will be impacted by the sanity checks and there will be a
+continual stream of error messages to the syslog but no additional memory
+will be used (unlike full debugging).
+
+No guarantees. The kernel component still needs to be fixed. Performance
+may be optimized further by locating the slab that experiences corruption
+and enabling debugging only for that cache.
+
+I.e.
+
+ slub_debug=F,dentry
+
+If the corruption occurs by writing after the end of the object then it
+may be advisable to enable a Redzone to avoid corrupting the beginning
+of other objects.
+
+ slub_debug=FZ,dentry
-Christoph Lameter, <clameter@sgi.com>, May 23, 2007
+Christoph Lameter, <clameter@sgi.com>, May 30, 2007
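
To make the Redzone example above concrete, here is a hedged sketch (not taken
from the kernel source) of the bug class described: an allocation sized for the
visible characters but not for the terminating NUL, so the NUL lands in the
Redzone and produces the "BUG kmalloc-8: Redzone overwritten" report shown
earlier:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical example: "1019.005" is 8 characters, so strcpy()
     * writes 9 bytes including the terminating NUL.  The allocation
     * should have been kmalloc(9, ...); the stray NUL overwrites the
     * first Redzone byte. */
    static char *make_version_string(void)
    {
            char *buf = kmalloc(8, GFP_KERNEL);    /* too small by one byte */

            if (buf)
                    strcpy(buf, "1019.005");
            return buf;
    }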
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 0cd060598f9..83a78184226 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -315,9 +315,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
/* When I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- tmp = data;
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
- ret = (copied == sizeof(tmp)) ? 0 : -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the specified register */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 80cfb758ee2..b28731437c3 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -65,7 +65,7 @@ enum ipi_message_type {
};
/* Set to a secondary's cpuid when it comes online. */
-static int smp_secondary_alive __initdata = 0;
+static int smp_secondary_alive __devinitdata = 0;
/* Which cpus ids came online. */
cpumask_t cpu_online_map;
@@ -173,7 +173,7 @@ smp_callin(void)
}
/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
-static int __init
+static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
unsigned long timeout;
@@ -358,7 +358,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
/*
* Bring one cpu online.
*/
-static int __init
+static int __devinit
smp_boot_one_cpu(int cpuid)
{
struct task_struct *idle;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index d6e665d567b..ec0f05e0d8f 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -184,6 +184,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
#endif
printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
dik_show_regs(regs, r9_15);
+ add_taint(TAINT_DIE);
dik_show_trace((unsigned long *)(regs+1));
dik_show_code((unsigned int *)regs->pc);
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index ab3761c437a..8698e0746f9 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -69,6 +69,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
result = (result & 0xffffffff) + (result >> 32);
return (__force __wsum)result;
}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
/*
* Do a 64-bit checksum on an arbitrary memory area..
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 6f2f46c2e40..78c9f1a3d41 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -657,7 +657,6 @@ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- unsigned long tmp;
int ret;
switch (request) {
@@ -666,12 +665,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
*/
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &tmp,
- sizeof(unsigned long), 0);
- if (ret == sizeof(unsigned long))
- ret = put_user(tmp, (unsigned long __user *) data);
- else
- ret = -EIO;
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
case PTRACE_PEEKUSR:
@@ -683,12 +677,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
*/
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data,
- sizeof(unsigned long), 1);
- if (ret == sizeof(unsigned long))
- ret = 0;
- else
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR:
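
The ptrace conversions in this patch (alpha above, arm here, and the other
architectures that follow) all replace the same open-coded access_process_vm()
sequences with common helpers. Reconstructed from the code being removed, the
helpers behave roughly as sketched below; the actual definitions are expected
in kernel/ptrace.c, which the diffstat shows is also touched by this series:

    #include <linux/sched.h>
    #include <linux/ptrace.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/uaccess.h>

    /* Sketch of the generic helpers, mirroring the removed per-arch code. */
    int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
    {
            unsigned long tmp;
            int copied;

            copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
            if (copied != sizeof(tmp))
                    return -EIO;
            return put_user(tmp, (unsigned long __user *)data);
    }

    int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
    {
            int copied;

            copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
            return (copied == sizeof(data)) ? 0 : -EIO;
    }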
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 237f4999b9a..f2114bcf09d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -249,6 +249,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(1);
__die(str, err, thread, regs);
bust_spinlocks(0);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index e18a41e61f0..dde089922e3 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/mtd/physmap.h>
@@ -83,6 +84,13 @@ static struct at91_udc_data __initdata csb337_udc_data = {
.pullup_pin = AT91_PIN_PA24,
};
+static struct i2c_board_info __initdata csb337_i2c_devices[] = {
+ { I2C_BOARD_INFO("rtc-ds1307", 0x68),
+ .type = "ds1307",
+ },
+};
+
+
static struct at91_cf_data __initdata csb337_cf_data = {
/*
* connector P4 on the CSB 337 mates to
@@ -161,6 +169,8 @@ static void __init csb337_board_init(void)
at91_add_device_udc(&csb337_udc_data);
/* I2C */
at91_add_device_i2c();
+ i2c_register_board_info(0, csb337_i2c_devices,
+ ARRAY_SIZE(csb337_i2c_devices));
/* Compact Flash */
at91_set_gpio_input(AT91_PIN_PB22, 1); /* IOIS16 */
at91_add_device_cf(&csb337_cf_data);
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 390a97d39e5..1873bd8cd1b 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -25,6 +25,7 @@
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <asm/hardware.h>
@@ -199,6 +200,12 @@ static struct platform_device n2100_serial_device = {
.resource = &n2100_uart_resource,
};
+static struct i2c_board_info __initdata n2100_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("rtc-rs5c372", 0x32),
+ .type = "rs5c372b",
+ },
+};
/*
* Pull PCA9532 GPIO #8 low to power off the machine.
@@ -248,6 +255,9 @@ static void __init n2100_init_machine(void)
platform_device_register(&iop3xx_dma_0_channel);
platform_device_register(&iop3xx_dma_1_channel);
+ i2c_register_board_info(0, n2100_i2c_devices,
+ ARRAY_SIZE(n2100_i2c_devices));
+
pm_power_off = n2100_power_off;
init_timer(&power_button_poll_timer);
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 41692795672..0fefb86970c 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -531,7 +531,6 @@ static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- unsigned long tmp;
int ret;
switch (request) {
@@ -540,12 +539,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
*/
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &tmp,
- sizeof(unsigned long), 0);
- if (ret == sizeof(unsigned long))
- ret = put_user(tmp, (unsigned long *) data);
- else
- ret = -EIO;
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
case PTRACE_PEEKUSR:
@@ -557,12 +551,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
*/
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data,
- sizeof(unsigned long), 1);
- if (ret == sizeof(unsigned long))
- ret = 0;
- else
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR:
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index d594fb59e94..2911e2eae80 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -185,6 +185,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
printk("Internal error: %s: %x\n", str, err);
printk("CPU: %d\n", smp_processor_id());
show_regs(regs);
+ add_taint(TAINT_DIE);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
current->comm, current->pid, end_of_stack(tsk));
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 3c36c2d1614..39060cbeb2a 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -153,7 +153,6 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- unsigned long tmp;
int ret;
pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n",
@@ -166,11 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the child process */
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (ret == sizeof(tmp))
- ret = put_user(tmp, (unsigned long __user *)data);
- else
- ret = -EIO;
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
case PTRACE_PEEKUSR:
@@ -181,11 +176,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word in data at location addr */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data, sizeof(data), 1);
- if (ret == sizeof(data))
- ret = 0;
- else
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR:
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index aaa792815cd..9a73ce7eb50 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -56,6 +56,7 @@ void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
show_regs_log_lvl(regs, KERN_EMERG);
show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
bust_spinlocks(0);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index fd2129a0458..f4f9db698b4 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -83,19 +83,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* Read word at location address. */
case PTRACE_PEEKTEXT:
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
-
- if (copied != sizeof(tmp))
- break;
-
- ret = put_user(tmp,datap);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* Read the word at location address in the USER area. */
case PTRACE_PEEKUSR: {
@@ -113,12 +103,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address. */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = 0;
-
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
-
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
/* Write the word at location address in the USER area. */
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index d4d57b74133..38ece0cd47c 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -146,12 +146,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Write the word at location address. */
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = 0;
-
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
-
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
/* Write the word at location address in the USER area. */
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index ce88fb95ee5..709e9bdc612 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -112,20 +112,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- int copied;
-
+ case PTRACE_PEEKDATA:
ret = -EIO;
if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
break;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (copied != sizeof(tmp))
- break;
-
- ret = put_user(tmp,(unsigned long *) data);
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -176,9 +168,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
break;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data))
- break;
- ret = 0;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 8a7a991b8f7..d32bbf02fc4 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -111,10 +111,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8531a540ca8..c7c9c2a15fa 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1222,8 +1222,8 @@ if INSTRUMENTATION
source "arch/i386/oprofile/Kconfig"
config KPROBES
- bool "Kprobes (EXPERIMENTAL)"
- depends on KALLSYMS && EXPERIMENTAL && MODULES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index bd28f9f9b4b..181cc29a7c4 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -108,6 +108,7 @@ drivers-$(CONFIG_PCI) += arch/i386/pci/
# must be linked after kernel/
drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
drivers-$(CONFIG_PM) += arch/i386/power/
+drivers-$(CONFIG_FB) += arch/i386/video/
CFLAGS += $(mflags-y)
AFLAGS += $(mflags-y)
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 4112afe712b..47001d50a08 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -222,6 +222,7 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/freezer.h>
#include <linux/smp.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -2311,7 +2312,6 @@ static int __init apm_init(void)
remove_proc_entry("apm", NULL);
return err;
}
- kapmd_task->flags |= PF_NOFREEZE;
wake_up_process(kapmd_task);
if (num_online_cpus() > 1 && !smp ) {
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 7ba7c3abd3a..1203dc5ab87 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -134,19 +134,21 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
int err;
sys_dev = get_cpu_sysdev(cpu);
- mutex_lock(&therm_cpu_lock);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(sys_dev);
+ mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ mutex_lock(&therm_cpu_lock);
thermal_throttle_remove_dev(sys_dev);
+ mutex_unlock(&therm_cpu_lock);
break;
}
- mutex_unlock(&therm_cpu_lock);
return NOTIFY_OK;
}
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index a1808022ea1..2452c6fbe99 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -278,7 +278,7 @@ void efi_memmap_walk(efi_freemem_callback_t callback, void *arg)
struct range {
unsigned long start;
unsigned long end;
- } prev, curr;
+ } uninitialized_var(prev), curr;
efi_memory_desc_t *md;
unsigned long start, end;
void *p;
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7f8b7af2b95..21db8f56c9a 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -667,6 +667,7 @@ static int balanced_irq(void *unused)
set_pending_irq(i, cpumask_of_cpu(0));
}
+ set_freezable();
for ( ; ; ) {
time_remaining = schedule_timeout_interruptible(time_remaining);
try_to_freeze();
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index fba121f7973..03b7f5584d7 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -295,7 +295,7 @@ static unsigned int
last_irq_sums [NR_CPUS],
alert_counter [NR_CPUS];
-void touch_nmi_watchdog (void)
+void touch_nmi_watchdog(void)
{
if (nmi_watchdog > 0) {
unsigned cpu;
@@ -304,8 +304,10 @@ void touch_nmi_watchdog (void)
* Just reset the alert counters, (other CPUs might be
* spinning on locks we hold):
*/
- for_each_present_cpu (cpu)
- alert_counter[cpu] = 0;
+ for_each_present_cpu(cpu) {
+ if (alert_counter[cpu])
+ alert_counter[cpu] = 0;
+ }
}
/*
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c0ceec5de0..1c075f58d1f 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -358,17 +358,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, datap);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -395,10 +387,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
index 1868ae18eb4..bbfe85a0f69 100644
--- a/arch/i386/kernel/smpcommon.c
+++ b/arch/i386/kernel/smpcommon.c
@@ -47,7 +47,7 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
EXPORT_SYMBOL(smp_call_function);
/**
- * smp_call_function_single - Run a function on another CPU
+ * smp_call_function_single - Run a function on a specific CPU
* @cpu: The target CPU. Cannot be the calling CPU.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -66,9 +66,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int ret;
int me = get_cpu();
if (cpu == me) {
- WARN_ON(1);
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
put_cpu();
- return -EBUSY;
+ return 0;
}
ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
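The smp_call_function_single() hunk above makes a request that targets the calling CPU run the function locally with interrupts disabled instead of warning and returning -EBUSY. An illustrative caller, not part of this patch (the function and counter names are invented for the example):

static void count_up(void *info)
{
	/* runs with interrupts disabled, whether locally or in IPI context */
	(*(unsigned int *)info)++;
}

static unsigned int poke_cpu(int cpu)
{
	unsigned int n = 0;

	/* nonatomic = 0, wait = 1: block until count_up() has run; with
	 * this change cpu may now be the calling CPU as well */
	smp_call_function_single(cpu, count_up, &n, 0, 1);
	return n;
}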
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 28bd1c5163e..18c1c285836 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -433,6 +433,7 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
+ add_taint(TAINT_DIE);
spin_unlock_irqrestore(&die.lock, flags);
if (!regs)
diff --git a/arch/i386/video/Makefile b/arch/i386/video/Makefile
new file mode 100644
index 00000000000..2c447c94adc
--- /dev/null
+++ b/arch/i386/video/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FB) += fbdev.o
diff --git a/arch/i386/video/fbdev.c b/arch/i386/video/fbdev.c
new file mode 100644
index 00000000000..48fb38d7d2c
--- /dev/null
+++ b/arch/i386/video/fbdev.c
@@ -0,0 +1,32 @@
+/*
+ * arch/i386/video/fbdev.c - i386 Framebuffer
+ *
+ * Copyright (C) 2007 Antonino Daplas <adaplas@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/pci.h>
+
+int fb_is_primary_device(struct fb_info *info)
+{
+ struct device *device = info->device;
+ struct pci_dev *pci_dev = NULL;
+ struct resource *res = NULL;
+ int retval = 0;
+
+ if (device)
+ pci_dev = to_pci_dev(device);
+
+ if (pci_dev)
+ res = &pci_dev->resource[PCI_ROM_RESOURCE];
+
+ if (res && res->flags & IORESOURCE_ROM_SHADOW)
+ retval = 1;
+
+ return retval;
+}
+EXPORT_SYMBOL(fb_is_primary_device);
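fb_is_primary_device() reports whether the PCI adapter behind a registered framebuffer has its ROM shadowed by the BIOS, i.e. whether it drove the boot display. A hedged usage sketch; only fb_is_primary_device() itself comes from this patch, the reporting function below is illustrative:

#include <linux/fb.h>
#include <linux/kernel.h>

static void report_primary(struct fb_info *info)
{
	/* IORESOURCE_ROM_SHADOW marks the adapter the firmware booted with */
	if (fb_is_primary_device(info))
		printk(KERN_INFO "fb%d: primary (boot) display adapter\n",
		       info->node);
}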
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db9ddff9584..616c96e7348 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -582,8 +582,8 @@ menu "Instrumentation Support"
source "arch/ia64/oprofile/Kconfig"
config KPROBES
- bool "Kprobes (EXPERIMENTAL)"
- depends on KALLSYMS && EXPERIMENTAL && MODULES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index c1dca226b47..cd4adf52f17 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -34,6 +34,7 @@
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h> /* hweight64() */
+#include <linux/crash_dump.h>
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h>
@@ -43,6 +44,8 @@
#include <asm/acpi-ext.h>
+extern int swiotlb_late_init_with_default_size (size_t size);
+
#define PFX "IOC: "
/*
@@ -2026,11 +2029,24 @@ sba_init(void)
if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
return 0;
+#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP)
+ /* If we are booting a kdump kernel, the sba_iommu will
+ * cause devices that were not shutdown properly to MCA
+ * as soon as they are turned back on. Our only option for
+ * a successful kdump kernel boot is to use the swiotlb.
+ */
+ if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
+ if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+ panic("Unable to initialize software I/O TLB:"
+ " Try machvec=dig boot option");
+ machvec_init("dig");
+ return 0;
+ }
+#endif
+
acpi_bus_register_driver(&acpi_sba_ioc_driver);
if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
- extern int swiotlb_late_init_with_default_size (size_t size);
-
/*
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 300acd913d9..1189d035d31 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -329,11 +329,6 @@ sys_fw_init (const char *args, int arglen)
strcpy(sal_systab->product_id, "HP-simulator");
#endif
-#ifdef CONFIG_IA64_SDV
- strcpy(sal_systab->oem_id, "Intel");
- strcpy(sal_systab->product_id, "SDV");
-#endif
-
/* fill in an entry point: */
sal_ed->type = SAL_DESC_ENTRY_POINT;
sal_ed->pal_proc = __pa(pal_desc[0]);
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 324ea7565e2..ef252df50e1 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -36,10 +36,6 @@
#include <asm/hw_irq.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_KDB
-# include <linux/kdb.h>
-#endif
-
#undef SIMSERIAL_DEBUG /* define this to get some debug information */
#define KEYBOARD_INTR 3 /* must match with simulator! */
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 75ec3478d8a..73ca86d0381 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -28,6 +28,7 @@
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>
+#include <linux/mm.h>
#include <asm/io.h>
#include <asm/kregs.h>
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 8589e84a27c..3f926c2dc70 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -247,6 +247,9 @@ ENTRY(fsys_gettimeofday)
.time_redo:
.pred.rel.mutex p8,p9,p10
ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes
+ ;;
+ and r28 = ~1,r28 // Make sequence even to force retry if odd
+ ;;
(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues..
@@ -284,7 +287,6 @@ EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare
(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo
// simulate tbit.nz.or p7,p0 = r28,0
- and r28 = ~1,r28 // Make sequence even to force retry if odd
getf.sig r2 = f8
mf
add r8 = r8,r18 // Add time interpolator offset
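The fsys_gettimeofday hunk moves the "make sequence even to force retry if odd" masking so it is applied, with stop bits, immediately after xtime_lock.sequence is loaded instead of just before the final comparison. The assembly implements a seqlock-style read loop; the helper below is an illustrative C equivalent of that pattern, not code from this patch:

#include <linux/seqlock.h>
#include <linux/time.h>

static struct timeval sample_time(seqlock_t *lock, const struct timespec *xtime)
{
	struct timeval tv;
	unsigned seq;

	do {
		seq = read_seqbegin(lock);	/* even value, or retry below */
		tv.tv_sec = xtime->tv_sec;
		tv.tv_usec = xtime->tv_nsec / 1000;
	} while (read_seqretry(lock, seq));	/* a writer ran: sample again */

	return tv;
}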
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 15ad85da15a..3aeaf15e468 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -69,6 +69,7 @@ die (const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die.lock);
if (panic_on_oops)
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index 4411d9baeb2..9fc955026f8 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -60,6 +60,7 @@ csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
result = (result & 0xffffffff) + (result >> 32);
return (__force __wsum)result;
}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
extern unsigned long do_csum (const unsigned char *, long);
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 6da9854751c..df8d5bed611 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -750,9 +750,10 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
goto error;
} else
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ int cpuobj_index = 0;
+
memset(p, 0, a.sz);
for (i = 0; i < nobj; i++) {
- int cpuobj_index = 0;
if (!SN_HWPERF_IS_NODE(objs + i))
continue;
node = sn_hwperf_obj_to_cnode(objs + i);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 5f02b314487..57a92ef31a9 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -595,7 +595,6 @@ void ptrace_disable(struct task_struct *child)
static int
do_ptrace(long request, struct task_struct *child, long addr, long data)
{
- unsigned long tmp;
int ret;
switch (request) {
@@ -604,11 +603,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
*/
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (ret == sizeof(tmp))
- ret = put_user(tmp,(unsigned long __user *) data);
- else
- ret = -EIO;
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
/*
@@ -624,15 +619,9 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
*/
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data, sizeof(data), 1);
- if (ret == sizeof(data)) {
- ret = 0;
- if (request == PTRACE_POKETEXT) {
- invalidate_cache();
- }
- } else {
- ret = -EIO;
- }
+ ret = generic_ptrace_pokedata(child, addr, data);
+ if (ret == 0 && request == PTRACE_POKETEXT)
+ invalidate_cache();
break;
/*
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index cdba9fd6d82..2cf0690b788 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -128,10 +128,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- i = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (i != sizeof(tmp))
- goto out_eio;
- ret = put_user(tmp, (unsigned long *)data);
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
/* read the word at location addr in the USER area. */
@@ -160,8 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data))
- goto out_eio;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index a27a4fa3329..4e2752a0e89 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1170,6 +1170,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
console_verbose();
printk("%s: %08x\n",str,nr);
show_registers(fp);
+ add_taint(TAINT_DIE);
do_exit(SIGSEGV);
}
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index cf6bb51945a..6216f12a756 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -422,3 +422,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
);
return(sum);
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index f54b6a3dfec..ef70ca070ce 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -106,17 +106,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long *) data);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -159,10 +151,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index bed5f47bf56..fde04e1757f 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -83,6 +83,7 @@ void die_if_kernel(char *str, struct pt_regs *fp, int nr)
printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
show_stack(NULL, (unsigned long *)fp);
+ add_taint(TAINT_DIE);
do_exit(SIGSEGV);
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index b5a7b46bbc4..893e7bccf22 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,17 +174,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long __user *) data);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -313,11 +305,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 37c562c4c81..ce277cb34dd 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -326,6 +326,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index bdaac34ae70..89f29233cae 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -31,6 +31,7 @@
unsigned int sb1_pass;
unsigned int soc_pass;
unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
unsigned int zbbus_mhz;
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index f4a6169aa0a..2d5c6d8b41f 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -31,6 +31,7 @@
unsigned int sb1_pass;
unsigned int soc_pass;
unsigned int soc_type;
+EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 8a0db376e91..26ec774c502 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -87,10 +87,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
- int copied;
-
#ifdef CONFIG_64BIT
if (__is_compat_task(child)) {
+ int copied;
unsigned int tmp;
addr &= 0xffffffffL;
@@ -105,15 +104,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
else
#endif
- {
- unsigned long tmp;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- goto out_tsk;
- ret = put_user(tmp,(unsigned long *) data);
- }
+ ret = generic_ptrace_peekdata(child, addr, data);
goto out_tsk;
}
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9bca2d74b3..bbf029a184a 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -264,6 +264,7 @@ KERN_CRIT " || ||\n");
show_regs(regs);
dump_stack();
+ add_taint(TAINT_DIE);
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 322167737de..cf780cb3b91 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -242,7 +242,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
#ifdef CONFIG_KALLSYMS
/* Handle some frequent special cases.... */
{
- char symname[KSYM_NAME_LEN+1];
+ char symname[KSYM_NAME_LEN];
char *modname;
kallsyms_lookup(info->ip, NULL, NULL, &modname,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e641bb68d87..d860b640a14 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -640,8 +640,8 @@ menu "Instrumentation Support"
source "arch/powerpc/oprofile/Kconfig"
config KPROBES
- bool "Kprobes (EXPERIMENTAL)"
- depends on !BOOKE && !4xx && KALLSYMS && EXPERIMENTAL && MODULES
+ bool "Kprobes"
+ depends on !BOOKE && !4xx && KALLSYMS && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 0fb53950da4..8a177bd9eab 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -379,17 +379,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long __user *) data);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -421,11 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* If I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
/* write the word at location addr in the USER area */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3b8427e6283..2bb1cb91178 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -149,6 +149,7 @@ int die(const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
+ add_taint(TAINT_DIE);
spin_unlock_irqrestore(&die.lock, flags);
if (kexec_should_crash(current) ||
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index dc27dab48df..5a808d611ae 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -40,7 +40,7 @@
#include <asm/prom.h>
extern void cpm_reset(void);
-extern void mpc8xx_show_cpuinfo(struct seq_file*);
+extern void mpc8xx_show_cpuinfo(struct seq_file *);
extern void mpc8xx_restart(char *cmd);
extern void mpc8xx_calibrate_decr(void);
extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
@@ -48,9 +48,9 @@ extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
extern void m8xx_pic_init(void);
extern unsigned int mpc8xx_get_irq(void);
-static void init_smc1_uart_ioports(struct fs_uart_platform_info* fpi);
-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi);
-static void init_scc3_ioports(struct fs_platform_info* ptr);
+static void init_smc1_uart_ioports(struct fs_uart_platform_info *fpi);
+static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi);
+static void init_scc3_ioports(struct fs_platform_info *ptr);
#ifdef CONFIG_PCMCIA_M8XX
static void pcmcia_hw_setup(int slot, int enable)
@@ -73,7 +73,7 @@ static int pcmcia_set_voltage(int slot, int vcc, int vpp)
bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- switch(vcc) {
+ switch (vcc) {
case 0:
break;
case 33:
@@ -86,12 +86,12 @@ static int pcmcia_set_voltage(int slot, int vcc, int vpp)
return 1;
}
- switch(vpp) {
+ switch (vpp) {
case 0:
break;
case 33:
case 50:
- if(vcc == vpp)
+ if (vcc == vpp)
reg |= BCSR1_PCCVPP1;
else
return 1;
@@ -127,7 +127,7 @@ void __init mpc885ads_board_setup(void)
#endif
bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- cp = (cpm8xx_t *)immr_map(im_cpm);
+ cp = (cpm8xx_t *) immr_map(im_cpm);
if (bcsr_io == NULL) {
printk(KERN_CRIT "Could not remap BCSR\n");
@@ -140,13 +140,13 @@ void __init mpc885ads_board_setup(void)
out_8(&(cp->cp_smc[0].smc_smcm), tmpval8);
clrbits16(&cp->cp_smc[0].smc_smcmr, SMCMR_REN | SMCMR_TEN); /* brg1 */
#else
- setbits32(bcsr_io,BCSR1_RS232EN_1);
+ setbits32(bcsr_io, BCSR1_RS232EN_1);
out_be16(&cp->cp_smc[0].smc_smcmr, 0);
out_8(&cp->cp_smc[0].smc_smce, 0);
#endif
#ifdef CONFIG_SERIAL_CPM_SMC2
- clrbits32(bcsr_io,BCSR1_RS232EN_2);
+ clrbits32(bcsr_io, BCSR1_RS232EN_2);
clrbits32(&cp->cp_simode, 0xe0000000 >> 1);
setbits32(&cp->cp_simode, 0x20000000 >> 1); /* brg2 */
tmpval8 = in_8(&(cp->cp_smc[1].smc_smcm)) | (SMCM_RX | SMCM_TX);
@@ -155,7 +155,7 @@ void __init mpc885ads_board_setup(void)
init_smc2_uart_ioports(0);
#else
- setbits32(bcsr_io,BCSR1_RS232EN_2);
+ setbits32(bcsr_io, BCSR1_RS232EN_2);
out_be16(&cp->cp_smc[1].smc_smcmr, 0);
out_8(&cp->cp_smc[1].smc_smce, 0);
#endif
@@ -164,16 +164,16 @@ void __init mpc885ads_board_setup(void)
#ifdef CONFIG_FS_ENET
/* use MDC for MII (common) */
- io_port = (iop8xx_t*)immr_map(im_ioport);
+ io_port = (iop8xx_t *) immr_map(im_ioport);
setbits16(&io_port->iop_pdpar, 0x0080);
clrbits16(&io_port->iop_pddir, 0x0080);
bcsr_io = ioremap(BCSR5, sizeof(unsigned long));
- clrbits32(bcsr_io,BCSR5_MII1_EN);
- clrbits32(bcsr_io,BCSR5_MII1_RST);
+ clrbits32(bcsr_io, BCSR5_MII1_EN);
+ clrbits32(bcsr_io, BCSR5_MII1_RST);
#ifndef CONFIG_FC_ENET_HAS_SCC
- clrbits32(bcsr_io,BCSR5_MII2_EN);
- clrbits32(bcsr_io,BCSR5_MII2_RST);
+ clrbits32(bcsr_io, BCSR5_MII2_EN);
+ clrbits32(bcsr_io, BCSR5_MII2_RST);
#endif
iounmap(bcsr_io);
@@ -182,17 +182,16 @@ void __init mpc885ads_board_setup(void)
#endif
#ifdef CONFIG_PCMCIA_M8XX
- /*Set up board specific hook-ups*/
+ /*Set up board specific hook-ups */
m8xx_pcmcia_ops.hw_ctrl = pcmcia_hw_setup;
m8xx_pcmcia_ops.voltage_set = pcmcia_set_voltage;
#endif
}
-
-static void init_fec1_ioports(struct fs_platform_info* ptr)
+static void init_fec1_ioports(struct fs_platform_info *ptr)
{
- cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
- iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);
+ cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
+ iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
/* configure FEC1 pins */
setbits16(&io_port->iop_papar, 0xf830);
@@ -214,11 +213,10 @@ static void init_fec1_ioports(struct fs_platform_info* ptr)
immr_unmap(cp);
}
-
-static void init_fec2_ioports(struct fs_platform_info* ptr)
+static void init_fec2_ioports(struct fs_platform_info *ptr)
{
- cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
- iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);
+ cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
+ iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
/* configure FEC2 pins */
setbits32(&cp->cp_pepar, 0x0003fffc);
@@ -248,15 +246,15 @@ void init_fec_ioports(struct fs_platform_info *fpi)
}
}
-static void init_scc3_ioports(struct fs_platform_info* fpi)
+static void init_scc3_ioports(struct fs_platform_info *fpi)
{
unsigned *bcsr_io;
iop8xx_t *io_port;
cpm8xx_t *cp;
bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE);
- io_port = (iop8xx_t *)immr_map(im_ioport);
- cp = (cpm8xx_t *)immr_map(im_cpm);
+ io_port = (iop8xx_t *) immr_map(im_ioport);
+ cp = (cpm8xx_t *) immr_map(im_cpm);
if (bcsr_io == NULL) {
printk(KERN_CRIT "Could not remap BCSR\n");
@@ -265,9 +263,9 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
/* Enable the PHY.
*/
- clrbits32(bcsr_io+4, BCSR4_ETH10_RST);
+ clrbits32(bcsr_io + 4, BCSR4_ETH10_RST);
udelay(1000);
- setbits32(bcsr_io+4, BCSR4_ETH10_RST);
+ setbits32(bcsr_io + 4, BCSR4_ETH10_RST);
/* Configure port A pins for Txd and Rxd.
*/
setbits16(&io_port->iop_papar, PA_ENET_RXD | PA_ENET_TXD);
@@ -283,8 +281,7 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
*/
setbits32(&cp->cp_pepar, PE_ENET_TCLK | PE_ENET_RCLK);
clrbits32(&cp->cp_pepar, PE_ENET_TENA);
- clrbits32(&cp->cp_pedir,
- PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
+ clrbits32(&cp->cp_pedir, PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
clrbits32(&cp->cp_peso, PE_ENET_TCLK | PE_ENET_RCLK);
setbits32(&cp->cp_peso, PE_ENET_TENA);
@@ -308,7 +305,7 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
clrbits32(&cp->cp_pedir, PE_ENET_TENA);
setbits32(&cp->cp_peso, PE_ENET_TENA);
- setbits32(bcsr_io+4, BCSR1_ETHEN);
+ setbits32(bcsr_io + 4, BCSR1_ETHEN);
iounmap(bcsr_io);
immr_unmap(io_port);
immr_unmap(cp);
@@ -328,50 +325,48 @@ void init_scc_ioports(struct fs_platform_info *fpi)
}
}
-
-
-static void init_smc1_uart_ioports(struct fs_uart_platform_info* ptr)
+static void init_smc1_uart_ioports(struct fs_uart_platform_info *ptr)
{
- unsigned *bcsr_io;
+ unsigned *bcsr_io;
cpm8xx_t *cp;
- cp = (cpm8xx_t *)immr_map(im_cpm);
+ cp = (cpm8xx_t *) immr_map(im_cpm);
setbits32(&cp->cp_pepar, 0x000000c0);
clrbits32(&cp->cp_pedir, 0x000000c0);
clrbits32(&cp->cp_peso, 0x00000040);
setbits32(&cp->cp_peso, 0x00000080);
immr_unmap(cp);
- bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+ bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- if (bcsr_io == NULL) {
- printk(KERN_CRIT "Could not remap BCSR1\n");
- return;
- }
- clrbits32(bcsr_io,BCSR1_RS232EN_1);
- iounmap(bcsr_io);
+ if (bcsr_io == NULL) {
+ printk(KERN_CRIT "Could not remap BCSR1\n");
+ return;
+ }
+ clrbits32(bcsr_io, BCSR1_RS232EN_1);
+ iounmap(bcsr_io);
}
-static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi)
+static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi)
{
- unsigned *bcsr_io;
+ unsigned *bcsr_io;
cpm8xx_t *cp;
- cp = (cpm8xx_t *)immr_map(im_cpm);
+ cp = (cpm8xx_t *) immr_map(im_cpm);
setbits32(&cp->cp_pepar, 0x00000c00);
clrbits32(&cp->cp_pedir, 0x00000c00);
clrbits32(&cp->cp_peso, 0x00000400);
setbits32(&cp->cp_peso, 0x00000800);
immr_unmap(cp);
- bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
+ bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- if (bcsr_io == NULL) {
- printk(KERN_CRIT "Could not remap BCSR1\n");
- return;
- }
- clrbits32(bcsr_io,BCSR1_RS232EN_2);
- iounmap(bcsr_io);
+ if (bcsr_io == NULL) {
+ printk(KERN_CRIT "Could not remap BCSR1\n");
+ return;
+ }
+ clrbits32(bcsr_io, BCSR1_RS232EN_2);
+ iounmap(bcsr_io);
}
void init_smc_ioports(struct fs_uart_platform_info *data)
@@ -444,15 +439,11 @@ static int __init mpc885ads_probe(void)
return 1;
}
-define_machine(mpc885_ads) {
- .name = "MPC885 ADS",
- .probe = mpc885ads_probe,
- .setup_arch = mpc885ads_setup_arch,
- .init_IRQ = m8xx_pic_init,
- .show_cpuinfo = mpc8xx_show_cpuinfo,
- .get_irq = mpc8xx_get_irq,
- .restart = mpc8xx_restart,
- .calibrate_decr = mpc8xx_calibrate_decr,
- .set_rtc_time = mpc8xx_set_rtc_time,
- .get_rtc_time = mpc8xx_get_rtc_time,
-};
+define_machine(mpc885_ads)
+{
+.name = "MPC885 ADS",.probe = mpc885ads_probe,.setup_arch =
+ mpc885ads_setup_arch,.init_IRQ =
+ m8xx_pic_init,.show_cpuinfo = mpc8xx_show_cpuinfo,.get_irq =
+ mpc8xx_get_irq,.restart = mpc8xx_restart,.calibrate_decr =
+ mpc8xx_calibrate_decr,.set_rtc_time =
+ mpc8xx_set_rtc_time,.get_rtc_time = mpc8xx_get_rtc_time,};
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e4d0c9f42ab..96a8f609690 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -31,6 +31,7 @@
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
+#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
@@ -656,12 +657,24 @@ static int __init init_spu_base(void)
ret = spu_enumerate_spus(create_spu);
- if (ret) {
+ if (ret < 0) {
printk(KERN_WARNING "%s: Error initializing spus\n",
__FUNCTION__);
goto out_unregister_sysdev_class;
}
+ if (ret > 0) {
+ /*
+ * We cannot put the forward declaration in
+ * <linux/linux_logo.h> because const and __initdata cause section type
+ * conflicts with different compiler versions
+ */
+ extern const struct linux_logo logo_spe_clut224;
+
+ fb_append_extra_logo(&logo_spe_clut224, ret);
+ }
+
xmon_register_spus(&spu_full_list);
spu_add_sysdev_attr(&attr_stat);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 1d4562ae463..75ed50fcc3d 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -279,6 +279,7 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
{
int ret;
struct device_node *node;
+ unsigned int n = 0;
ret = -ENODEV;
for (node = of_find_node_by_type(NULL, "spe");
@@ -289,8 +290,9 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
__FUNCTION__, node->name);
break;
}
+ n++;
}
- return ret;
+ return ret ? ret : n;
}
static int __init of_create_spu(struct spu *spu, void *data)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index c7f734c8946..502d80ed982 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -405,11 +405,13 @@ static int __init ps3_enumerate_spus(int (*fn)(void *data))
}
}
- if (result)
+ if (result) {
printk(KERN_WARNING "%s:%d: Error initializing spus\n",
__func__, __LINE__);
+ return result;
+ }
- return result;
+ return num_resource_id;
}
const struct spu_management_ops spu_management_ps3_ops = {
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 0eaef7c8378..3f3b292eb77 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -92,6 +92,7 @@ int die(const char * str, struct pt_regs * fp, long err)
if (nl)
printk("\n");
show_regs(fp);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
* context so we don't handle it here
diff --git a/arch/ppc/syslib/virtex_devices.h b/arch/ppc/syslib/virtex_devices.h
index 3d4be1412f6..9f38d92ae53 100644
--- a/arch/ppc/syslib/virtex_devices.h
+++ b/arch/ppc/syslib/virtex_devices.h
@@ -31,4 +31,11 @@ void __init virtex_early_serial_map(void);
*/
int virtex_device_fixup(struct platform_device *dev);
+/* SPI Controller IP */
+struct xspi_platform_data {
+ s16 bus_num;
+ u16 num_chipselect;
+ u32 speed_hz;
+};
+
#endif /* __ASM_VIRTEX_DEVICES_H__ */
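xspi_platform_data carries the per-board parameters a Xilinx SPI master driver needs. A sketch of how board code might hand it to such a driver through a platform device; the driver name "xilinx_spi", the header path, and the numeric values are assumptions for illustration, not taken from this patch:

#include <linux/platform_device.h>
#include "virtex_devices.h"	/* struct xspi_platform_data (assumed path) */

static struct xspi_platform_data virtex_spi_pdata = {
	.bus_num	= 0,		/* SPI bus number (assumed) */
	.num_chipselect	= 1,		/* one slave wired up (assumed) */
	.speed_hz	= 50000000,	/* IP core clock in Hz (assumed) */
};

static struct platform_device virtex_spi_device = {
	.name	= "xilinx_spi",		/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &virtex_spi_pdata,
	},
};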
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2a8f0872ea8..f4503ca2763 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -294,7 +294,6 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
static int
do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
{
- unsigned long tmp;
ptrace_area parea;
int copied, ret;
@@ -304,10 +303,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
/* Remove high order bit from address (only for 31 bit). */
addr &= PSW_ADDR_INSN;
/* read word at location addr. */
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (copied != sizeof(tmp))
- return -EIO;
- return put_user(tmp, (unsigned long __force __user *) data);
+ return generic_ptrace_peekdata(child, addr, data);
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
@@ -318,10 +314,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
/* Remove high order bit from address (only for 31 bit). */
addr &= PSW_ADDR_INSN;
/* write the word at location addr. */
- copied = access_process_vm(child, addr, &data, sizeof(data),1);
- if (copied != sizeof(data))
- return -EIO;
- return 0;
+ return generic_ptrace_pokedata(child, addr, data);
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 81e03b9c384..8ec9def83cc 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -262,6 +262,7 @@ void die(const char * str, struct pt_regs * regs, long err)
print_modules();
show_regs(regs);
bust_spinlocks(0);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index f2eaa485d04..891d1d46c90 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -91,17 +91,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long __user *) data);
- break;
- }
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -135,10 +126,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 05a40f3c30b..502d43e4785 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -103,6 +103,7 @@ void die(const char * str, struct pt_regs * regs, long err)
(unsigned long)task_stack_page(current));
bust_spinlocks(0);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (kexec_should_crash(current))
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index 4e95e18b46d..df06c647746 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -129,17 +129,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long *) data);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -166,10 +158,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR:
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
index 4b2676380de..bd550176024 100644
--- a/arch/sh64/lib/c-checksum.c
+++ b/arch/sh64/lib/c-checksum.c
@@ -213,3 +213,4 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
return (__wsum)result;
}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 8567cc90194..73df7115325 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -217,6 +217,9 @@ source "drivers/pci/Kconfig"
endif
+config NO_DMA
+ def_bool !PCI
+
config SUN_OPENPROMFS
tristate "Openprom tree appears in /proc/openprom"
help
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index dc9ffea2a4f..3bc3bff51e0 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -101,6 +101,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
show_regs(regs);
+ add_taint(TAINT_DIE);
__SAVE; __SAVE; __SAVE; __SAVE;
__SAVE; __SAVE; __SAVE; __SAVE;
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 65840a62bb9..45ebf91a280 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22-rc1
-# Mon May 14 04:17:48 2007
+# Linux kernel version: 2.6.22
+# Tue Jul 17 01:19:52 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -42,12 +42,11 @@ CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=18
@@ -82,22 +81,15 @@ CONFIG_SLUB=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_BSG=y
#
# IO Schedulers
@@ -156,12 +148,15 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_RESOURCES_64BIT=y
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=1
+CONFIG_VIRT_TO_BUS=y
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
CONFIG_SUN_AUXIO=y
CONFIG_SUN_IO=y
+# CONFIG_SUN_LDOMS is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_DEBUG is not set
@@ -246,10 +241,6 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
CONFIG_IP_DCCP_ACKVEC=y
@@ -269,15 +260,7 @@ CONFIG_IP_DCCP_CCID3_RTO=100
#
# CONFIG_IP_DCCP_DEBUG is not set
# CONFIG_NET_DCCPPROBE is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
# CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -314,6 +297,7 @@ CONFIG_NET_TCPPROBE=m
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -328,26 +312,10 @@ CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -364,18 +332,11 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
-
-#
-# Misc devices
-#
+CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
-# CONFIG_BLINK is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
@@ -440,6 +401,7 @@ CONFIG_BLK_DEV_IDEDMA=y
#
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
CONFIG_SCSI_NETLINK=y
CONFIG_SCSI_PROC_FS=y
@@ -505,7 +467,6 @@ CONFIG_ISCSI_TCP=m
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_ESP_CORE is not set
# CONFIG_SCSI_SUNESP is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_ATA is not set
@@ -545,30 +506,16 @@ CONFIG_DM_ZERO=m
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
# CONFIG_I2O is not set
-
-#
-# Network device support
-#
CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
# CONFIG_SUNLANCE is not set
@@ -578,10 +525,6 @@ CONFIG_MII=m
# CONFIG_SUNGEM is not set
CONFIG_CASSINI=m
# CONFIG_NET_VENDOR_3COM is not set
-
-#
-# Tulip family network device support
-#
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
@@ -617,7 +560,6 @@ CONFIG_E1000_NAPI=y
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
# CONFIG_SKY2 is not set
-# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=m
CONFIG_BNX2=m
@@ -631,11 +573,6 @@ CONFIG_NETDEV_10000=y
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_MLX4_CORE is not set
-CONFIG_MLX4_DEBUG=y
-
-#
-# Token Ring devices
-#
# CONFIG_TR is not set
#
@@ -665,6 +602,7 @@ CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
# CONFIG_SLIP is not set
CONFIG_SLHC=m
# CONFIG_NET_FC is not set
@@ -677,10 +615,6 @@ CONFIG_SLHC=m
# ISDN subsystem
#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -688,6 +622,7 @@ CONFIG_SLHC=m
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
#
# Userland interfaces
@@ -733,7 +668,6 @@ CONFIG_INPUT_SPARCSPKR=y
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_UINPUT is not set
-# CONFIG_INPUT_POLLDEV is not set
#
# Hardware I/O ports
@@ -773,10 +707,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
@@ -785,10 +715,6 @@ CONFIG_RTC=y
# CONFIG_APPLICOM is not set
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
CONFIG_I2C=y
@@ -822,6 +748,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_I2C_SIS5595 is not set
# CONFIG_I2C_SIS630 is not set
# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_STUB is not set
# CONFIG_I2C_TINY_USB is not set
# CONFIG_I2C_VIA is not set
@@ -833,11 +760,13 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -848,11 +777,8 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_SPI is not set
# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
@@ -949,6 +875,8 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_SBUS is not set
+# CONFIG_FB_XVR500 is not set
+# CONFIG_FB_XVR2500 is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
@@ -970,9 +898,6 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_ARK is not set
# CONFIG_FB_PM3 is not set
-# CONFIG_FB_XVR500 is not set
-# CONFIG_FB_XVR2500 is not set
-# CONFIG_FB_PCI is not set
# CONFIG_FB_VIRTUAL is not set
#
@@ -1118,10 +1043,7 @@ CONFIG_SND_SUN_CS4231=m
#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
-
-#
-# HID Devices
-#
+CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
@@ -1132,10 +1054,7 @@ CONFIG_USB_HID=y
# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
CONFIG_USB_HIDDEV=y
-
-#
-# USB support
-#
+CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1157,7 +1076,6 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1165,6 +1083,7 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
#
# USB Device Class drivers
@@ -1256,17 +1175,9 @@ CONFIG_USB_STORAGE=m
#
# LED Triggers
#
-
-#
-# InfiniBand support
-#
# CONFIG_INFINIBAND is not set
#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
# Real Time Clock
#
# CONFIG_RTC_CLASS is not set
@@ -1387,7 +1298,6 @@ CONFIG_RAMFS=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
@@ -1465,8 +1375,10 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1496,10 +1408,10 @@ CONFIG_FORCED_INLINING=y
CONFIG_KEYS=y
# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_BLKCIPHER=y
@@ -1539,10 +1451,7 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
#
# Library routines
diff --git a/arch/sparc64/kernel/hvtramp.S b/arch/sparc64/kernel/hvtramp.S
index 76a090e2c2a..a55c252e18c 100644
--- a/arch/sparc64/kernel/hvtramp.S
+++ b/arch/sparc64/kernel/hvtramp.S
@@ -10,6 +10,7 @@
#include <asm/hvtramp.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
+#include <asm/head.h>
#include <asm/asi.h>
.text
@@ -28,7 +29,7 @@
* First setup basic privileged cpu state.
*/
hv_cpu_startup:
- wrpr %g0, 0, %gl
+ SET_GL(0)
wrpr %g0, 15, %pil
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 203e8730100..fb13775b368 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -289,9 +289,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
struct rt_signal_frame __user *sf;
unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
- mm_segment_t old_fs;
sigset_t set;
- stack_t st;
int err;
/* Always make any pending restarted system calls return -EINTR */
@@ -327,20 +325,13 @@ void do_rt_sigreturn(struct pt_regs *regs)
err |= restore_fpu_state(regs, &sf->fpu_state);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
-
+ err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
+
if (err)
goto segv;
-
+
regs->tpc = tpc;
regs->tnpc = tnpc;
-
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
- set_fs(old_fs);
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 00a9e3286c8..6ef2d299fb1 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2225,6 +2225,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
__asm__ __volatile__("flushw");
__show_regs(regs);
+ add_taint(TAINT_DIE);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c
index 483aa15222a..1316456e2a2 100644
--- a/arch/um/drivers/pcap_user.c
+++ b/arch/um/drivers/pcap_user.c
@@ -53,7 +53,7 @@ static int pcap_open(void *data)
return -EIO;
}
- pri->compiled = um_kmalloc(sizeof(struct bpf_program));
+ pri->compiled = kmalloc(sizeof(struct bpf_program), UM_GFP_KERNEL);
if(pri->compiled == NULL){
printk(UM_KERN_ERR "pcap_open : kmalloc failed\n");
return -ENOMEM;
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 627742d8943..6916c8888db 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -52,17 +52,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- ret = -EIO;
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, p);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
@@ -72,11 +64,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = -EIO;
- if (access_process_vm(child, addr, &data, sizeof(data),
- 1) != sizeof(data))
- break;
- ret = 0;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index a9b09343097..a458ac941b2 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -117,24 +117,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int rval;
switch (request) {
- unsigned long val, copied;
+ unsigned long val;
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &val, sizeof(val), 0);
- rval = -EIO;
- if (copied != sizeof(val))
- break;
- rval = put_user(val, (unsigned long *)data);
+ rval = generic_ptrace_peekdata(child, addr, data);
goto out;
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- rval = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- rval = -EIO;
+ rval = generic_ptrace_pokedata(child, addr, data);
goto out;
/* Read/write the word at location ADDR in the registers. */
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 8bdd25ac154..14bf8ce3ea2 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -774,8 +774,8 @@ menu "Instrumentation Support"
source "arch/x86_64/oprofile/Kconfig"
config KPROBES
- bool "Kprobes (EXPERIMENTAL)"
- depends on KALLSYMS && EXPERIMENTAL && MODULES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 931c64bad5e..edbbc59b752 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -296,7 +296,7 @@ static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
-void touch_nmi_watchdog (void)
+void touch_nmi_watchdog(void)
{
if (nmi_watchdog > 0) {
unsigned cpu;
@@ -306,8 +306,10 @@ void touch_nmi_watchdog (void)
* do it ourselves because the alert count increase is not
* atomic.
*/
- for_each_present_cpu (cpu)
- per_cpu(nmi_touch, cpu) = 1;
+ for_each_present_cpu(cpu) {
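+ /* skip the store when the flag is already set */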
+ if (per_cpu(nmi_touch, cpu) != 1)
+ per_cpu(nmi_touch, cpu) = 1;
+ }
}
touch_softlockup_watchdog();
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 9409117b9f1..fa6775ef729 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -313,17 +313,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long __user *) data);
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
break;
- }
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -367,10 +359,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 2ff46859162..0694940b2e7 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -357,7 +357,7 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
}
/*
- * smp_call_function_single - Run a function on another CPU
+ * smp_call_function_single - Run a function on a specific CPU
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @nonatomic: Currently unused.
@@ -374,14 +374,18 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
{
/* prevent preemption and reschedule on another processor */
int me = get_cpu();
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
if (cpu == me) {
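+ /* run func() with irqs disabled, matching how it would run in the IPI handler on a remote cpu */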
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
put_cpu();
return 0;
}
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
spin_lock_bh(&call_lock);
__smp_call_function_single(cpu, func, info, nonatomic, wait);
spin_unlock_bh(&call_lock);
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7fa155c394d..74cbeb2e99a 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -330,6 +330,7 @@ static int print_trace_stack(void *data, char *name)
static void print_trace_address(void *data, unsigned long addr)
{
+ touch_nmi_watchdog();
printk_address(addr);
}
@@ -518,6 +519,7 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
printk("\n");
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
show_registers(regs);
+ add_taint(TAINT_DIE);
/* Executive summary in case the oops scrolled away */
printk(KERN_ALERT "RIP ");
printk_address(regs->rip);
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 14104ff6309..06a13d9b69d 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -50,18 +50,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- {
- unsigned long tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp,(unsigned long *) data);
-
+ ret = generic_ptrace_peekdata(child, addr, data);
goto out;
- }
/* Read the word at location addr in the USER area. */
@@ -138,10 +128,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
+ ret = generic_ptrace_pokedata(child, addr, data);
goto out;
case PTRACE_POKEUSR:
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 693ab268485..c5e62f9d9f5 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -482,6 +482,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs))
show_stack(NULL, (unsigned long*)regs->areg[1]);
+ add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b91ff..3e316dd7252 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;
- ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+ ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
if (!ad)
return NULL;
- memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e0aa4dad674..9755a3cfad2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct cfq_io_context *cic;
- cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+ cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (cic) {
- memset(cic, 0, sizeof(*cic));
cic->last_end_request = jiffies;
INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@ retry:
* free memory.
*/
spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
goto retry;
} else {
- cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (!cfqq)
goto out;
}
- memset(cfqq, 0, sizeof(*cfqq));
-
RB_CLEAR_NODE(&cfqq->rb_node);
INIT_LIST_HEAD(&cfqq->fifo);
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
return NULL;
- memset(cfqd, 0, sizeof(*cfqd));
-
cfqd->service_tree = CFQ_RB_ROOT;
INIT_LIST_HEAD(&cfqd->cic_list);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e938d3..87ca02ac84c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
{
struct deadline_data *dd;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
return NULL;
- memset(dd, 0, sizeof(*dd));
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index 4769a25d703..d265963d1ed 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
elevator_t *eq;
int i;
- eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+ eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
if (unlikely(!eq))
goto err;
- memset(eq, 0, sizeof(*eq));
eq->ops = &e->ops;
eq->elevator_type = e;
kobject_init(&eq->kobj);
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0623e..3af1e7a378d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -108,28 +108,24 @@ out:
EXPORT_SYMBOL(register_blkdev);
-/* todo: make void - error printk here */
-int unregister_blkdev(unsigned int major, const char *name)
+void unregister_blkdev(unsigned int major, const char *name)
{
struct blk_major_name **n;
struct blk_major_name *p = NULL;
int index = major_to_index(major);
- int ret = 0;
mutex_lock(&block_subsys_lock);
for (n = &major_names[index]; *n; n = &(*n)->next)
if ((*n)->major == major)
break;
- if (!*n || strcmp((*n)->name, name))
- ret = -EINVAL;
- else {
+ if (!*n || strcmp((*n)->name, name)) {
+ WARN_ON(1);
+ } else {
p = *n;
*n = p->next;
}
mutex_unlock(&block_subsys_lock);
kfree(p);
-
- return ret;
}
EXPORT_SYMBOL(unregister_blkdev);
@@ -726,21 +722,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+ disk = kmalloc_node(sizeof(struct gendisk),
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (disk) {
- memset(disk, 0, sizeof(struct gendisk));
if (!init_disk_stats(disk)) {
kfree(disk);
return NULL;
}
if (minors > 1) {
int size = (minors - 1) * sizeof(struct hd_struct *);
- disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+ disk->part = kmalloc_node(size,
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (!disk->part) {
kfree(disk);
return NULL;
}
- memset(disk->part, 0, size);
}
disk->minors = minors;
kobj_set_kset_s(disk,block_subsys);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 11e4235d0b0..d7cadf30416 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
request_queue_t *q;
- q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+ q = kmem_cache_alloc_node(requestq_cachep,
+ gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
- memset(q, 0, sizeof(*q));
init_timer(&q->unplug_timer);
snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5d576435fcc..fb8a749423c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2666,7 +2666,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mv_print_info(host);
pci_set_master(pdev);
- pci_set_mwi(pdev);
+ pci_try_set_mwi(pdev);
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 59651abfa4f..b34b3829f6a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1040,7 +1040,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
struct atm_qos * qos;
struct atm_trafprm * txtp;
struct atm_trafprm * rxtp;
- u16 tx_rate_bits;
+ u16 tx_rate_bits = -1; // hush gcc
u16 tx_vc_bits = -1; // hush gcc
u16 tx_frame_bits = -1; // hush gcc
@@ -1096,6 +1096,8 @@ static int amb_open (struct atm_vcc * atm_vcc)
r = round_up;
}
error = make_rate (pcr, r, &tx_rate_bits, NULL);
+ if (error)
+ return error;
tx_vc_bits = TX_UBR_CAPPED;
tx_frame_bits = TX_FRAME_CAPPED;
}
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 020a87a476c..58583c6ac5b 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -915,7 +915,7 @@ static int open_tx_first(struct atm_vcc *vcc)
unsigned long flags;
u32 *loop;
unsigned short chan;
- int pcr,unlimited;
+ int unlimited;
DPRINTK("open_tx_first\n");
zatm_dev = ZATM_DEV(vcc->dev);
@@ -936,6 +936,8 @@ static int open_tx_first(struct atm_vcc *vcc)
vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
else {
+ int uninitialized_var(pcr);
+
if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c5a61571a07..8f65b88cf71 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -421,4 +421,10 @@ config SUNVDC
source "drivers/s390/block/Kconfig"
+config XILINX_SYSACE
+ tristate "Xilinx SystemACE support"
+ depends on 4xx
+ help
+ Include support for the Xilinx SystemACE CompactFlash interface
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7926be8c9fb..9ee08ab4ffa 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
+obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4503290da40..e425daa1eac 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,6 +68,7 @@
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
@@ -600,13 +601,6 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
- /*
- * loop can be used in an encrypted device,
- * hence, it mustn't be stopped at all
- * because it could be indirectly used during suspension
- */
- current->flags |= PF_NOFREEZE;
-
set_user_nice(current, -20);
while (!kthread_should_stop() || lo->lo_bio) {
@@ -1574,8 +1568,7 @@ static void __exit loop_exit(void)
loop_del_one(lo);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
- if (unregister_blkdev(LOOP_MAJOR, "loop"))
- printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+ unregister_blkdev(LOOP_MAJOR, "loop");
}
module_init(loop_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7c294a40002..31be33e4f11 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1593,6 +1593,7 @@ static int kcdrwd(void *foobar)
long min_sleep_time, residue;
set_user_nice(current, -20);
+ set_freezable();
for (;;) {
DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 00000000000..732ec63b6e9
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
+/*
+ * Xilinx SystemACE device driver
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The SystemACE chip is designed to configure FPGAs by loading an FPGA
+ * bitstream from a file on a CF card and squirting it into FPGAs connected
+ * to the SystemACE JTAG chain. It also has the advantage of providing an
+ * MPU interface which can be used to control the FPGA configuration process
+ * and to use the attached CF card for general purpose storage.
+ *
+ * This driver is a block device driver for the SystemACE.
+ *
+ * Initialization:
+ * The driver registers itself as a platform_device driver at module
+ * load time. The platform bus will take care of calling the
+ * ace_probe() method for all SystemACE instances in the system. Any
+ * number of SystemACE instances are supported. ace_probe() calls
+ * ace_setup() which initializes all data structures, reads the CF
+ * id structure and registers the device.
+ *
+ * Processing:
+ * Just about all of the heavy lifting in this driver is performed by
+ * a Finite State Machine (FSM). The driver needs to wait on a number
+ * of events; some raised by interrupts, some that need to be polled
+ * for. Describing all of the behaviour in an FSM seems to be the
+ * easiest way to keep the complexity low and make it easy to
+ * understand what the driver is doing. If the block ops or the
+ * request function need to interact with the hardware, then they
+ * simply need to flag the request and kick off FSM processing.
+ *
+ * The FSM itself is atomic-safe code which can be run from any
+ * context. The general process flow is:
+ * 1. obtain the ace->lock spinlock.
+ * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
+ * cleared.
+ * 3. release the lock.
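+ *
+ * In code form, the pattern shared (modulo the exact spin_lock
+ * variant) by the tasklet, the stall timer and the irq handler
+ * below is roughly:
+ *
+ *	spin_lock_irqsave(&ace->lock, flags);
+ *	ace->fsm_continue_flag = 1;
+ *	while (ace->fsm_continue_flag)
+ *		ace_fsm_dostate(ace);
+ *	spin_unlock_irqrestore(&ace->lock, flags);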
+ *
+ * Individual states do not sleep in any way. If a condition needs to
+ * be waited for, then the state must clear the fsm_continue flag and
+ * either schedule the FSM to be run again at a later time, or expect
+ * an interrupt to call the FSM when the desired condition is met.
+ *
+ * In normal operation, the FSM is processed at interrupt context
+ * either when the driver's tasklet is scheduled, or when an irq is
+ * raised by the hardware. The tasklet can be scheduled at any time.
+ * The request method in particular schedules the tasklet when a new
+ * request has been indicated by the block layer. Once started, the
+ * FSM proceeds as far as it can in processing the request until it
+ * needs to wait on a hardware event. At this point, it must yield execution.
+ *
+ * A state has two options when yielding execution:
+ * 1. ace_fsm_yield()
+ * - Call if the state needs to poll for an event.
+ * - clears the fsm_continue flag to exit the processing loop
+ * - reschedules the tasklet to run again as soon as possible
+ * 2. ace_fsm_yieldirq()
+ * - Call if an irq is expected from the HW
+ * - clears the fsm_continue flag to exit the processing loop
+ * - does not reschedule the tasklet so the FSM will not be processed
+ * again until an irq is received.
+ * After calling a yield function, the state must return control back
+ * to the FSM main loop.
+ *
+ * Additionally, the driver maintains a kernel timer which can process
+ * the FSM. If the FSM gets stalled, typically due to a missed
+ * interrupt, then the kernel timer will expire and the driver can
+ * continue where it left off.
+ *
+ * To Do:
+ * - Add FPGA configuration control interface.
+ * - Request major number from LANANA
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("Xilinx SystemACE device driver");
+MODULE_LICENSE("GPL");
+
+/* SystemACE register definitions */
+#define ACE_BUSMODE (0x00)
+
+#define ACE_STATUS (0x04)
+#define ACE_STATUS_CFGLOCK (0x00000001)
+#define ACE_STATUS_MPULOCK (0x00000002)
+#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
+#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
+#define ACE_STATUS_CFDETECT (0x00000010)
+#define ACE_STATUS_DATABUFRDY (0x00000020)
+#define ACE_STATUS_DATABUFMODE (0x00000040)
+#define ACE_STATUS_CFGDONE (0x00000080)
+#define ACE_STATUS_RDYFORCFCMD (0x00000100)
+#define ACE_STATUS_CFGMODEPIN (0x00000200)
+#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
+#define ACE_STATUS_CFBSY (0x00020000)
+#define ACE_STATUS_CFRDY (0x00040000)
+#define ACE_STATUS_CFDWF (0x00080000)
+#define ACE_STATUS_CFDSC (0x00100000)
+#define ACE_STATUS_CFDRQ (0x00200000)
+#define ACE_STATUS_CFCORR (0x00400000)
+#define ACE_STATUS_CFERR (0x00800000)
+
+#define ACE_ERROR (0x08)
+#define ACE_CFGLBA (0x0c)
+#define ACE_MPULBA (0x10)
+
+#define ACE_SECCNTCMD (0x14)
+#define ACE_SECCNTCMD_RESET (0x0100)
+#define ACE_SECCNTCMD_IDENTIFY (0x0200)
+#define ACE_SECCNTCMD_READ_DATA (0x0300)
+#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
+#define ACE_SECCNTCMD_ABORT (0x0600)
+
+#define ACE_VERSION (0x16)
+#define ACE_VERSION_REVISION_MASK (0x00FF)
+#define ACE_VERSION_MINOR_MASK (0x0F00)
+#define ACE_VERSION_MAJOR_MASK (0xF000)
+
+#define ACE_CTRL (0x18)
+#define ACE_CTRL_FORCELOCKREQ (0x0001)
+#define ACE_CTRL_LOCKREQ (0x0002)
+#define ACE_CTRL_FORCECFGADDR (0x0004)
+#define ACE_CTRL_FORCECFGMODE (0x0008)
+#define ACE_CTRL_CFGMODE (0x0010)
+#define ACE_CTRL_CFGSTART (0x0020)
+#define ACE_CTRL_CFGSEL (0x0040)
+#define ACE_CTRL_CFGRESET (0x0080)
+#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
+#define ACE_CTRL_ERRORIRQ (0x0200)
+#define ACE_CTRL_CFGDONEIRQ (0x0400)
+#define ACE_CTRL_RESETIRQ (0x0800)
+#define ACE_CTRL_CFGPROG (0x1000)
+#define ACE_CTRL_CFGADDR_MASK (0xe000)
+
+#define ACE_FATSTAT (0x1c)
+
+#define ACE_NUM_MINORS 16
+#define ACE_SECTOR_SIZE (512)
+#define ACE_FIFO_SIZE (32)
+#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
+
+struct ace_reg_ops;
+
+struct ace_device {
+ /* driver state data */
+ int id;
+ int media_change;
+ int users;
+ struct list_head list;
+
+ /* finite state machine data */
+ struct tasklet_struct fsm_tasklet;
+ uint fsm_task; /* Current activity (ACE_TASK_*) */
+ uint fsm_state; /* Current state (ACE_FSM_STATE_*) */
+ uint fsm_continue_flag; /* cleared to exit FSM mainloop */
+ uint fsm_iter_num;
+ struct timer_list stall_timer;
+
+ /* Transfer state/result, use for both id and block request */
+ struct request *req; /* request being processed */
+ void *data_ptr; /* pointer to I/O buffer */
+ int data_count; /* number of buffers remaining */
+ int data_result; /* Result of transfer; 0 := success */
+
+ int id_req_count; /* count of id requests */
+ int id_result;
+ struct completion id_completion; /* used when id req finishes */
+ int in_irq;
+
+ /* Details of hardware device */
+ unsigned long physaddr;
+ void *baseaddr;
+ int irq;
+ int bus_width; /* 0 := 8 bit; 1 := 16 bit */
+ struct ace_reg_ops *reg_ops;
+ int lock_count;
+
+ /* Block device data structures */
+ spinlock_t lock;
+ struct device *dev;
+ struct request_queue *queue;
+ struct gendisk *gd;
+
+ /* Inserted CF card parameters */
+ struct hd_driveid cf_id;
+};
+
+static int ace_major;
+
+/* ---------------------------------------------------------------------
+ * Low level register access
+ */
+
+struct ace_reg_ops {
+ u16(*in) (struct ace_device * ace, int reg);
+ void (*out) (struct ace_device * ace, int reg, u16 val);
+ void (*datain) (struct ace_device * ace);
+ void (*dataout) (struct ace_device * ace);
+};
+
+/* 8 Bit bus width */
+static u16 ace_in_8(struct ace_device *ace, int reg)
+{
+ void *r = ace->baseaddr + reg;
+ return in_8(r) | (in_8(r + 1) << 8);
+}
+
+static void ace_out_8(struct ace_device *ace, int reg, u16 val)
+{
+ void *r = ace->baseaddr + reg;
+ out_8(r, val);
+ out_8(r + 1, val >> 8);
+}
+
+static void ace_datain_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *dst = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ *dst++ = in_8(r++);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *src = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ out_8(r++, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_8_ops = {
+ .in = ace_in_8,
+ .out = ace_out_8,
+ .datain = ace_datain_8,
+ .dataout = ace_dataout_8,
+};
+
+/* 16 bit big endian bus attachment */
+static u16 ace_in_be16(struct ace_device *ace, int reg)
+{
+ return in_be16(ace->baseaddr + reg);
+}
+
+static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
+{
+ out_be16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_le16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_le16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+/* 16 bit little endian bus attachment */
+static u16 ace_in_le16(struct ace_device *ace, int reg)
+{
+ return in_le16(ace->baseaddr + reg);
+}
+
+static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
+{
+ out_le16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_be16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_be16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_be16_ops = {
+ .in = ace_in_be16,
+ .out = ace_out_be16,
+ .datain = ace_datain_be16,
+ .dataout = ace_dataout_be16,
+};
+
+static struct ace_reg_ops ace_reg_le16_ops = {
+ .in = ace_in_le16,
+ .out = ace_out_le16,
+ .datain = ace_datain_le16,
+ .dataout = ace_dataout_le16,
+};
+
+static inline u16 ace_in(struct ace_device *ace, int reg)
+{
+ return ace->reg_ops->in(ace, reg);
+}
+
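+/* 32-bit register values are accessed as two consecutive 16-bit
+ * operations: low half at 'reg', high half at 'reg + 2'.
+ */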
+static inline u32 ace_in32(struct ace_device *ace, int reg)
+{
+ return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
+}
+
+static inline void ace_out(struct ace_device *ace, int reg, u16 val)
+{
+ ace->reg_ops->out(ace, reg, val);
+}
+
+static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
+{
+ ace_out(ace, reg, val);
+ ace_out(ace, reg + 2, val >> 16);
+}
+
+/* ---------------------------------------------------------------------
+ * Debug support functions
+ */
+
+#if defined(DEBUG)
+static void ace_dump_mem(void *base, int len)
+{
+ const char *ptr = base;
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ printk(KERN_INFO "%.8x:", i);
+ for (j = 0; j < 16; j++) {
+ if (!(j % 4))
+ printk(" ");
+ printk("%.2x", ptr[i + j]);
+ }
+ printk(" ");
+ for (j = 0; j < 16; j++)
+ printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
+ printk("\n");
+ }
+}
+#else
+static inline void ace_dump_mem(void *base, int len)
+{
+}
+#endif
+
+static void ace_dump_regs(struct ace_device *ace)
+{
+ dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
+ " status:%.8x mpu_lba:%.8x busmode:%4x\n"
+ " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
+ ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD),
+ ace_in(ace, ACE_VERSION),
+ ace_in32(ace, ACE_STATUS),
+ ace_in32(ace, ACE_MPULBA),
+ ace_in(ace, ACE_BUSMODE),
+ ace_in32(ace, ACE_ERROR),
+ ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
+}
+
+void ace_fix_driveid(struct hd_driveid *id)
+{
+#if defined(__BIG_ENDIAN)
+ u16 *buf = (void *)id;
+ int i;
+
+ /* All half words have wrong byte order; swap the bytes */
+ for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
+ *buf = le16_to_cpu(*buf);
+
+ /* Some of the data values are 32bit; swap the half words */
+ id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
+ ((id->lba_capacity << 16) & 0xFFFF0000);
+ id->spg = ((id->spg >> 16) & 0x0000FFFF) |
+ ((id->spg << 16) & 0xFFFF0000);
+#endif
+}
+
+/* ---------------------------------------------------------------------
+ * Finite State Machine (FSM) implementation
+ */
+
+/* FSM tasks; used to direct state transitions */
+#define ACE_TASK_IDLE 0
+#define ACE_TASK_IDENTIFY 1
+#define ACE_TASK_READ 2
+#define ACE_TASK_WRITE 3
+#define ACE_FSM_NUM_TASKS 4
+
+/* FSM state definitions */
+#define ACE_FSM_STATE_IDLE 0
+#define ACE_FSM_STATE_REQ_LOCK 1
+#define ACE_FSM_STATE_WAIT_LOCK 2
+#define ACE_FSM_STATE_WAIT_CFREADY 3
+#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
+#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
+#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
+#define ACE_FSM_STATE_REQ_PREPARE 7
+#define ACE_FSM_STATE_REQ_TRANSFER 8
+#define ACE_FSM_STATE_REQ_COMPLETE 9
+#define ACE_FSM_STATE_ERROR 10
+#define ACE_FSM_NUM_STATES 11
+
+/* Set flag to exit FSM loop and reschedule tasklet */
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yield()\n");
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
+static inline void ace_fsm_yieldirq(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
+
+ if (ace->irq == NO_IRQ)
+ /* No IRQ assigned, so need to poll */
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Get the next read/write request, ending any requests that we don't handle */
+struct request *ace_get_next_request(request_queue_t * q)
+{
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
+ if (blk_fs_request(req))
+ break;
+ end_request(req, 0);
+ }
+ return req;
+}
+
+static void ace_fsm_dostate(struct ace_device *ace)
+{
+ struct request *req;
+ u32 status;
+ u16 val;
+ int count;
+ int i;
+
+#if defined(DEBUG)
+ dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+ ace->fsm_state, ace->id_req_count);
+#endif
+
+ switch (ace->fsm_state) {
+ case ACE_FSM_STATE_IDLE:
+ /* See if there is anything to do */
+ if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+ ace->fsm_iter_num++;
+ ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+ if (!timer_pending(&ace->stall_timer))
+ add_timer(&ace->stall_timer);
+ break;
+ }
+ del_timer(&ace->stall_timer);
+ ace->fsm_continue_flag = 0;
+ break;
+
+ case ACE_FSM_STATE_REQ_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* Already have the lock, jump to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* Request the lock */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
+ ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
+ break;
+
+ case ACE_FSM_STATE_WAIT_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* got the lock; move to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* wait a bit for the lock */
+ ace_fsm_yield(ace);
+ break;
+
+ case ACE_FSM_STATE_WAIT_CFREADY:
+ status = ace_in32(ace, ACE_STATUS);
+ if (!(status & ACE_STATUS_RDYFORCFCMD) ||
+ (status & ACE_STATUS_CFBSY)) {
+ /* CF card isn't ready; it needs to be polled */
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Device is ready for command; determine what to do next */
+ if (ace->id_req_count)
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
+ else
+ ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_PREPARE:
+ /* Send identify command */
+ ace->fsm_task = ACE_TASK_IDENTIFY;
+ ace->data_ptr = &ace->cf_id;
+ ace->data_count = ACE_BUF_PER_SECTOR;
+ ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* irq handler takes over from this point; wait for the
+ * transfer to complete */
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
+ ace_fsm_yieldirq(ace);
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ ace_fsm_yield(ace);
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+ /* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* transfer finished; kick state machine */
+ dev_dbg(ace->dev, "identify finished\n");
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_COMPLETE:
+ ace_fix_driveid(&ace->cf_id);
+ ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
+
+ if (ace->data_result) {
+ /* Error occurred, disable the disk */
+ ace->media_change = 1;
+ set_capacity(ace->gd, 0);
+ dev_err(ace->dev, "error fetching CF id (%i)\n",
+ ace->data_result);
+ } else {
+ ace->media_change = 0;
+
+ /* Record disk parameters */
+ set_capacity(ace->gd, ace->cf_id.lba_capacity);
+ dev_info(ace->dev, "capacity: %i sectors\n",
+ ace->cf_id.lba_capacity);
+ }
+
+ /* We're done, drop to IDLE state and notify waiters */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ ace->id_result = ace->data_result;
+ while (ace->id_req_count) {
+ complete(&ace->id_completion);
+ ace->id_req_count--;
+ }
+ break;
+
+ case ACE_FSM_STATE_REQ_PREPARE:
+ req = ace_get_next_request(ace->queue);
+ if (!req) {
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+
+ /* Okay, it's a data request, set it up for transfer */
+ dev_dbg(ace->dev,
+ "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
+ req->sector, req->hard_nr_sectors,
+ req->current_nr_sectors, rq_data_dir(req));
+
+ ace->req = req;
+ ace->data_ptr = req->buffer;
+ ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+
+ count = req->hard_nr_sectors;
+ if (rq_data_dir(req)) {
+ /* Kick off write request */
+ dev_dbg(ace->dev, "write data\n");
+ ace->fsm_task = ACE_TASK_WRITE;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_WRITE_DATA);
+ } else {
+ /* Kick off read request */
+ dev_dbg(ace->dev, "read data\n");
+ ace->fsm_task = ACE_TASK_READ;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_READ_DATA);
+ }
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* Move to the transfer state. The systemace will raise
+ * an interrupt once there is something to do
+ */
+ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
+ if (ace->fsm_task == ACE_TASK_READ)
+ ace_fsm_yieldirq(ace); /* wait for data ready */
+ break;
+
+ case ACE_FSM_STATE_REQ_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev,
+ "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yield(ace); /* need to poll CFBSY bit */
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ dev_dbg(ace->dev,
+ "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ i = 16;
+ if (ace->fsm_task == ACE_TASK_WRITE)
+ ace->reg_ops->dataout(ace);
+ else
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+ /* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* bio finished; is there another one? */
+ i = ace->req->current_nr_sectors;
+ if (end_that_request_first(ace->req, 1, i)) {
+ /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+ * ace->req->hard_nr_sectors,
+ * ace->req->current_nr_sectors);
+ */
+ ace->data_ptr = ace->req->buffer;
+ ace->data_count = ace->req->current_nr_sectors * 16;
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_REQ_COMPLETE:
+ /* Complete the block request */
+ blkdev_dequeue_request(ace->req);
+ end_that_request_last(ace->req, 1);
+ ace->req = NULL;
+
+ /* Finished request; go to idle state */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+
+ default:
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+}
+
+static void ace_fsm_tasklet(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+static void ace_stall_timer(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ dev_warn(ace->dev,
+ "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
+ ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Rearm the stall timer *before* entering FSM (which may then
+ * delete the timer) */
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+/* ---------------------------------------------------------------------
+ * Interrupt handling routines
+ */
+static int ace_interrupt_checkstate(struct ace_device *ace)
+{
+ u32 sreg = ace_in32(ace, ACE_STATUS);
+ u16 creg = ace_in(ace, ACE_CTRL);
+
+ /* Check for error occurrence */
+ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
+ (creg & ACE_CTRL_ERRORIRQ)) {
+ dev_err(ace->dev, "transfer failure\n");
+ ace_dump_regs(ace);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+ u16 creg;
+ struct ace_device *ace = dev_id;
+
+ /* be safe and get the lock */
+ spin_lock(&ace->lock);
+ ace->in_irq = 1;
+
+ /* clear the interrupt */
+ creg = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
+ ace_out(ace, ACE_CTRL, creg);
+
+ /* check for IO failures */
+ if (ace_interrupt_checkstate(ace))
+ ace->data_result = -EIO;
+
+ if (ace->fsm_task == 0) {
+ dev_err(ace->dev,
+ "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
+ ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD));
+ dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
+ ace->fsm_task, ace->fsm_state, ace->data_count);
+ }
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ /* done with interrupt; drop the lock */
+ ace->in_irq = 0;
+ spin_unlock(&ace->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------
+ * Block ops
+ */
+static void ace_request(request_queue_t * q)
+{
+ struct request *req;
+ struct ace_device *ace;
+
+ req = ace_get_next_request(q);
+
+ if (req) {
+ ace = req->rq_disk->private_data;
+ tasklet_schedule(&ace->fsm_tasklet);
+ }
+}
+
+static int ace_media_changed(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+
+ return ace->media_change;
+}
+
+static int ace_revalidate_disk(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+
+ if (ace->media_change) {
+ dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->id_req_count++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ tasklet_schedule(&ace->fsm_tasklet);
+ wait_for_completion(&ace->id_completion);
+ }
+
+ dev_dbg(ace->dev, "revalidate complete\n");
+ return ace->id_result;
+}
+
+static int ace_open(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+
+ filp->private_data = ace;
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ check_disk_change(inode->i_bdev);
+ return 0;
+}
+
+static int ace_release(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+ u16 val;
+
+ dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users--;
+ if (ace->users == 0) {
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
+ }
+ spin_unlock_irqrestore(&ace->lock, flags);
+ return 0;
+}
+
+static int ace_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+ struct hd_geometry g;
+ dev_dbg(ace->dev, "ace_ioctl()\n");
+
+ switch (cmd) {
+ case HDIO_GETGEO:
+ g.heads = ace->cf_id.heads;
+ g.sectors = ace->cf_id.sectors;
+ g.cylinders = ace->cf_id.cyls;
+ g.start = 0;
+ return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
+
+ default:
+ return -ENOTTY;
+ }
+ return -ENOTTY;
+}
+
+static struct block_device_operations ace_fops = {
+ .owner = THIS_MODULE,
+ .open = ace_open,
+ .release = ace_release,
+ .media_changed = ace_media_changed,
+ .revalidate_disk = ace_revalidate_disk,
+ .ioctl = ace_ioctl,
+};
+
+/* --------------------------------------------------------------------
+ * SystemACE device setup/teardown code
+ */
+static int __devinit ace_setup(struct ace_device *ace)
+{
+ u16 version;
+ u16 val;
+
+ int rc;
+
+ spin_lock_init(&ace->lock);
+ init_completion(&ace->id_completion);
+
+ /*
+ * Map the device
+ */
+ ace->baseaddr = ioremap(ace->physaddr, 0x80);
+ if (!ace->baseaddr)
+ goto err_ioremap;
+
+ if (ace->irq != NO_IRQ) {
+ rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
+ if (rc) {
+ /* Failure - fall back to polled mode */
+ dev_err(ace->dev, "request_irq failed\n");
+ ace->irq = NO_IRQ;
+ }
+ }
+
+ /*
+ * Initialize the state machine tasklet and stall timer
+ */
+ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
+ setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+
+ /*
+ * Initialize the request queue
+ */
+ ace->queue = blk_init_queue(ace_request, &ace->lock);
+ if (ace->queue == NULL)
+ goto err_blk_initq;
+ blk_queue_hardsect_size(ace->queue, 512);
+
+ /*
+ * Allocate and initialize GD structure
+ */
+ ace->gd = alloc_disk(ACE_NUM_MINORS);
+ if (!ace->gd)
+ goto err_alloc_disk;
+
+ ace->gd->major = ace_major;
+ ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
+ ace->gd->fops = &ace_fops;
+ ace->gd->queue = ace->queue;
+ ace->gd->private_data = ace;
+ snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
+
+ /* set bus width */
+ if (ace->bus_width == 1) {
+ /* 0x0101 should work regardless of endianness */
+ ace_out_le16(ace, ACE_BUSMODE, 0x0101);
+
+ /* read it back to determine endianness */
+ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
+ ace->reg_ops = &ace_reg_le16_ops;
+ else
+ ace->reg_ops = &ace_reg_be16_ops;
+ } else {
+ ace_out_8(ace, ACE_BUSMODE, 0x00);
+ ace->reg_ops = &ace_reg_8_ops;
+ }
+
+ /* Make sure version register is sane */
+ version = ace_in(ace, ACE_VERSION);
+ if ((version == 0) || (version == 0xFFFF))
+ goto err_read;
+
+ /* Put sysace in a sane state by clearing most control reg bits */
+ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
+ ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
+
+ /* Enable interrupts */
+ val = ace_in(ace, ACE_CTRL);
+ val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
+ ace_out(ace, ACE_CTRL, val);
+
+ /* Print the identification */
+ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
+ (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
+ dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
+ ace->physaddr, ace->baseaddr, ace->irq);
+
+ ace->media_change = 1;
+ ace_revalidate_disk(ace->gd);
+
+ /* Make the sysace device 'live' */
+ add_disk(ace->gd);
+
+ return 0;
+
+ err_read:
+ put_disk(ace->gd);
+ err_alloc_disk:
+ blk_cleanup_queue(ace->queue);
+ err_blk_initq:
+ iounmap(ace->baseaddr);
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+ err_ioremap:
+ printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
+ ace->physaddr);
+ return -ENOMEM;
+}
+
+static void __devexit ace_teardown(struct ace_device *ace)
+{
+ if (ace->gd) {
+ del_gendisk(ace->gd);
+ put_disk(ace->gd);
+ }
+
+ if (ace->queue)
+ blk_cleanup_queue(ace->queue);
+
+ tasklet_kill(&ace->fsm_tasklet);
+
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+
+ iounmap(ace->baseaddr);
+}
+
+/* ---------------------------------------------------------------------
+ * Platform Bus Support
+ */
+
+static int __devinit ace_probe(struct device *device)
+{
+ struct platform_device *dev = to_platform_device(device);
+ struct ace_device *ace;
+ int i;
+
+ dev_dbg(device, "ace_probe(%p)\n", device);
+
+ /*
+ * Allocate the ace device structure
+ */
+ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
+ if (!ace)
+ goto err_alloc;
+
+ ace->dev = device;
+ ace->id = dev->id;
+ ace->irq = NO_IRQ;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ ace->physaddr = dev->resource[i].start;
+ if (dev->resource[i].flags & IORESOURCE_IRQ)
+ ace->irq = dev->resource[i].start;
+ }
+
+ /* FIXME: Should get bus_width from the platform_device struct */
+ ace->bus_width = 1;
+
+ dev_set_drvdata(&dev->dev, ace);
+
+ /* Call the bus-independent setup code */
+ if (ace_setup(ace) != 0)
+ goto err_setup;
+
+ return 0;
+
+ err_setup:
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ace);
+ err_alloc:
+ printk(KERN_ERR "xsysace: could not initialize device\n");
+ return -ENOMEM;
+}
+
+/*
+ * Platform bus remove() method
+ */
+static int __devexit ace_remove(struct device *device)
+{
+ struct ace_device *ace = dev_get_drvdata(device);
+
+ dev_dbg(device, "ace_remove(%p)\n", device);
+
+ if (ace) {
+ ace_teardown(ace);
+ kfree(ace);
+ }
+
+ return 0;
+}
+
+static struct device_driver ace_driver = {
+ .name = "xsysace",
+ .bus = &platform_bus_type,
+ .probe = ace_probe,
+ .remove = __devexit_p(ace_remove),
+};
+
+/* ---------------------------------------------------------------------
+ * Module init/exit routines
+ */
+static int __init ace_init(void)
+{
+ ace_major = register_blkdev(ace_major, "xsysace");
+ if (ace_major <= 0) {
+ printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
+ return ace_major;
+ }
+
+ pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
+ return driver_register(&ace_driver);
+}
+
+static void __exit ace_exit(void)
+{
+ pr_debug("Unregistering Xilinx SystemACE driver\n");
+ driver_unregister(&ace_driver);
+ unregister_blkdev(ace_major, "xsysace");
+}
+
+module_init(ace_init);
+module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 2abf94cc313..e40fa98842e 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -371,9 +371,7 @@ static void __exit z2_exit(void)
{
int i, j;
blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
- if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
- printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
-
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
blk_cleanup_queue(z2_queue);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ec9dc3d53f1..d8d7125529c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -114,7 +114,7 @@ config COMPUTONE
config ROCKETPORT
tristate "Comtrol RocketPort support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
This driver supports Comtrol RocketPort and RocketModem PCI boards.
These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
@@ -157,7 +157,7 @@ config CYZ_INTR
config DIGIEPCA
tristate "Digiboard Intelligent Async Support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
---help---
This is a driver for Digi International's Xx, Xeve, and Xem series
of cards which provide multiple serial ports. You would need
@@ -213,8 +213,6 @@ config MOXA_SMARTIO_NEW
This is upgraded (1.9.1) driver from original Moxa drivers with
changes finally resulting in PCI probing.
- Use at your own risk.
-
This driver can also be built as a module. The module will be called
mxser_new. If you want to do that, say M here.
@@ -354,7 +352,7 @@ config STALDRV
config STALLION
tristate "Stallion EasyIO or EC8/32 support"
- depends on STALDRV && BROKEN_ON_SMP
+ depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
help
If you have an EasyIO or EasyConnection 8/32 multiport Stallion
card, then this is for you; say Y. Make sure to read
@@ -365,7 +363,7 @@ config STALLION
config ISTALLION
tristate "Stallion EC8/64, ONboard, Brumby support"
- depends on STALDRV && BROKEN_ON_SMP
+ depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
help
If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
serial multiport card, say Y here. Make sure to read
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 179c7a3b6e7..ec116df919d 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -329,13 +330,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
- *
- * Note: we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the
- * threads.
*/
flags = current->flags;
- current->flags |= PF_NOFREEZE;
wait_event(apm_suspend_waitqueue,
as->suspend_state == SUSPEND_DONE);
@@ -365,13 +361,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
- *
- * Note: we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the
- * threads.
*/
flags = current->flags;
- current->flags |= PF_NOFREEZE;
wait_event_interruptible(apm_suspend_waitqueue,
as->suspend_state == SUSPEND_DONE);
@@ -598,7 +589,6 @@ static int __init apm_init(void)
kapmd_tsk = NULL;
return ret;
}
- kapmd_tsk->flags |= PF_NOFREEZE;
wake_up_process(kapmd_tsk);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e04005b5f8a..9e0adfe27c1 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -646,6 +646,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
+#include <linux/firmware.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -680,6 +681,44 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define STD_COM_FLAGS (0)
+/* firmware stuff */
+#define ZL_MAX_BLOCKS 16
+#define DRIVER_VERSION 0x02010203
+#define RAM_SIZE 0x80000
+
+#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
+
+enum zblock_type {
+ ZBLOCK_PRG = 0,
+ ZBLOCK_FPGA = 1
+};
+
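+/* Firmware image layout, as consumed by __cyz_load_fw(): a zfile_header
+ * at the start of the image, n_config zfile_config entries at
+ * config_offset and n_blocks zfile_block descriptors at block_offset.
+ * Each block descriptor locates its payload in the image (file_offset,
+ * size) and says where it goes: RAM at ram_offset, or the FPGA for
+ * ZBLOCK_FPGA blocks.
+ */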
+struct zfile_header {
+ char name[64];
+ char date[32];
+ char aux[32];
+ u32 n_config;
+ u32 config_offset;
+ u32 n_blocks;
+ u32 block_offset;
+ u32 reserved[9];
+} __attribute__ ((packed));
+
+struct zfile_config {
+ char name[64];
+ u32 mailbox;
+ u32 function;
+ u32 n_blocks;
+ u32 block_list[ZL_MAX_BLOCKS];
+} __attribute__ ((packed));
+
+struct zfile_block {
+ u32 type;
+ u32 file_offset;
+ u32 ram_offset;
+ u32 size;
+} __attribute__ ((packed));
+
static struct tty_driver *cy_serial_driver;
#ifdef CONFIG_ISA
@@ -1851,11 +1890,11 @@ static void cyz_poll(unsigned long arg)
struct cyclades_card *cinfo;
struct cyclades_port *info;
struct tty_struct *tty;
- static struct FIRM_ID *firm_id;
- static struct ZFW_CTRL *zfw_ctrl;
- static struct BOARD_CTRL *board_ctrl;
- static struct CH_CTRL *ch_ctrl;
- static struct BUF_CTRL *buf_ctrl;
+ struct FIRM_ID __iomem *firm_id;
+ struct ZFW_CTRL __iomem *zfw_ctrl;
+ struct BOARD_CTRL __iomem *board_ctrl;
+ struct CH_CTRL __iomem *ch_ctrl;
+ struct BUF_CTRL __iomem *buf_ctrl;
unsigned long expires = jiffies + HZ;
int card, port;
@@ -1999,7 +2038,6 @@ static int startup(struct cyclades_port *info)
struct ZFW_CTRL __iomem *zfw_ctrl;
struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- int retval;
base_addr = card->base_addr;
@@ -2371,7 +2409,6 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
struct ZFW_CTRL __iomem *zfw_ctrl;
struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- int retval;
base_addr = cinfo->base_addr;
firm_id = base_addr + ID_ADDRESS;
@@ -4429,10 +4466,10 @@ static void cy_hangup(struct tty_struct *tty)
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
- u32 mailbox;
+ u32 uninitialized_var(mailbox);
unsigned int nports;
unsigned short chip_number;
- int index, port;
+ int uninitialized_var(index), port;
spin_lock_init(&cinfo->card_lock);
@@ -4735,17 +4772,295 @@ static int __init cy_detect_isa(void)
} /* cy_detect_isa */
#ifdef CONFIG_PCI
-static void __devinit plx_init(void __iomem * addr, __u32 initctl)
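+/* Check a fixed-size string field from the firmware header: bytes up to
+ * the first NUL must be 7-bit ASCII and the remainder must be NUL padding.
+ */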
+static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
+{
+ unsigned int a;
+
+ for (a = 0; a < size && *str; a++, str++)
+ if (*str & 0x80)
+ return -EINVAL;
+
+ for (; a < size; a++, str++)
+ if (*str)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline void __devinit cyz_fpga_copy(void __iomem *fpga, u8 *data,
+ unsigned int size)
+{
+ for (; size > 0; size--) {
+ cy_writel(fpga, *data++);
+ udelay(10);
+ }
+}
+
+static void __devinit plx_init(struct pci_dev *pdev, int irq,
+ struct RUNTIME_9060 __iomem *addr)
{
/* Reset PLX */
- cy_writel(addr + initctl, readl(addr + initctl) | 0x40000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
udelay(100L);
- cy_writel(addr + initctl, readl(addr + initctl) & ~0x40000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
/* Reload Config. Registers from EEPROM */
- cy_writel(addr + initctl, readl(addr + initctl) | 0x20000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
udelay(100L);
- cy_writel(addr + initctl, readl(addr + initctl) & ~0x20000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
+
+ /* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
+ * the IRQ is lost and, thus, we have to re-write it to the PCI config.
+ * registers. This will remain here until we find a permanent fix.
+ */
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+}
+
+static int __devinit __cyz_load_fw(const struct firmware *fw,
+ const char *name, const u32 mailbox, void __iomem *base,
+ void __iomem *fpga)
+{
+ void *ptr = fw->data;
+ struct zfile_header *h = ptr;
+ struct zfile_config *c, *cs;
+ struct zfile_block *b, *bs;
+ unsigned int a, tmp, len = fw->size;
+#define BAD_FW KERN_ERR "Bad firmware: "
+ if (len < sizeof(*h)) {
+ printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
+ return -EINVAL;
+ }
+
+ cs = ptr + h->config_offset;
+ bs = ptr + h->block_offset;
+
+ if ((void *)(cs + h->n_config) > ptr + len ||
+ (void *)(bs + h->n_blocks) > ptr + len) {
+ printk(BAD_FW "too short");
+ return -EINVAL;
+ }
+
+ if (cyc_isfwstr(h->name, sizeof(h->name)) ||
+ cyc_isfwstr(h->date, sizeof(h->date))) {
+ printk(BAD_FW "bad formatted header string\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(name, h->name, sizeof(h->name))) {
+ printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
+ return -EINVAL;
+ }
+
+ tmp = 0;
+ for (c = cs; c < cs + h->n_config; c++) {
+ for (a = 0; a < c->n_blocks; a++)
+ if (c->block_list[a] > h->n_blocks) {
+ printk(BAD_FW "bad block ref number in cfgs\n");
+ return -EINVAL;
+ }
+ if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
+ tmp++;
+ }
+ if (!tmp) {
+ printk(BAD_FW "nothing appropriate\n");
+ return -EINVAL;
+ }
+
+ for (b = bs; b < bs + h->n_blocks; b++)
+ if (b->file_offset + b->size > len) {
+ printk(BAD_FW "bad block data offset\n");
+ return -EINVAL;
+ }
+
+ /* everything is OK, let's seek'n'load it */
+ for (c = cs; c < cs + h->n_config; c++)
+ if (c->mailbox == mailbox && c->function == 0)
+ break;
+
+ for (a = 0; a < c->n_blocks; a++) {
+ b = &bs[c->block_list[a]];
+ if (b->type == ZBLOCK_FPGA) {
+ if (fpga != NULL)
+ cyz_fpga_copy(fpga, ptr + b->file_offset,
+ b->size);
+ } else {
+ if (base != NULL)
+ memcpy_toio(base + b->ram_offset,
+ ptr + b->file_offset, b->size);
+ }
+ }
+#undef BAD_FW
+ return 0;
+}
+
+static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
+ struct RUNTIME_9060 __iomem *ctl_addr, int irq)
+{
+ const struct firmware *fw;
+ struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
+ struct CUSTOM_REG __iomem *cust = base_addr;
+ struct ZFW_CTRL __iomem *pt_zfwctrl;
+ void __iomem *tmp;
+ u32 mailbox, status;
+ unsigned int i;
+ int retval;
+
+ retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
+ if (retval) {
+ dev_err(&pdev->dev, "can't get firmware\n");
+ goto err;
+ }
+
+ /* Check whether the firmware is already loaded and running. If
+ positive, skip this board */
+ if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+ u32 cntval = readl(base_addr + 0x190);
+
+ udelay(100);
+ if (cntval != readl(base_addr + 0x190)) {
+ /* FW counter is working, FW is running */
+ dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
+ "Skipping board.\n");
+ retval = 0;
+ goto err_rel;
+ }
+ }
+
+ /* start boot */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
+ ~0x00030800UL);
+
+ mailbox = readl(&ctl_addr->mail_box_0);
+
+ if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+ }
+
+ plx_init(pdev, irq, ctl_addr);
+
+ if (mailbox != 0) {
+ /* load FPGA */
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
+ base_addr);
+ if (retval)
+ goto err_rel;
+ if (!Z_FPGA_LOADED(ctl_addr)) {
+ dev_err(&pdev->dev, "fw upload successful, but fw is "
+ "not loaded\n");
+ goto err_rel;
+ }
+ }
+
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+
+ /* clear memory */
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ if (mailbox != 0) {
+ /* set window to last 512K of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
+ //sleep(1);
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ /* set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ //sleep(1);
+ }
+
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
+ release_firmware(fw);
+ if (retval)
+ goto err;
+
+ /* finish boot and start boards */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_start, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ i = 0;
+ while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ if (status == ZFIRM_HLT) {
+ dev_err(&pdev->dev, "you need an external power supply "
+ "for this number of ports. Firmware halted and "
+ "board reset.\n");
+ retval = -EIO;
+ goto err;
+ }
+ dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
+ "some more time\n", status);
+ while ((status = readl(&fid->signature)) != ZFIRM_ID &&
+ i++ < 200)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ dev_err(&pdev->dev, "Board not started in 20 seconds! "
+ "Giving up. (fid->signature = 0x%x)\n",
+ status);
+ dev_info(&pdev->dev, "*** Warning ***: if you are "
+ "upgrading the FW, please power cycle the "
+ "system before loading the new FW to the "
+ "Cyclades-Z.\n");
+
+ if (Z_FPGA_LOADED(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ retval = -EIO;
+ goto err;
+ }
+ dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
+ i / 10);
+ }
+ pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
+
+ dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
+ base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
+ base_addr + readl(&fid->zfwctrl_addr));
+
+ dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
+ readl(&pt_zfwctrl->board_ctrl.fw_version),
+ readl(&pt_zfwctrl->board_ctrl.n_channel));
+
+ if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
+ dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
+ "check the connection between the Z host card and the "
+ "serial expanders.\n");
+
+ if (Z_FPGA_LOADED(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ dev_info(&pdev->dev, "Null number of ports detected. Board "
+ "reset.\n");
+ retval = 0;
+ goto err;
+ }
+
+ cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
+ cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
+
+ /*
+ Early firmware failed to start looking for commands.
+ This enables firmware interrupts for those commands.
+ */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ (1 << 17));
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ 0x00030800UL);
+
+ plx_init(pdev, irq, ctl_addr);
+
+ return 0;
+err_rel:
+ release_firmware(fw);
+err:
+ return retval;
}
static int __devinit cy_pci_probe(struct pci_dev *pdev,
@@ -4827,16 +5142,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
/* Disable interrupts on the PLX before resetting it */
- cy_writew(addr0 + 0x68,
- readw(addr0 + 0x68) & ~0x0900);
+ cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
- plx_init(addr0, 0x6c);
- /* For some yet unknown reason, once the PLX9060 reloads
- the EEPROM, the IRQ is lost and, thus, we have to
- re-write it to the PCI config. registers.
- This will remain here until we find a permanent
- fix. */
- pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+ plx_init(pdev, irq, addr0);
mailbox = (u32)readl(&ctl_addr->mail_box_0);
@@ -4877,6 +5185,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
cy_writel(addr2 + ID_ADDRESS, 0L);
+ retval = cyz_load_fw(pdev, addr2, addr0, irq);
+ if (retval)
+ goto err_unmap;
/* This must be a Cyclades-8Zo/PCI. The extendable
version will have a different device_id and will
be allocated its maximum number of ports. */
@@ -4953,15 +5264,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
case PLX_9060:
case PLX_9080:
default: /* Old boards, use PLX_9060 */
-
- plx_init(addr0, 0x6c);
- /* For some yet unknown reason, once the PLX9060 reloads
- the EEPROM, the IRQ is lost and, thus, we have to
- re-write it to the PCI config. registers.
- This will remain here until we find a permanent
- fix. */
- pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
-
+ plx_init(pdev, irq, addr0);
cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
break;
}
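For reference, the loader added above relies on the in-kernel firmware interface instead of a blob compiled into the driver. Below is a minimal sketch of the request/release pattern cyz_load_fw() follows; only the "cyzfirm.bin" name and the device pointer come from this patch, while the wrapper function and the size check are illustrative and assume the zfile_header definition above is in scope.

    #include <linux/firmware.h>
    #include <linux/pci.h>

    /* Illustrative sketch of the request_firmware()/release_firmware()
     * pattern used by cyz_load_fw(); the real driver parses and uploads
     * the image where the size check sits here. */
    static int example_load_fw(struct pci_dev *pdev)
    {
            const struct firmware *fw;
            int retval;

            retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
            if (retval)
                    return retval;          /* no image in /lib/firmware */

            /* fw->data and fw->size stay valid until release_firmware() */
            if (fw->size < sizeof(struct zfile_header))
                    retval = -EINVAL;       /* cannot even hold a header */

            release_firmware(fw);
            return retval;
    }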
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 9138b49e676..ee83ff9efed 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -72,6 +72,8 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ idr_init(&dev->drw_idr);
+
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 0580fa33cb7..441bbdbf151 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -94,7 +94,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS)
{
- drm_sman_mm_t sman_mm;
+ struct drm_sman_mm sman_mm;
sman_mm.private = (void *)0xFFFFFFFF;
sman_mm.allocate = sis_sman_mm_allocate;
sman_mm.free = sis_sman_mm_free;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index b3ab42e0dd4..83c1151ec7a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -679,6 +679,7 @@ static int khvcd(void *unused)
int poll_mask;
struct hvc_struct *hp;
+ set_freezable();
__set_current_state(TASK_RUNNING);
do {
poll_mask = 0;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 761f77740d6..77a7a4a0662 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -171,9 +171,6 @@ static struct pci_driver isicom_driver = {
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static DECLARE_COMPLETION(isi_timerdone);
-static char re_schedule = 1;
-
static void isicom_tx(unsigned long _data);
static void isicom_start(struct tty_struct *tty);
@@ -187,7 +184,7 @@ static signed char linuxb_to_isib[] = {
struct isi_board {
unsigned long base;
- unsigned char irq;
+ int irq;
unsigned char port_count;
unsigned short status;
unsigned short port_status; /* each bit for each port */
@@ -227,7 +224,7 @@ static struct isi_port isi_ports[PORT_COUNT];
* it wants to talk.
*/
-static inline int WaitTillCardIsFree(u16 base)
+static inline int WaitTillCardIsFree(unsigned long base)
{
unsigned int count = 0;
unsigned int a = in_atomic(); /* do we run under spinlock? */
@@ -243,17 +240,18 @@ static inline int WaitTillCardIsFree(u16 base)
static int lock_card(struct isi_board *card)
{
- char retries;
unsigned long base = card->base;
+ unsigned int retries, a;
- for (retries = 0; retries < 100; retries++) {
+ for (retries = 0; retries < 10; retries++) {
spin_lock_irqsave(&card->card_lock, card->flags);
- if (inw(base + 0xe) & 0x1) {
- return 1;
- } else {
- spin_unlock_irqrestore(&card->card_lock, card->flags);
- udelay(1000); /* 1ms */
+ for (a = 0; a < 10; a++) {
+ if (inw(base + 0xe) & 0x1)
+ return 1;
+ udelay(10);
}
+ spin_unlock_irqrestore(&card->card_lock, card->flags);
+ msleep(10);
}
printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
card->base);
@@ -261,23 +259,6 @@ static int lock_card(struct isi_board *card)
return 0; /* Failed to acquire the card! */
}
-static int lock_card_at_interrupt(struct isi_board *card)
-{
- unsigned char retries;
- unsigned long base = card->base;
-
- for (retries = 0; retries < 200; retries++) {
- spin_lock_irqsave(&card->card_lock, card->flags);
-
- if (inw(base + 0xe) & 0x1)
- return 1;
- else
- spin_unlock_irqrestore(&card->card_lock, card->flags);
- }
- /* Failing in interrupt is an acceptable event */
- return 0; /* Failed to acquire the card! */
-}
-
static void unlock_card(struct isi_board *card)
{
spin_unlock_irqrestore(&card->card_lock, card->flags);
@@ -415,7 +396,9 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
static void isicom_tx(unsigned long _data)
{
- short count = (BOARD_COUNT-1), card, base;
+ unsigned long flags, base;
+ unsigned int retries;
+ short count = (BOARD_COUNT-1), card;
short txcount, wrd, residue, word_count, cnt;
struct isi_port *port;
struct tty_struct *tty;
@@ -435,32 +418,34 @@ static void isicom_tx(unsigned long _data)
count = isi_card[card].port_count;
port = isi_card[card].ports;
base = isi_card[card].base;
+
+ spin_lock_irqsave(&isi_card[card].card_lock, flags);
+ for (retries = 0; retries < 100; retries++) {
+ if (inw(base + 0xe) & 0x1)
+ break;
+ udelay(2);
+ }
+ if (retries >= 100)
+ goto unlock;
+
for (;count > 0;count--, port++) {
- if (!lock_card_at_interrupt(&isi_card[card]))
- continue;
/* port not active or tx disabled to force flow control */
if (!(port->flags & ASYNC_INITIALIZED) ||
!(port->status & ISI_TXOK))
- unlock_card(&isi_card[card]);
continue;
tty = port->tty;
-
- if (tty == NULL) {
- unlock_card(&isi_card[card]);
+ if (tty == NULL)
continue;
- }
txcount = min_t(short, TX_SIZE, port->xmit_cnt);
- if (txcount <= 0 || tty->stopped || tty->hw_stopped) {
- unlock_card(&isi_card[card]);
+ if (txcount <= 0 || tty->stopped || tty->hw_stopped)
continue;
- }
- if (!(inw(base + 0x02) & (1 << port->channel))) {
- unlock_card(&isi_card[card]);
+
+ if (!(inw(base + 0x02) & (1 << port->channel)))
continue;
- }
+
pr_dbg("txing %d bytes, port%d.\n", txcount,
port->channel + 1);
outw((port->channel << isi_card[card].shift_count) | txcount,
@@ -508,16 +493,12 @@ static void isicom_tx(unsigned long _data)
port->status &= ~ISI_TXOK;
if (port->xmit_cnt <= WAKEUP_CHARS)
tty_wakeup(tty);
- unlock_card(&isi_card[card]);
}
+unlock:
+ spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
/* schedule another tx for hopefully in about 10ms */
sched_again:
- if (!re_schedule) {
- complete(&isi_timerdone);
- return;
- }
-
mod_timer(&tx, jiffies + msecs_to_jiffies(10));
}
@@ -1749,17 +1730,13 @@ static unsigned int card_count;
static int __devinit isicom_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned int ioaddr, signature, index;
+ unsigned int signature, index;
int retval = -EPERM;
- u8 pciirq;
struct isi_board *board = NULL;
if (card_count >= BOARD_COUNT)
goto err;
- ioaddr = pci_resource_start(pdev, 3);
- /* i.e at offset 0x1c in the PCI configuration register space. */
- pciirq = pdev->irq;
dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
/* allot the first empty slot in the array */
@@ -1770,8 +1747,8 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
}
board->index = index;
- board->base = ioaddr;
- board->irq = pciirq;
+ board->base = pci_resource_start(pdev, 3);
+ board->irq = pdev->irq;
card_count++;
pci_set_drvdata(pdev, board);
@@ -1901,9 +1878,7 @@ error:
static void __exit isicom_exit(void)
{
- re_schedule = 0;
-
- wait_for_completion_timeout(&isi_timerdone, HZ);
+ del_timer_sync(&tx);
pci_unregister_driver(&isicom_driver);
tty_unregister_driver(isicom_normal);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 80940992299..3c66f402f9d 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -2163,14 +2163,10 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
cdkhdr_t __iomem *hdrp;
cdkctrl_t __iomem *cp;
unsigned char __iomem *bits;
- unsigned long flags;
-
- spin_lock_irqsave(&brd_lock, flags);
if (test_bit(ST_CMDING, &portp->state)) {
printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
(int) cmd);
- spin_unlock_irqrestore(&brd_lock, flags);
return;
}
@@ -2191,7 +2187,6 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
writeb(readb(bits) | portp->portbit, bits);
set_bit(ST_CMDING, &portp->state);
EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
}
static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
@@ -3215,13 +3210,13 @@ static int stli_initecp(struct stlibrd *brdp)
goto err;
}
+ brdp->iosize = ECP_IOSIZE;
+
if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
retval = -EIO;
goto err;
}
- brdp->iosize = ECP_IOSIZE;
-
/*
* Based on the specific board type setup the common vars to access
* and enable shared memory. Set all board specific information now
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index e0d35c20c04..ed76f0a127f 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1405,7 +1405,6 @@ static int moxaCard;
static struct mon_str moxaLog;
static int moxaFuncTout = HZ / 2;
-static void moxadelay(int);
static void moxafunc(void __iomem *, int, ushort);
static void wait_finish(void __iomem *);
static void low_water_check(void __iomem *);
@@ -2404,10 +2403,10 @@ void MoxaPortSendBreak(int port, int ms100)
ofsAddr = moxa_ports[port].tableAddr;
if (ms100) {
moxafunc(ofsAddr, FC_SendBreak, Magic_code);
- moxadelay(ms100 * (HZ / 10));
+ msleep(ms100 * 10);
} else {
moxafunc(ofsAddr, FC_SendBreak, Magic_code);
- moxadelay(HZ / 4); /* 250 ms */
+ msleep(250);
}
moxafunc(ofsAddr, FC_StopBreak, Magic_code);
}
@@ -2476,18 +2475,6 @@ static int moxa_set_serial_info(struct moxa_port *info,
/*****************************************************************************
* Static local functions: *
*****************************************************************************/
-/*
- * moxadelay - delays a specified number ticks
- */
-static void moxadelay(int tick)
-{
- unsigned long st, et;
-
- st = jiffies;
- et = st + tick;
- while (time_before(jiffies, et));
-}
-
static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
{
@@ -2535,7 +2522,7 @@ static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
return -EFAULT;
baseAddr = moxa_boards[cardno].basemem;
writeb(HW_reset, baseAddr + Control_reg); /* reset */
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
for (i = 0; i < 4096; i++)
writeb(0, baseAddr + i); /* clear fix page */
for (i = 0; i < len; i++)
@@ -2713,7 +2700,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + C218_key) == keycode)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + C218_key) != keycode) {
return (-1);
@@ -2725,7 +2712,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + C218_key) == keycode)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
retry++;
} while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
@@ -2736,7 +2723,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code) {
return (-1);
@@ -2746,7 +2733,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code) {
return (-1);
@@ -2788,7 +2775,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 10; i++) {
if (readw(baseAddr + C320_key) == C320_KeyCode)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + C320_key) != C320_KeyCode)
return (-1);
@@ -2799,7 +2786,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 10; i++) {
if (readw(baseAddr + C320_key) == C320_KeyCode)
break;
- moxadelay(1);
+ msleep(10);
}
retry++;
} while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
@@ -2809,7 +2796,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 600; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-100);
@@ -2828,7 +2815,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 500; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-102);
@@ -2842,7 +2829,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 600; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-102);
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3494e3fc44b..b37e626f4fa 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -213,14 +213,6 @@ static inline void rc_release_io_range(struct riscom_board * const bp)
release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
}
-/* Must be called with enabled interrupts */
-static inline void rc_long_delay(unsigned long delay)
-{
- unsigned long i;
-
- for (i = jiffies + delay; time_after(i,jiffies); ) ;
-}
-
/* Reset and setup CD180 chip */
static void __init rc_init_CD180(struct riscom_board const * bp)
{
@@ -231,7 +223,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
rc_wait_CCR(bp); /* Wait for CCR ready */
rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
sti();
- rc_long_delay(HZ/20); /* Delay 0.05 sec */
+ msleep(50); /* Delay 0.05 sec */
cli();
rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
rc_out(bp, CD180_GICR, 0); /* Clear all bits */
@@ -280,7 +272,7 @@ static int __init rc_probe(struct riscom_board *bp)
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */
- rc_long_delay(HZ/20);
+ msleep(50);
irqs = probe_irq_off(irqs);
val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index baf7234b6e6..455855631ae 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -345,18 +345,6 @@ static inline void sx_release_io_range(struct specialix_board * bp)
}
-/* Must be called with enabled interrupts */
-/* Ugly. Very ugly. Don't use this for anything else than initialization
- code */
-static inline void sx_long_delay(unsigned long delay)
-{
- unsigned long i;
-
- for (i = jiffies + delay; time_after(i, jiffies); ) ;
-}
-
-
-
/* Set the IRQ using the RTS lines that run to the PAL on the board.... */
static int sx_set_irq ( struct specialix_board *bp)
{
@@ -397,7 +385,7 @@ static int sx_init_CD186x(struct specialix_board * bp)
spin_lock_irqsave(&bp->lock, flags);
sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */
spin_unlock_irqrestore(&bp->lock, flags);
- sx_long_delay(HZ/20); /* Delay 0.05 sec */
+ msleep(50); /* Delay 0.05 sec */
spin_lock_irqsave(&bp->lock, flags);
sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */
sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */
@@ -533,7 +521,7 @@ static int sx_probe(struct specialix_board *bp)
sx_wait_CCR(bp);
sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */
sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */
- sx_long_delay(HZ/20);
+ msleep(50);
irqs = probe_irq_off(irqs);
dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8c73ccb8830..93d0bb8b4c0 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1788,7 +1788,6 @@ static void stl_offintr(struct work_struct *work)
if (tty == NULL)
return;
- lock_kernel();
if (test_bit(ASYI_TXLOW, &portp->istate))
tty_wakeup(tty);
@@ -1802,7 +1801,6 @@ static void stl_offintr(struct work_struct *work)
if (portp->flags & ASYNC_CHECK_CD)
tty_hangup(tty); /* FIXME: module removal race here - AKPM */
}
- unlock_kernel();
}
/*****************************************************************************/
@@ -2357,9 +2355,6 @@ static int __devinit stl_pciprobe(struct pci_dev *pdev,
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
goto err;
- dev_info(&pdev->dev, "please, report this to LKML: %x/%x/%x\n",
- pdev->vendor, pdev->device, pdev->class);
-
retval = pci_enable_device(pdev);
if (retval)
goto err;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6650ae1c088..edb7002a321 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -729,10 +729,9 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
/* although the numbers above are not valid since long ago, the
point is still up-to-date and the comment still has its value
even if only as a historical artifact. --mj, July 1998 */
- vc = kmalloc(sizeof(struct vc_data), GFP_KERNEL);
+ vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
if (!vc)
return -ENOMEM;
- memset(vc, 0, sizeof(*vc));
vc_cons[currcons].d = vc;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
@@ -1991,8 +1990,7 @@ static int is_double_width(uint32_t ucs)
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
- return bisearch(ucs, double_width,
- sizeof(double_width) / sizeof(*double_width) - 1);
+ return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
/* acquires console_sem */
@@ -2989,8 +2987,24 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
return retval;
}
-static int unbind_con_driver(const struct consw *csw, int first, int last,
- int deflt)
+/**
+ * unbind_con_driver - unbind a console driver
+ * @csw: pointer to console driver to unregister
+ * @first: first in range of consoles that @csw should be unbound from
+ * @last: last in range of consoles that @csw should be unbound from
+ * @deflt: should next bound console driver be default after @csw is unbound?
+ *
+ * To unbind a driver from all possible consoles, pass 0 as @first and
+ * %MAX_NR_CONSOLES as @last.
+ *
+ * @deflt controls whether the console that ends up replacing @csw should be
+ * the default console.
+ *
+ * RETURNS:
+ * -ENODEV if @csw isn't a registered console driver or can't be unregistered
+ * or 0 on success.
+ */
+int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
@@ -3075,6 +3089,7 @@ err:
return retval;
}
+EXPORT_SYMBOL(unbind_con_driver);
static int vt_bind(struct con_driver *con)
{
@@ -3491,9 +3506,6 @@ void do_blank_screen(int entering_gfx)
}
return;
}
- if (blank_state != blank_normal_wait)
- return;
- blank_state = blank_off;
/* entering graphics mode? */
if (entering_gfx) {
@@ -3501,10 +3513,15 @@ void do_blank_screen(int entering_gfx)
save_screen(vc);
vc->vc_sw->con_blank(vc, -1, 1);
console_blanked = fg_console + 1;
+ blank_state = blank_off;
set_origin(vc);
return;
}
+ if (blank_state != blank_normal_wait)
+ return;
+ blank_state = blank_off;
+
/* don't blank graphics */
if (vc->vc_mode != KD_TEXT) {
console_blanked = fg_console + 1;
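With unbind_con_driver() exported above, a console driver (fbcon is the intended user) can detach itself from a range of virtual consoles exactly as the kerneldoc describes. A hedged usage sketch follows; example_csw stands in for a consw that was registered earlier, and the prototype is assumed to be picked up from <linux/console.h>.

    #include <linux/console.h>
    #include <linux/vt.h>

    /* Placeholder: a real caller passes the consw it registered earlier. */
    extern const struct consw example_csw;

    static int example_detach_all(void)
    {
            /* Unbind from every console (0 .. MAX_NR_CONSOLES, as the
             * kerneldoc suggests) and let the replacement driver become
             * the default. */
            return unbind_con_driver(&example_csw, 0, MAX_NR_CONSOLES, 1);
    }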
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7b622300d0e..804875de580 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1906,6 +1906,7 @@ static void do_edac_check(void)
static int edac_kernel_thread(void *arg)
{
+ set_freezable();
while (!kthread_should_stop()) {
do_edac_check();
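This hunk, like the hvc_console one above, moves a kernel thread to the opt-in freezer model: kthreads are no longer freezable by default, so each one that should be suspended calls set_freezable() and then polls try_to_freeze() in its main loop. A minimal sketch of the pattern (the thread body and names are illustrative, not taken from any driver in this patch):

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/sched.h>

    /* Illustrative freezable kthread: opts in with set_freezable() and
     * parks in the refrigerator via try_to_freeze() during suspend. */
    static int example_thread(void *unused)
    {
            set_freezable();

            while (!kthread_should_stop()) {
                    try_to_freeze();
                    /* ... one unit of real work would go here ... */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }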
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 7eaae3834e1..275d392eca6 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -96,6 +96,10 @@ static int __devinit lm70_probe(struct spi_device *spi)
struct lm70 *p_lm70;
int status;
+ /* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
+ if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
+ return -EINVAL;
+
p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
if (!p_lm70)
return -ENOMEM;
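The new check refuses to bind unless the SPI master was configured for mode-0, 3-wire signalling, so board code has to request exactly that combination. A hedged sketch of such a declaration follows; the bus number, chip select and clock rate are made-up placeholders, only the "lm70" modalias and the mode flags come from the driver.

    #include <linux/init.h>
    #include <linux/spi/spi.h>

    /* Illustrative board registration matching the lm70_probe() check:
     * SPI mode 0 with the shared-SI/SO (3-wire) flag set. */
    static struct spi_board_info example_lm70_board_info __initdata = {
            .modalias       = "lm70",
            .mode           = SPI_MODE_0 | SPI_3WIRE,
            .max_speed_hz   = 500000,       /* placeholder */
            .bus_num        = 1,            /* placeholder */
            .chip_select    = 0,            /* placeholder */
    };

Board setup code would then hand this to spi_register_board_info() before the controller driver probes.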
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 03188d277af..17cecf1ea79 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -630,7 +630,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
.remove = __devexit_p(pmcmsptwi_remove),
- .driver {
+ .driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 3944e889cb2..2e1c24f671c 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -153,4 +153,14 @@ config SENSORS_TSL2550
This driver can also be built as a module. If so, the module
will be called tsl2550.
+config MENELAUS
+ bool "TWL92330/Menelaus PM chip"
+ depends on I2C=y && ARCH_OMAP24XX
+ help
+ If you say yes here you get support for the Texas Instruments
+ TWL92330/Menelaus Power Management chip. This includes voltage
+ regulators, dual-slot memory card transceivers, a real-time clock
+ and other features that are often used in portable devices like
+ cell phones and PDAs.
+
endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index d8cbeb3f4b6..ca924e10595 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TPS65010) += tps65010.o
+obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
new file mode 100644
index 00000000000..48a7e2f0bdd
--- /dev/null
+++ b/drivers/i2c/chips/menelaus.c
@@ -0,0 +1,1281 @@
+#define DEBUG
+/*
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ *
+ * Some parts based tps65010.c:
+ * Copyright (C) 2004 Texas Instruments and
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * Some parts based on tlv320aic24.c:
+ * Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Changes for interrupt handling and clean-up by
+ * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
+ * Cleanup and generalized support for voltage setting by
+ * Juha Yrjola
+ * Added support for controlling VCORE and regulator sleep states,
+ * Amit Kucheria <amit.kucheria@nokia.com>
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/menelaus.h>
+
+#define DRIVER_NAME "menelaus"
+
+#define pr_err(fmt, arg...) printk(KERN_ERR DRIVER_NAME ": " fmt "\n", ## arg)
+
+#define MENELAUS_I2C_ADDRESS 0x72
+
+#define MENELAUS_REV 0x01
+#define MENELAUS_VCORE_CTRL1 0x02
+#define MENELAUS_VCORE_CTRL2 0x03
+#define MENELAUS_VCORE_CTRL3 0x04
+#define MENELAUS_VCORE_CTRL4 0x05
+#define MENELAUS_VCORE_CTRL5 0x06
+#define MENELAUS_DCDC_CTRL1 0x07
+#define MENELAUS_DCDC_CTRL2 0x08
+#define MENELAUS_DCDC_CTRL3 0x09
+#define MENELAUS_LDO_CTRL1 0x0A
+#define MENELAUS_LDO_CTRL2 0x0B
+#define MENELAUS_LDO_CTRL3 0x0C
+#define MENELAUS_LDO_CTRL4 0x0D
+#define MENELAUS_LDO_CTRL5 0x0E
+#define MENELAUS_LDO_CTRL6 0x0F
+#define MENELAUS_LDO_CTRL7 0x10
+#define MENELAUS_LDO_CTRL8 0x11
+#define MENELAUS_SLEEP_CTRL1 0x12
+#define MENELAUS_SLEEP_CTRL2 0x13
+#define MENELAUS_DEVICE_OFF 0x14
+#define MENELAUS_OSC_CTRL 0x15
+#define MENELAUS_DETECT_CTRL 0x16
+#define MENELAUS_INT_MASK1 0x17
+#define MENELAUS_INT_MASK2 0x18
+#define MENELAUS_INT_STATUS1 0x19
+#define MENELAUS_INT_STATUS2 0x1A
+#define MENELAUS_INT_ACK1 0x1B
+#define MENELAUS_INT_ACK2 0x1C
+#define MENELAUS_GPIO_CTRL 0x1D
+#define MENELAUS_GPIO_IN 0x1E
+#define MENELAUS_GPIO_OUT 0x1F
+#define MENELAUS_BBSMS 0x20
+#define MENELAUS_RTC_CTRL 0x21
+#define MENELAUS_RTC_UPDATE 0x22
+#define MENELAUS_RTC_SEC 0x23
+#define MENELAUS_RTC_MIN 0x24
+#define MENELAUS_RTC_HR 0x25
+#define MENELAUS_RTC_DAY 0x26
+#define MENELAUS_RTC_MON 0x27
+#define MENELAUS_RTC_YR 0x28
+#define MENELAUS_RTC_WKDAY 0x29
+#define MENELAUS_RTC_AL_SEC 0x2A
+#define MENELAUS_RTC_AL_MIN 0x2B
+#define MENELAUS_RTC_AL_HR 0x2C
+#define MENELAUS_RTC_AL_DAY 0x2D
+#define MENELAUS_RTC_AL_MON 0x2E
+#define MENELAUS_RTC_AL_YR 0x2F
+#define MENELAUS_RTC_COMP_MSB 0x30
+#define MENELAUS_RTC_COMP_LSB 0x31
+#define MENELAUS_S1_PULL_EN 0x32
+#define MENELAUS_S1_PULL_DIR 0x33
+#define MENELAUS_S2_PULL_EN 0x34
+#define MENELAUS_S2_PULL_DIR 0x35
+#define MENELAUS_MCT_CTRL1 0x36
+#define MENELAUS_MCT_CTRL2 0x37
+#define MENELAUS_MCT_CTRL3 0x38
+#define MENELAUS_MCT_PIN_ST 0x39
+#define MENELAUS_DEBOUNCE1 0x3A
+
+#define IH_MENELAUS_IRQS 12
+#define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */
+#define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */
+#define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */
+#define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */
+#define MENELAUS_LOWBAT_IRQ 4 /* Low battery */
+#define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */
+#define MENELAUS_UVLO_IRQ 6 /* UVLO detect */
+#define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */
+#define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */
+#define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */
+#define MENELAUS_RTCERR_IRQ 10 /* RTC error */
+#define MENELAUS_PSHBTN_IRQ 11 /* Push button */
+#define MENELAUS_RESERVED12_IRQ 12 /* Reserved */
+#define MENELAUS_RESERVED13_IRQ 13 /* Reserved */
+#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
+#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
+
+static void menelaus_work(struct work_struct *_menelaus);
+
+struct menelaus_chip {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct work_struct work;
+#ifdef CONFIG_RTC_DRV_TWL92330
+ struct rtc_device *rtc;
+ u8 rtc_control;
+ unsigned uie:1;
+#endif
+ unsigned vcore_hw_mode:1;
+ u8 mask1, mask2;
+ void (*handlers[16])(struct menelaus_chip *);
+ void (*mmc_callback)(void *data, u8 mask);
+ void *mmc_callback_data;
+};
+
+static struct menelaus_chip *the_menelaus;
+
+static int menelaus_write_reg(int reg, u8 value)
+{
+ int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
+
+ if (val < 0) {
+ pr_err("write error");
+ return val;
+ }
+
+ return 0;
+}
+
+static int menelaus_read_reg(int reg)
+{
+ int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
+
+ if (val < 0)
+ pr_err("read error");
+
+ return val;
+}
+
+static int menelaus_enable_irq(int irq)
+{
+ if (irq > 7) {
+ irq -= 8;
+ the_menelaus->mask2 &= ~(1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK2,
+ the_menelaus->mask2);
+ } else {
+ the_menelaus->mask1 &= ~(1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK1,
+ the_menelaus->mask1);
+ }
+}
+
+static int menelaus_disable_irq(int irq)
+{
+ if (irq > 7) {
+ irq -= 8;
+ the_menelaus->mask2 |= (1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK2,
+ the_menelaus->mask2);
+ } else {
+ the_menelaus->mask1 |= (1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK1,
+ the_menelaus->mask1);
+ }
+}
+
+static int menelaus_ack_irq(int irq)
+{
+ if (irq > 7)
+ return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
+ else
+ return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
+}
+
+/* Adds a handler for an interrupt. Does not run in interrupt context */
+static int menelaus_add_irq_work(int irq,
+ void (*handler)(struct menelaus_chip *))
+{
+ int ret = 0;
+
+ mutex_lock(&the_menelaus->lock);
+ the_menelaus->handlers[irq] = handler;
+ ret = menelaus_enable_irq(irq);
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+
+/* Removes handler for an interrupt */
+static int menelaus_remove_irq_work(int irq)
+{
+ int ret = 0;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_disable_irq(irq);
+ the_menelaus->handlers[irq] = NULL;
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+
+/*
+ * Gets scheduled when a card detect interrupt happens. Note that in some cases
+ * this line is wired to the card cover switch rather than the card detect
+ * switch in each slot. In that case the cards are not seen by Menelaus.
+ * FIXME: Add handling for D1 too
+ */
+static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
+{
+ int reg;
+ unsigned char card_mask = 0;
+
+ reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+ if (reg < 0)
+ return;
+
+ if (!(reg & 0x1))
+ card_mask |= (1 << 0);
+
+ if (!(reg & 0x2))
+ card_mask |= (1 << 1);
+
+ if (menelaus_hw->mmc_callback)
+ menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
+ card_mask);
+}
+
+/*
+ * Toggles the MMC slots between open-drain and push-pull mode.
+ */
+int menelaus_set_mmc_opendrain(int slot, int enable)
+{
+ int ret, val;
+
+ if (slot != 1 && slot != 2)
+ return -EINVAL;
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
+ if (ret < 0) {
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+ }
+ val = ret;
+ if (slot == 1) {
+ if (enable)
+ val |= 1 << 2;
+ else
+ val &= ~(1 << 2);
+ } else {
+ if (enable)
+ val |= 1 << 3;
+ else
+ val &= ~(1 << 3);
+ }
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
+
+int menelaus_set_slot_sel(int enable)
+{
+ int ret;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+ if (ret < 0)
+ goto out;
+ ret |= 0x02;
+ if (enable)
+ ret |= 1 << 5;
+ else
+ ret &= ~(1 << 5);
+ ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_slot_sel);
+
+int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
+{
+ int ret, val;
+
+ if (slot != 1 && slot != 2)
+ return -EINVAL;
+ if (power >= 3)
+ return -EINVAL;
+
+ mutex_lock(&the_menelaus->lock);
+
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+ if (ret < 0)
+ goto out;
+ val = ret;
+ if (slot == 1) {
+ if (cd_en)
+ val |= (1 << 4) | (1 << 6);
+ else
+ val &= ~((1 << 4) | (1 << 6));
+ } else {
+ if (cd_en)
+ val |= (1 << 5) | (1 << 7);
+ else
+ val &= ~((1 << 5) | (1 << 7));
+ }
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
+ if (ret < 0)
+ goto out;
+
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
+ if (ret < 0)
+ goto out;
+ val = ret;
+ if (slot == 1) {
+ if (enable)
+ val |= 1 << 0;
+ else
+ val &= ~(1 << 0);
+ } else {
+ int b;
+
+ if (enable)
+ val |= 1 << 1;
+ else
+ val &= ~(1 << 1);
+ b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+ b &= ~0x03;
+ b |= power;
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
+ if (ret < 0)
+ goto out;
+ }
+ /* Disable autonomous shutdown */
+ val &= ~(0x03 << 2);
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_slot);
+
+int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
+ void *data)
+{
+ int ret = 0;
+
+ the_menelaus->mmc_callback_data = data;
+ the_menelaus->mmc_callback = callback;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
+ menelaus_mmc_cd_work);
+
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_register_mmc_callback);
+
+void menelaus_unregister_mmc_callback(void)
+{
+ menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
+
+ the_menelaus->mmc_callback = NULL;
+ the_menelaus->mmc_callback_data = 0;
+}
+EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
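menelaus_register_mmc_callback() is the hook an MMC host driver uses to learn about card-detect and DAT1 events that arrive through Menelaus rather than through a GPIO. A hedged sketch of a consumer follows; the handler body and the host pointer are placeholders, and the prototype is assumed to come from <asm/arch/menelaus.h>.

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <asm/arch/menelaus.h>

    /* Illustrative consumer: card_mask is built by menelaus_mmc_cd_work()
     * above, bit 0 for slot 1 and bit 1 for slot 2. */
    static void example_cd_handler(void *data, u8 card_mask)
    {
            /* 'data' is whatever was passed at registration time */
            pr_debug("menelaus: card mask now %#x\n", card_mask);
    }

    static int example_attach(void *mmc_host)
    {
            return menelaus_register_mmc_callback(example_cd_handler,
                                                  mmc_host);
    }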
+
+struct menelaus_vtg {
+ const char *name;
+ u8 vtg_reg;
+ u8 vtg_shift;
+ u8 vtg_bits;
+ u8 mode_reg;
+};
+
+struct menelaus_vtg_value {
+ u16 vtg;
+ u16 val;
+};
+
+static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
+ int vtg_val, int mode)
+{
+ int val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ mutex_lock(&the_menelaus->lock);
+ if (vtg == 0)
+ goto set_voltage;
+
+ ret = menelaus_read_reg(vtg->vtg_reg);
+ if (ret < 0)
+ goto out;
+ val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
+ val |= vtg_val << vtg->vtg_shift;
+
+ dev_dbg(&c->dev, "Setting voltage '%s'"
+ "to %d mV (reg 0x%02x, val 0x%02x)\n",
+ vtg->name, mV, vtg->vtg_reg, val);
+
+ ret = menelaus_write_reg(vtg->vtg_reg, val);
+ if (ret < 0)
+ goto out;
+set_voltage:
+ ret = menelaus_write_reg(vtg->mode_reg, mode);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ if (ret == 0) {
+ /* Wait for voltage to stabilize */
+ msleep(1);
+ }
+ return ret;
+}
+
+static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++, tbl++)
+ if (tbl->vtg == vtg)
+ return tbl->val;
+ return -EINVAL;
+}
+
+/*
+ * Vcore can be programmed in two ways:
+ * SW-controlled: Required voltage is programmed into VCORE_CTRL1
+ * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
+ * and VCORE_CTRL4
+ *
+ * Call correct 'set' function accordingly
+ */
+
+static const struct menelaus_vtg_value vcore_values[] = {
+ { 1000, 0 },
+ { 1025, 1 },
+ { 1050, 2 },
+ { 1075, 3 },
+ { 1100, 4 },
+ { 1125, 5 },
+ { 1150, 6 },
+ { 1175, 7 },
+ { 1200, 8 },
+ { 1225, 9 },
+ { 1250, 10 },
+ { 1275, 11 },
+ { 1300, 12 },
+ { 1325, 13 },
+ { 1350, 14 },
+ { 1375, 15 },
+ { 1400, 16 },
+ { 1425, 17 },
+ { 1450, 18 },
+};
+
+int menelaus_set_vcore_sw(unsigned int mV)
+{
+ int val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ val = menelaus_get_vtg_value(mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (val < 0)
+ return -EINVAL;
+
+ dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
+
+ /* Set SW mode and the voltage in one go. */
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+ if (ret == 0)
+ the_menelaus->vcore_hw_mode = 0;
+ mutex_unlock(&the_menelaus->lock);
+ msleep(1);
+
+ return ret;
+}
+
+int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
+{
+ int fval, rval, val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ rval = menelaus_get_vtg_value(roof_mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (rval < 0)
+ return -EINVAL;
+ fval = menelaus_get_vtg_value(floor_mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (fval < 0)
+ return -EINVAL;
+
+ dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
+ floor_mV, roof_mV);
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
+ if (ret < 0)
+ goto out;
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
+ if (ret < 0)
+ goto out;
+ if (!the_menelaus->vcore_hw_mode) {
+ val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+ /* HW mode, turn OFF byte comparator */
+ val |= ((1 << 7) | (1 << 5));
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+ the_menelaus->vcore_hw_mode = 1;
+ }
+ msleep(1);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
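Tying the comment above the vcore_values table together: platform code either pins VCORE with menelaus_set_vcore_sw() or hands the PMIC a roof/floor window with menelaus_set_vcore_hw(). A hedged usage sketch follows, with millivolt figures picked from the vcore_values table purely as examples and the prototypes assumed to be available from <asm/arch/menelaus.h>.

    #include <asm/arch/menelaus.h>

    /* Illustrative only: choose one of the two VCORE programming models. */
    static int example_setup_vcore(int use_hw_window)
    {
            if (use_hw_window)
                    /* HW mode: VCORE may float between 1.05 V and 1.40 V */
                    return menelaus_set_vcore_hw(1400, 1050);

            /* SW mode: pin VCORE to a single value (here 1.20 V) */
            return menelaus_set_vcore_sw(1200);
    }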
+
+static const struct menelaus_vtg vmem_vtg = {
+ .name = "VMEM",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 0,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL3,
+};
+
+static const struct menelaus_vtg_value vmem_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 1900, 2 },
+ { 2500, 3 },
+};
+
+int menelaus_set_vmem(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmem);
+
+static const struct menelaus_vtg vio_vtg = {
+ .name = "VIO",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 2,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL4,
+};
+
+static const struct menelaus_vtg_value vio_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2500, 2 },
+ { 2800, 3 },
+};
+
+int menelaus_set_vio(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vio);
+
+static const struct menelaus_vtg_value vdcdc_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2000, 2 },
+ { 2200, 3 },
+ { 2400, 4 },
+ { 2800, 5 },
+ { 3000, 6 },
+ { 3300, 7 },
+};
+
+static const struct menelaus_vtg vdcdc2_vtg = {
+ .name = "VDCDC2",
+ .vtg_reg = MENELAUS_DCDC_CTRL1,
+ .vtg_shift = 0,
+ .vtg_bits = 3,
+ .mode_reg = MENELAUS_DCDC_CTRL2,
+};
+
+static const struct menelaus_vtg vdcdc3_vtg = {
+ .name = "VDCDC3",
+ .vtg_reg = MENELAUS_DCDC_CTRL1,
+ .vtg_shift = 3,
+ .vtg_bits = 3,
+ .mode_reg = MENELAUS_DCDC_CTRL3,
+};
+
+int menelaus_set_vdcdc(int dcdc, unsigned int mV)
+{
+ const struct menelaus_vtg *vtg;
+ int val;
+
+ if (dcdc != 2 && dcdc != 3)
+ return -EINVAL;
+ if (dcdc == 2)
+ vtg = &vdcdc2_vtg;
+ else
+ vtg = &vdcdc3_vtg;
+
+ if (mV == 0)
+ return menelaus_set_voltage(vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vdcdc_values,
+ ARRAY_SIZE(vdcdc_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(vtg, mV, val, 0x03);
+}
+
+static const struct menelaus_vtg_value vmmc_values[] = {
+ { 1850, 0 },
+ { 2800, 1 },
+ { 3000, 2 },
+ { 3100, 3 },
+};
+
+static const struct menelaus_vtg vmmc_vtg = {
+ .name = "VMMC",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 6,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL7,
+};
+
+int menelaus_set_vmmc(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmmc);
+
+
+static const struct menelaus_vtg_value vaux_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2500, 2 },
+ { 2800, 3 },
+};
+
+static const struct menelaus_vtg vaux_vtg = {
+ .name = "VAUX",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 4,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL6,
+};
+
+int menelaus_set_vaux(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vaux);
+
+int menelaus_get_slot_pin_states(void)
+{
+ return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+}
+EXPORT_SYMBOL(menelaus_get_slot_pin_states);
+
+int menelaus_set_regulator_sleep(int enable, u32 val)
+{
+ int t, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
+ if (ret < 0)
+ goto out;
+
+ dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
+
+ ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+ if (ret < 0)
+ goto out;
+ t = ((1 << 6) | 0x04);
+ if (enable)
+ ret |= t;
+ else
+ ret &= ~t;
+ ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/* Handles Menelaus interrupts. Does not run in interrupt context */
+static void menelaus_work(struct work_struct *_menelaus)
+{
+ struct menelaus_chip *menelaus =
+ container_of(_menelaus, struct menelaus_chip, work);
+ void (*handler)(struct menelaus_chip *menelaus);
+
+ while (1) {
+ unsigned isr;
+
+ isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
+ & ~menelaus->mask2) << 8;
+ isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
+ & ~menelaus->mask1;
+ if (!isr)
+ break;
+
+ while (isr) {
+ int irq = fls(isr) - 1;
+ isr &= ~(1 << irq);
+
+ mutex_lock(&menelaus->lock);
+ menelaus_disable_irq(irq);
+ menelaus_ack_irq(irq);
+ handler = menelaus->handlers[irq];
+ if (handler)
+ handler(menelaus);
+ menelaus_enable_irq(irq);
+ mutex_unlock(&menelaus->lock);
+ }
+ }
+ enable_irq(menelaus->client->irq);
+}
+
+/*
+ * We cannot use I2C in interrupt context, so we just schedule work.
+ */
+static irqreturn_t menelaus_irq(int irq, void *_menelaus)
+{
+ struct menelaus_chip *menelaus = _menelaus;
+
+ disable_irq_nosync(irq);
+ (void)schedule_work(&menelaus->work);
+
+ return IRQ_HANDLED;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * The RTC needs to be set once, then it runs on backup battery power.
+ * It supports alarms, including system wake alarms (from some modes);
+ * and 1/second IRQs if requested.
+ */
+#ifdef CONFIG_RTC_DRV_TWL92330
+
+#define RTC_CTRL_RTC_EN (1 << 0)
+#define RTC_CTRL_AL_EN (1 << 1)
+#define RTC_CTRL_MODE12 (1 << 2)
+#define RTC_CTRL_EVERY_MASK (3 << 3)
+#define RTC_CTRL_EVERY_SEC (0 << 3)
+#define RTC_CTRL_EVERY_MIN (1 << 3)
+#define RTC_CTRL_EVERY_HR (2 << 3)
+#define RTC_CTRL_EVERY_DAY (3 << 3)
+
+#define RTC_UPDATE_EVERY 0x08
+
+#define RTC_HR_PM (1 << 7)
+
+static void menelaus_to_time(char *regs, struct rtc_time *t)
+{
+ t->tm_sec = BCD2BIN(regs[0]);
+ t->tm_min = BCD2BIN(regs[1]);
+ if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+ t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
+ if (regs[2] & RTC_HR_PM)
+ t->tm_hour += 12;
+ } else
+ t->tm_hour = BCD2BIN(regs[2] & 0x3f);
+ t->tm_mday = BCD2BIN(regs[3]);
+ t->tm_mon = BCD2BIN(regs[4]) - 1;
+ t->tm_year = BCD2BIN(regs[5]) + 100;
+}
+
+static int time_to_menelaus(struct rtc_time *t, int regnum)
+{
+ int hour, status;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
+ if (status < 0)
+ goto fail;
+
+ if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+ hour = t->tm_hour + 1;
+ if (hour > 12)
+ hour = RTC_HR_PM | BIN2BCD(hour - 12);
+ else
+ hour = BIN2BCD(hour);
+ } else
+ hour = BIN2BCD(t->tm_hour);
+ status = menelaus_write_reg(regnum++, hour);
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
+ if (status < 0)
+ goto fail;
+
+ return 0;
+fail:
+ dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
+ --regnum, status);
+ return status;
+}
+
+static int menelaus_read_time(struct device *dev, struct rtc_time *t)
+{
+ struct i2c_msg msg[2];
+ char regs[7];
+ int status;
+
+ /* block read date and time registers */
+ regs[0] = MENELAUS_RTC_SEC;
+
+ msg[0].addr = MENELAUS_I2C_ADDRESS;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = regs;
+
+ msg[1].addr = MENELAUS_I2C_ADDRESS;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = sizeof(regs);
+ msg[1].buf = regs;
+
+ status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+ if (status != 2) {
+ dev_err(dev, "%s error %d\n", "read", status);
+ return -EIO;
+ }
+
+ menelaus_to_time(regs, t);
+ t->tm_wday = BCD2BIN(regs[6]);
+
+ return 0;
+}
+
+static int menelaus_set_time(struct device *dev, struct rtc_time *t)
+{
+ int status;
+
+ /* write date and time registers */
+ status = time_to_menelaus(t, MENELAUS_RTC_SEC);
+ if (status < 0)
+ return status;
+ status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
+ if (status < 0) {
+ dev_err(&the_menelaus->client->dev, "rtc write reg %02x",
+ "err %d\n", MENELAUS_RTC_WKDAY, status);
+ return status;
+ }
+
+ /* now commit the write */
+ status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
+ if (status < 0)
+ dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
+ status);
+
+ return 0;
+}
+
+static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+ struct i2c_msg msg[2];
+ char regs[6];
+ int status;
+
+ /* block read alarm registers */
+ regs[0] = MENELAUS_RTC_AL_SEC;
+
+ msg[0].addr = MENELAUS_I2C_ADDRESS;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = regs;
+
+ msg[1].addr = MENELAUS_I2C_ADDRESS;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = sizeof(regs);
+ msg[1].buf = regs;
+
+ status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+ if (status != 2) {
+ dev_err(dev, "%s error %d\n", "alarm read", status);
+ return -EIO;
+ }
+
+ menelaus_to_time(regs, &w->time);
+
+ w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
+
+ /* NOTE we *could* check if actually pending... */
+ w->pending = 0;
+
+ return 0;
+}
+
+static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+ int status;
+
+ if (the_menelaus->client->irq <= 0 && w->enabled)
+ return -ENODEV;
+
+ /* clear previous alarm enable */
+ if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+ the_menelaus->rtc_control);
+ if (status < 0)
+ return status;
+ }
+
+ /* write alarm registers */
+ status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
+ if (status < 0)
+ return status;
+
+ /* enable alarm if requested */
+ if (w->enabled) {
+ the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+ status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+ the_menelaus->rtc_control);
+ }
+
+ return status;
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+
+static void menelaus_rtc_update_work(struct menelaus_chip *m)
+{
+ /* report 1/sec update */
+ local_irq_disable();
+ rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
+ local_irq_enable();
+}
+
+static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+{
+ int status;
+
+ if (the_menelaus->client->irq <= 0)
+ return -ENOIOCTLCMD;
+
+ switch (cmd) {
+ /* alarm IRQ */
+ case RTC_AIE_ON:
+ if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
+ return 0;
+ the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+ break;
+ case RTC_AIE_OFF:
+ if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
+ return 0;
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ break;
+ /* 1/second "update" IRQ */
+ case RTC_UIE_ON:
+ if (the_menelaus->uie)
+ return 0;
+ status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+ status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
+ menelaus_rtc_update_work);
+ if (status == 0)
+ the_menelaus->uie = 1;
+ return status;
+ case RTC_UIE_OFF:
+ if (!the_menelaus->uie)
+ return 0;
+ status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+ if (status == 0)
+ the_menelaus->uie = 0;
+ return status;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+#else
+#define menelaus_ioctl NULL
+#endif
+
+/* REVISIT no compensation register support ... */
+
+static const struct rtc_class_ops menelaus_rtc_ops = {
+ .ioctl = menelaus_ioctl,
+ .read_time = menelaus_read_time,
+ .set_time = menelaus_set_time,
+ .read_alarm = menelaus_read_alarm,
+ .set_alarm = menelaus_set_alarm,
+};
+
+static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
+{
+ /* report alarm */
+ local_irq_disable();
+ rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
+ local_irq_enable();
+
+ /* then disable it; alarms are oneshot */
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+ int alarm = (m->client->irq > 0);
+
+ /* assume 32KDETEN pin is pulled high */
+ if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
+ dev_dbg(&m->client->dev, "no 32k oscillator\n");
+ return;
+ }
+
+ /* support RTC alarm; it can issue wakeups */
+ if (alarm) {
+ if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
+ menelaus_rtc_alarm_work) < 0) {
+ dev_err(&m->client->dev, "can't handle RTC alarm\n");
+ return;
+ }
+ device_init_wakeup(&m->client->dev, 1);
+ }
+
+ /* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
+ m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
+ if (!(m->rtc_control & RTC_CTRL_RTC_EN)
+ || (m->rtc_control & RTC_CTRL_AL_EN)
+ || (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
+ if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
+ dev_warn(&m->client->dev, "rtc clock needs setting\n");
+ m->rtc_control |= RTC_CTRL_RTC_EN;
+ }
+ m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
+ m->rtc_control &= ~RTC_CTRL_AL_EN;
+ menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
+ }
+
+ m->rtc = rtc_device_register(DRIVER_NAME,
+ &m->client->dev,
+ &menelaus_rtc_ops, THIS_MODULE);
+ if (IS_ERR(m->rtc)) {
+ if (alarm) {
+ menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
+ device_init_wakeup(&m->client->dev, 0);
+ }
+ dev_err(&m->client->dev, "can't register RTC: %d\n",
+ (int) PTR_ERR(m->rtc));
+ the_menelaus->rtc = NULL;
+ }
+}
+
+#else
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+ /* nothing */
+}
+
+#endif
+
+/*-----------------------------------------------------------------------*/
+
+static struct i2c_driver menelaus_i2c_driver;
+
+static int menelaus_probe(struct i2c_client *client)
+{
+ struct menelaus_chip *menelaus;
+ int rev = 0, val;
+ int err = 0;
+ struct menelaus_platform_data *menelaus_pdata =
+ client->dev.platform_data;
+
+ if (the_menelaus) {
+ dev_dbg(&client->dev, "only one %s for now\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+
+ menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
+ if (!menelaus)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, menelaus);
+
+ the_menelaus = menelaus;
+ menelaus->client = client;
+
+ /* If a true probe check the device */
+ rev = menelaus_read_reg(MENELAUS_REV);
+ if (rev < 0) {
+ pr_err("device not found");
+ err = -ENODEV;
+ goto fail1;
+ }
+
+ /* Ack and disable all Menelaus interrupts */
+ menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
+ menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
+ menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
+ menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
+ menelaus->mask1 = 0xff;
+ menelaus->mask2 = 0xff;
+
+ /* Set output buffer strengths */
+ menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
+
+ if (client->irq > 0) {
+ err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
+ DRIVER_NAME, menelaus);
+ if (err) {
+ dev_dbg(&client->dev, "can't get IRQ %d, err %d",
+ client->irq, err);
+ goto fail1;
+ }
+ }
+
+ mutex_init(&menelaus->lock);
+ INIT_WORK(&menelaus->work, menelaus_work);
+
+ pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
+
+ val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+ if (val < 0) {
+ err = val;
+ goto fail2;
+ }
+ if (val & (1 << 7))
+ menelaus->vcore_hw_mode = 1;
+ else
+ menelaus->vcore_hw_mode = 0;
+
+ if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
+ err = menelaus_pdata->late_init(&client->dev);
+ if (err < 0)
+ goto fail2;
+ }
+
+ menelaus_rtc_init(menelaus);
+
+ return 0;
+fail2:
+ free_irq(client->irq, menelaus);
+ flush_scheduled_work();
+fail1:
+ kfree(menelaus);
+ return err;
+}
+
+static int __exit menelaus_remove(struct i2c_client *client)
+{
+ struct menelaus_chip *menelaus = i2c_get_clientdata(client);
+
+ free_irq(client->irq, menelaus);
+ kfree(menelaus);
+ i2c_set_clientdata(client, NULL);
+ the_menelaus = NULL;
+ return 0;
+}
+
+static struct i2c_driver menelaus_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = menelaus_probe,
+ .remove = __exit_p(menelaus_remove),
+};
+
+static int __init menelaus_init(void)
+{
+ int res;
+
+ res = i2c_add_driver(&menelaus_i2c_driver);
+ if (res < 0) {
+ pr_err("driver registration failed\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static void __exit menelaus_exit(void)
+{
+ i2c_del_driver(&menelaus_i2c_driver);
+
+ /* FIXME: Shutdown menelaus parts that can be shut down */
+}
+
+MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
+MODULE_DESCRIPTION("I2C interface for Menelaus.");
+MODULE_LICENSE("GPL");
+
+module_init(menelaus_init);
+module_exit(menelaus_exit);
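The RTC half of the driver above hooks into the RTC class through a struct rtc_class_ops and rtc_device_register(). As a minimal, generic sketch of that pairing (hypothetical driver and names, not taken from this patch), registration and teardown usually look like:

	#include <linux/err.h>
	#include <linux/rtc.h>

	struct example_chip {				/* hypothetical device state */
		struct rtc_device *rtc;
	};

	static int example_read_time(struct device *dev, struct rtc_time *t)
	{
		/* fill *t from hardware here */
		return 0;
	}

	static const struct rtc_class_ops example_rtc_ops = {
		.read_time = example_read_time,
	};

	static int example_rtc_attach(struct device *dev, struct example_chip *chip)
	{
		chip->rtc = rtc_device_register("example-rtc", dev,
						&example_rtc_ops, THIS_MODULE);
		return IS_ERR(chip->rtc) ? PTR_ERR(chip->rtc) : 0;
	}

	static void example_rtc_detach(struct example_chip *chip)
	{
		rtc_device_unregister(chip->rtc);
	}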
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc580139946..5a4c5ea12f8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
hwgroup->hwif->next = hwif;
spin_unlock_irq(&ide_lock);
} else {
- hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+ hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+ GFP_KERNEL | __GFP_ZERO,
hwif_to_node(hwif->drives[0].hwif));
if (!hwgroup)
goto out_up;
hwif->hwgroup = hwgroup;
- memset(hwgroup, 0, sizeof(ide_hwgroup_t));
hwgroup->hwif = hwif->next = hwif;
hwgroup->rq = NULL;
hwgroup->handler = NULL;
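The ide-probe hunk replaces kmalloc_node() plus an explicit memset() with a single allocation passing __GFP_ZERO, so the node-local buffer comes back already zeroed. A minimal sketch of the idiom (hypothetical helper, kernel context assumed):

	#include <linux/slab.h>

	/* hypothetical helper: node-local allocation that is already zeroed,
	 * equivalent to kmalloc_node() followed by memset(..., 0, size) */
	static inline void *example_zalloc_node(size_t size, int node)
	{
		return kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, node);
	}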
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 0fc8c6e559e..ee45259573c 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,6 +30,7 @@
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
+#include <linux/freezer.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
@@ -1128,8 +1129,6 @@ static int hpsbpkt_thread(void *__hi)
struct list_head tmp;
int may_schedule;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
INIT_LIST_HEAD(&tmp);
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 51a12062ed3..2ffd53461db 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1699,6 +1699,7 @@ static int nodemgr_host_thread(void *__hi)
unsigned int g, generation = 0;
int i, reset_cycles = 0;
+ set_freezable();
/* Setup our device-model entries */
nodemgr_create_host_dev_files(host);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index eef415b12b2..11f1d99db40 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1591,7 +1591,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
- u32 f0;
+ u32 f0 = 0;
int ind;
u8 op0 = 0;
@@ -1946,7 +1946,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
- u32 f0;
+ u32 f0 = 0;
int ind;
u8 op0 = 0;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index bd686a2a517..20896d5e5f0 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -445,6 +445,7 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
static int gameport_thread(void *nothing)
{
+ set_freezable();
do {
gameport_handle_event();
wait_event_interruptible(gameport_wait,
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a8f3bc1dff2..372ca493119 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -384,6 +384,7 @@ static struct serio *serio_get_pending_child(struct serio *parent)
static int serio_thread(void *nothing)
{
+ set_freezable();
do {
serio_handle_event();
wait_event_interruptible(serio_wait,
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f0cbcdb008e..36f94401915 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -292,6 +292,7 @@ static int ucb1400_ts_thread(void *_ucb)
sched_setscheduler(tsk, SCHED_FIFO, &param);
+ set_freezable();
while (!kthread_should_stop()) {
unsigned int x, y, p;
long timeout;
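The gameport, serio and ucb1400 hunks above all add set_freezable(): kernel threads are now non-freezable by default, so a thread that should be parked across suspend/hibernate must opt in itself (which is also why the explicit PF_NOFREEZE in the ieee1394 thread is dropped). A minimal sketch of the opt-in pattern, with a hypothetical thread function:

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/* hypothetical kthread body showing the opt-in pattern */
	static int example_thread(void *data)
	{
		set_freezable();		/* kthreads default to non-freezable */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here while the system suspends */

			/* ... handle one pending event, then sleep ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}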
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3e088c42b22..cf906c8cee4 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -2,12 +2,10 @@
# ISDN device configuration
#
-menu "ISDN subsystem"
- depends on !S390
-
-config ISDN
+menuconfig ISDN
tristate "ISDN support"
depends on NET
+ depends on !S390
---help---
ISDN ("Integrated Services Digital Networks", called RNIS in France)
is a special type of fully digital telephone service; it's mostly
@@ -21,9 +19,9 @@ config ISDN
Select this option if you want your kernel to support ISDN.
+if ISDN
menu "Old ISDN4Linux"
- depends on NET && ISDN
config ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
@@ -50,20 +48,21 @@ endif
endmenu
comment "CAPI subsystem"
- depends on NET && ISDN
config ISDN_CAPI
tristate "CAPI2.0 support"
- depends on ISDN
help
This provides the CAPI (Common ISDN Application Programming
Interface, a standard making it easy for programs to access ISDN
hardware, see <http://www.capi.org/>. This is needed for AVM's set
of active ISDN controllers like B1, T1, M1.
+if ISDN_CAPI
+
source "drivers/isdn/capi/Kconfig"
source "drivers/isdn/hardware/Kconfig"
-endmenu
+endif # ISDN_CAPI
+endif # ISDN
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c92f9d764fc..e1afd60924f 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -3,7 +3,6 @@
#
config ISDN_DRV_AVMB1_VERBOSE_REASON
bool "Verbose reason code reporting"
- depends on ISDN_CAPI
default y
help
If you say Y here, the CAPI drivers will give verbose reasons for
@@ -12,7 +11,6 @@ config ISDN_DRV_AVMB1_VERBOSE_REASON
config CAPI_TRACE
bool "CAPI trace support"
- depends on ISDN_CAPI
default y
help
If you say Y here, the kernelcapi driver can make verbose traces
@@ -23,7 +21,7 @@ config CAPI_TRACE
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
- depends on ISDN_CAPI && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -33,7 +31,6 @@ config ISDN_CAPI_MIDDLEWARE
config ISDN_CAPI_CAPI20
tristate "CAPI2.0 /dev/capi support"
- depends on ISDN_CAPI
help
This option will provide the CAPI 2.0 interface to userspace
applications via /dev/capi20. Applications should use the
@@ -56,7 +53,7 @@ config ISDN_CAPI_CAPIFS
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
- depends on ISDN_CAPI && ISDN_I4L
+ depends on ISDN_I4L
help
This option provides the glue code to hook up CAPI driven cards to
the legacy isdn4linux link layer. If you have a card which is
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 81661b8bd3a..f449daef3ee 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -549,7 +549,7 @@ static int handle_minor_send(struct capiminor *mp)
capimsg_setu8 (skb->data, 5, CAPI_REQ);
capimsg_setu16(skb->data, 6, mp->msgid++);
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
- capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */
+ capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3ed34f7a1c4..9f73bc2727c 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -258,7 +258,7 @@ static void recv_handler(struct work_struct *work)
if ((!ap) || (ap->release_in_progress))
return;
- down(&ap->recv_sem);
+ mutex_lock(&ap->recv_mtx);
while ((skb = skb_dequeue(&ap->recv_queue))) {
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
ap->nrecvdatapkt++;
@@ -267,7 +267,7 @@ static void recv_handler(struct work_struct *work)
ap->recv_message(ap, skb);
}
- up(&ap->recv_sem);
+ mutex_unlock(&ap->recv_mtx);
}
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
@@ -547,7 +547,7 @@ u16 capi20_register(struct capi20_appl *ap)
ap->nsentctlpkt = 0;
ap->nsentdatapkt = 0;
ap->callback = NULL;
- init_MUTEX(&ap->recv_sem);
+ mutex_init(&ap->recv_mtx);
skb_queue_head_init(&ap->recv_queue);
INIT_WORK(&ap->recv_work, recv_handler);
ap->release_in_progress = 0;
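The kcapi.c hunk is the usual conversion of a semaphore used purely as a sleeping lock into a struct mutex: init_MUTEX()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(). A minimal sketch of the resulting pattern, with a hypothetical structure standing in for capi20_appl:

	#include <linux/mutex.h>

	struct example_appl {			/* hypothetical stand-in for capi20_appl */
		struct mutex recv_mtx;
	};

	static void example_register(struct example_appl *ap)
	{
		mutex_init(&ap->recv_mtx);
	}

	static void example_recv(struct example_appl *ap)
	{
		mutex_lock(&ap->recv_mtx);	/* may sleep: process context only */
		/* ... drain the receive queue ... */
		mutex_unlock(&ap->recv_mtx);
	}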
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 31f4fd8b8b0..845a797b003 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -243,36 +243,15 @@ create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
// ---------------------------------------------------------------------------
-
-static __inline__ struct capi_driver *capi_driver_get_idx(loff_t pos)
-{
- struct capi_driver *drv = NULL;
- struct list_head *l;
- loff_t i;
-
- i = 0;
- list_for_each(l, &capi_drivers) {
- drv = list_entry(l, struct capi_driver, list);
- if (i++ == pos)
- return drv;
- }
- return NULL;
-}
-
static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
{
- struct capi_driver *drv;
read_lock(&capi_drivers_list_lock);
- drv = capi_driver_get_idx(*pos);
- return drv;
+ return seq_list_start(&capi_drivers, *pos);
}
static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct capi_driver *drv = (struct capi_driver *)v;
- ++*pos;
- if (drv->list.next == &capi_drivers) return NULL;
- return list_entry(drv->list.next, struct capi_driver, list);
+ return seq_list_next(v, &capi_drivers, pos);
}
static void capi_driver_stop(struct seq_file *seq, void *v)
@@ -282,7 +261,8 @@ static void capi_driver_stop(struct seq_file *seq, void *v)
static int capi_driver_show(struct seq_file *seq, void *v)
{
- struct capi_driver *drv = (struct capi_driver *)v;
+ struct capi_driver *drv = list_entry(v, struct capi_driver, list);
+
seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
return 0;
}
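The kcapi_proc.c hunk drops the hand-rolled index lookup in favour of the seq_list_start()/seq_list_next() helpers, which walk a plain list_head on behalf of a seq_file. A minimal sketch of that pattern, with a hypothetical list and entry type:

	#include <linux/list.h>
	#include <linux/seq_file.h>

	struct example_entry {			/* hypothetical list element */
		struct list_head list;
		const char *name;
	};

	static LIST_HEAD(example_list);

	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	{
		return seq_list_start(&example_list, *pos);
	}

	static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return seq_list_next(v, &example_list, pos);
	}

	static int example_seq_show(struct seq_file *seq, void *v)
	{
		struct example_entry *e = list_entry(v, struct example_entry, list);

		seq_printf(seq, "%s\n", e->name);
		return 0;
	}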
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 139f1979771..30d028d2495 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -2,7 +2,6 @@
# ISDN hardware drivers
#
comment "CAPI hardware drivers"
- depends on NET && ISDN && ISDN_CAPI
source "drivers/isdn/hardware/avm/Kconfig"
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 29a32a8830c..5dbcbe3a54a 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -2,23 +2,22 @@
# ISDN AVM drivers
#
-menu "Active AVM cards"
- depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_AVM
- bool "Support AVM cards"
+menuconfig CAPI_AVM
+ bool "Active AVM cards"
help
Enable support for AVM active ISDN cards.
+if CAPI_AVM
+
config ISDN_DRV_AVMB1_B1ISA
tristate "AVM B1 ISA support"
- depends on CAPI_AVM && ISDN_CAPI && ISA
+ depends on ISA
help
Enable support for the ISA version of the AVM B1 card.
config ISDN_DRV_AVMB1_B1PCI
tristate "AVM B1 PCI support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the PCI version of the AVM B1 card.
@@ -30,14 +29,13 @@ config ISDN_DRV_AVMB1_B1PCIV4
config ISDN_DRV_AVMB1_T1ISA
tristate "AVM T1/T1-B ISA support"
- depends on CAPI_AVM && ISDN_CAPI && ISA
+ depends on ISA
help
Enable support for the AVM T1 T1B card.
Note: This is a PRI card and handle 30 B-channels.
config ISDN_DRV_AVMB1_B1PCMCIA
tristate "AVM B1/M1/M2 PCMCIA support"
- depends on CAPI_AVM && ISDN_CAPI
help
Enable support for the PCMCIA version of the AVM B1 card.
@@ -50,17 +48,16 @@ config ISDN_DRV_AVMB1_AVM_CS
config ISDN_DRV_AVMB1_T1PCI
tristate "AVM T1/T1-B PCI support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the AVM T1 T1B card.
Note: This is a PRI card and handle 30 B-channels.
config ISDN_DRV_AVMB1_C4
tristate "AVM C4/C2 support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the AVM C4/C2 PCI cards.
These cards handle 4/2 BRI ISDN lines (8/4 channels).
-endmenu
-
+endif # CAPI_AVM
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
index 01d4afd9d84..6082b6a5ced 100644
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ b/drivers/isdn/hardware/eicon/Kconfig
@@ -2,52 +2,50 @@
# ISDN DIVAS Eicon driver
#
-menu "Active Eicon DIVA Server cards"
- depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_EICON
- bool "Support Eicon cards"
+menuconfig CAPI_EICON
+ bool "Active Eicon DIVA Server cards"
help
Enable support for Eicon Networks active ISDN cards.
+if CAPI_EICON
+
config ISDN_DIVAS
tristate "Support Eicon DIVA Server cards"
- depends on CAPI_EICON && PROC_FS && PCI
+ depends on PROC_FS && PCI
help
Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
In order to use this card, additional firmware is necessary, which
has to be downloaded into the card using the divactrl utility.
+if ISDN_DIVAS
+
config ISDN_DIVAS_BRIPCI
bool "DIVA Server BRI/PCI support"
- depends on ISDN_DIVAS
help
Enable support for DIVA Server BRI-PCI.
config ISDN_DIVAS_PRIPCI
bool "DIVA Server PRI/PCI support"
- depends on ISDN_DIVAS
help
Enable support for DIVA Server PRI-PCI.
config ISDN_DIVAS_DIVACAPI
tristate "DIVA CAPI2.0 interface support"
- depends on ISDN_DIVAS && ISDN_CAPI
help
You need this to provide the CAPI interface
for DIVA Server cards.
config ISDN_DIVAS_USERIDI
tristate "DIVA User-IDI interface support"
- depends on ISDN_DIVAS
help
Enable support for user-mode IDI interface.
config ISDN_DIVAS_MAINT
tristate "DIVA Maint driver support"
- depends on ISDN_DIVAS && m
+ depends on m
help
Enable Divas Maintenance driver.
-endmenu
+endif # ISDN_DIVAS
+endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index 4cbc68cf4db..db87d510542 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -106,6 +106,7 @@ static void um_new_card(DESCRIPTOR * d)
} else {
DBG_ERR(("could not create user mode idi card %d",
adapter_nr));
+ diva_os_free(0, card);
}
}
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 871310d56a6..3d1bdc8431a 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -255,54 +255,38 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static struct pci_dev *dev_a4t __devinitdata = NULL;
+static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
+ struct IsdnCardState *cs,
+ u_int *found,
+ u_int *pci_memaddr)
+{
+ u16 sub_sys;
+ u16 sub_vendor;
+
+ sub_vendor = dev_a4t->subsystem_vendor;
+ sub_sys = dev_a4t->subsystem_device;
+ if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
+ if (pci_enable_device(dev_a4t))
+ return (0); /* end loop & function */
+ *found = 1;
+ *pci_memaddr = pci_resource_start(dev_a4t, 0);
+ cs->irq = dev_a4t->irq;
+ return (1); /* end loop */
+ }
-int __devinit
-setup_bkm_a4t(struct IsdnCard *card)
+ return (-1); /* continue looping */
+}
+
+static int __devinit a4t_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs,
+ u_int pci_memaddr)
{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_int pci_memaddr = 0, found = 0;
I20_REGISTER_FILE *pI20_Regs;
-#ifdef CONFIG_PCI
-#endif
-
- strcpy(tmp, bkm_a4t_revision);
- printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ == ISDN_CTYPE_BKM_A4T) {
- cs->subtyp = BKM_A4T;
- } else
- return (0);
-#ifdef CONFIG_PCI
- while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
- PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
- u16 sub_sys;
- u16 sub_vendor;
-
- sub_vendor = dev_a4t->subsystem_vendor;
- sub_sys = dev_a4t->subsystem_device;
- if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
- if (pci_enable_device(dev_a4t))
- return(0);
- found = 1;
- pci_memaddr = pci_resource_start(dev_a4t, 0);
- cs->irq = dev_a4t->irq;
- break;
- }
- }
- if (!found) {
- printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
- return (0);
- }
if (!cs->irq) { /* IRQ range check ?? */
printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]);
return (0);
}
- if (!pci_memaddr) {
- printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
- return (0);
- }
cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
/* Check suspecious address */
pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
@@ -317,11 +301,7 @@ setup_bkm_a4t(struct IsdnCard *card)
cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
cs->hw.ax.isac_ale = GCS_1;
cs->hw.ax.jade_ale = GCS_3;
-#else
- printk(KERN_WARNING "HiSax: %s: NO_PCI_BIOS\n", CardType[card->typ]);
- printk(KERN_WARNING "HiSax: %s: unable to configure\n", CardType[card->typ]);
- return (0);
-#endif /* CONFIG_PCI */
+
printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n",
CardType[card->typ], cs->hw.ax.base, cs->irq);
@@ -339,5 +319,43 @@ setup_bkm_a4t(struct IsdnCard *card)
ISACVersion(cs, "Telekom A4T:");
/* Jade version */
JadeVersion(cs, "Telekom A4T:");
+
return (1);
}
+
+static struct pci_dev *dev_a4t __devinitdata = NULL;
+
+int __devinit
+setup_bkm_a4t(struct IsdnCard *card)
+{
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+ u_int pci_memaddr = 0, found = 0;
+ int ret;
+
+ strcpy(tmp, bkm_a4t_revision);
+ printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ == ISDN_CTYPE_BKM_A4T) {
+ cs->subtyp = BKM_A4T;
+ } else
+ return (0);
+
+ while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
+ PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
+ ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
+ if (!ret)
+ return (0);
+ if (ret > 0)
+ break;
+ }
+ if (!found) {
+ printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
+ return (0);
+ }
+ if (!pci_memaddr) {
+ printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
+ return (0);
+ }
+
+ return a4t_cs_init(card, cs, pci_memaddr);
+}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 8d53a7fd267..5f7907e5709 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -361,11 +361,11 @@ module_param_array(io1, int, NULL, 0);
int nrcards;
-extern char *l1_revision;
-extern char *l2_revision;
-extern char *l3_revision;
-extern char *lli_revision;
-extern char *tei_revision;
+extern const char *l1_revision;
+extern const char *l2_revision;
+extern const char *l3_revision;
+extern const char *lli_revision;
+extern const char *tei_revision;
char *HiSax_getrev(const char *revision)
{
@@ -847,95 +847,10 @@ static int init_card(struct IsdnCardState *cs)
return 3;
}
-static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+static int hisax_cs_setup_card(struct IsdnCard *card)
{
- int ret = 0;
- struct IsdnCard *card = cards + cardnr;
- struct IsdnCardState *cs;
+ int ret;
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
- if (!cs) {
- printk(KERN_WARNING
- "HiSax: No memory for IsdnCardState(card %d)\n",
- cardnr + 1);
- goto out;
- }
- card->cs = cs;
- spin_lock_init(&cs->statlock);
- spin_lock_init(&cs->lock);
- cs->chanlimit = 2; /* maximum B-channel number */
- cs->logecho = 0; /* No echo logging */
- cs->cardnr = cardnr;
- cs->debug = L1_DEB_WARN;
- cs->HW_Flags = 0;
- cs->busy_flag = busy_flag;
- cs->irq_flags = I4L_IRQ_FLAG;
-#if TEI_PER_CARD
- if (card->protocol == ISDN_PTYPE_NI1)
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#else
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#endif
- cs->protocol = card->protocol;
-
- if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
- printk(KERN_WARNING
- "HiSax: Card Type %d out of range\n", card->typ);
- goto outf_cs;
- }
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
- goto outf_cs;
- }
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for status_buf(card %d)\n",
- cardnr + 1);
- goto outf_dlog;
- }
- cs->stlist = NULL;
- cs->status_read = cs->status_buf;
- cs->status_write = cs->status_buf;
- cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
- cs->typ = card->typ;
-#ifdef MODULE
- cs->iif.owner = lockowner;
-#endif
- strcpy(cs->iif.id, id);
- cs->iif.channels = 2;
- cs->iif.maxbufsize = MAX_DATA_SIZE;
- cs->iif.hl_hdrlen = MAX_HEADER_LEN;
- cs->iif.features =
- ISDN_FEATURE_L2_X75I |
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L2_HDLC_56K |
- ISDN_FEATURE_L2_TRANS |
- ISDN_FEATURE_L3_TRANS |
-#ifdef CONFIG_HISAX_1TR6
- ISDN_FEATURE_P_1TR6 |
-#endif
-#ifdef CONFIG_HISAX_EURO
- ISDN_FEATURE_P_EURO |
-#endif
-#ifdef CONFIG_HISAX_NI1
- ISDN_FEATURE_P_NI1 |
-#endif
- 0;
-
- cs->iif.command = HiSax_command;
- cs->iif.writecmd = NULL;
- cs->iif.writebuf_skb = HiSax_writebuf_skb;
- cs->iif.readstat = HiSax_readstatus;
- register_isdn(&cs->iif);
- cs->myid = cs->iif.channels;
- printk(KERN_INFO
- "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
- (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
- (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
- (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
- (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
- "NONE", cs->iif.id, cs->myid);
switch (card->typ) {
#if CARD_TELES0
case ISDN_CTYPE_16_0:
@@ -1094,13 +1009,115 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
printk(KERN_WARNING
"HiSax: Support for %s Card not selected\n",
CardType[card->typ]);
- ll_unload(cs);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
+ struct IsdnCardState **cs_out, int *busy_flag,
+ struct module *lockowner)
+{
+ struct IsdnCardState *cs;
+
+ *cs_out = NULL;
+
+ cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+ if (!cs) {
+ printk(KERN_WARNING
+ "HiSax: No memory for IsdnCardState(card %d)\n",
+ cardnr + 1);
+ goto out;
+ }
+ card->cs = cs;
+ spin_lock_init(&cs->statlock);
+ spin_lock_init(&cs->lock);
+ cs->chanlimit = 2; /* maximum B-channel number */
+ cs->logecho = 0; /* No echo logging */
+ cs->cardnr = cardnr;
+ cs->debug = L1_DEB_WARN;
+ cs->HW_Flags = 0;
+ cs->busy_flag = busy_flag;
+ cs->irq_flags = I4L_IRQ_FLAG;
+#if TEI_PER_CARD
+ if (card->protocol == ISDN_PTYPE_NI1)
+ test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#else
+ test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#endif
+ cs->protocol = card->protocol;
+
+ if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
+ printk(KERN_WARNING
+ "HiSax: Card Type %d out of range\n", card->typ);
goto outf_cs;
}
- if (!ret) {
- ll_unload(cs);
+ if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+ printk(KERN_WARNING
+ "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
goto outf_cs;
}
+ if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+ printk(KERN_WARNING
+ "HiSax: No memory for status_buf(card %d)\n",
+ cardnr + 1);
+ goto outf_dlog;
+ }
+ cs->stlist = NULL;
+ cs->status_read = cs->status_buf;
+ cs->status_write = cs->status_buf;
+ cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
+ cs->typ = card->typ;
+#ifdef MODULE
+ cs->iif.owner = lockowner;
+#endif
+ strcpy(cs->iif.id, id);
+ cs->iif.channels = 2;
+ cs->iif.maxbufsize = MAX_DATA_SIZE;
+ cs->iif.hl_hdrlen = MAX_HEADER_LEN;
+ cs->iif.features =
+ ISDN_FEATURE_L2_X75I |
+ ISDN_FEATURE_L2_HDLC |
+ ISDN_FEATURE_L2_HDLC_56K |
+ ISDN_FEATURE_L2_TRANS |
+ ISDN_FEATURE_L3_TRANS |
+#ifdef CONFIG_HISAX_1TR6
+ ISDN_FEATURE_P_1TR6 |
+#endif
+#ifdef CONFIG_HISAX_EURO
+ ISDN_FEATURE_P_EURO |
+#endif
+#ifdef CONFIG_HISAX_NI1
+ ISDN_FEATURE_P_NI1 |
+#endif
+ 0;
+
+ cs->iif.command = HiSax_command;
+ cs->iif.writecmd = NULL;
+ cs->iif.writebuf_skb = HiSax_writebuf_skb;
+ cs->iif.readstat = HiSax_readstatus;
+ register_isdn(&cs->iif);
+ cs->myid = cs->iif.channels;
+
+ *cs_out = cs;
+ return 1; /* success */
+
+outf_dlog:
+ kfree(cs->dlog);
+outf_cs:
+ kfree(cs);
+ card->cs = NULL;
+out:
+ return 0; /* error */
+}
+
+static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ int ret;
+
if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
ll_unload(cs);
@@ -1143,11 +1160,41 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
if (!test_bit(HW_ISAR, &cs->HW_Flags))
ll_run(cs, 0);
- ret = 1;
+ return 1;
+
+outf_cs:
+ kfree(cs);
+ card->cs = NULL;
+ return ret;
+}
+
+static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+{
+ int ret;
+ struct IsdnCard *card = cards + cardnr;
+ struct IsdnCardState *cs;
+
+ ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
+ if (!ret)
+ return 0;
+
+ printk(KERN_INFO
+ "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
+ (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
+ (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
+ (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
+ (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
+ "NONE", cs->iif.id, cs->myid);
+
+ ret = hisax_cs_setup_card(card);
+ if (!ret) {
+ ll_unload(cs);
+ goto outf_cs;
+ }
+
+ ret = hisax_cs_setup(cardnr, card, cs);
goto out;
- outf_dlog:
- kfree(cs->dlog);
outf_cs:
kfree(cs);
card->cs = NULL;
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b45de9d408d..b73027ff50e 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,98 +300,72 @@ enpci_interrupt(int intno, void *dev_id)
return IRQ_HANDLED;
}
-
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-/* called by config.c */
-int __devinit
-setup_enternow_pci(struct IsdnCard *card)
+static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-#ifdef CONFIG_PCI
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, enternow_pci_rev);
- printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_ENTERNOW)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
- return(0);
- }
- /* checks Sub-Vendor ID because system crashes with Traverse-Card */
- if ((dev_netjet->subsystem_vendor != 0x55) ||
- (dev_netjet->subsystem_device != 0x02)) {
- printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
- return(0);
- }
- } else {
- printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
- return(0);
- }
-
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
-
- /* Reset an */
- cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- /* 20 ms Pause */
- mdelay(20);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
+ return(0);
+ }
+ /* checks Sub-Vendor ID because system crashes with Traverse-Card */
+ if ((dev_netjet->subsystem_vendor != 0x55) ||
+ (dev_netjet->subsystem_device != 0x02)) {
+ printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
+ printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
+ return(0);
+ }
- cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- mdelay(10);
+ return(1);
+}
- cs->hw.njet.auxd = 0x00; // war 0xc0
- cs->hw.njet.dmactrl = 0;
+static void __devinit en_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
- outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
- outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
- outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
+ /* Reset an */
+ cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
+ outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+ /* 20 ms Pause */
+ mdelay(20);
- break;
- }
-#else
+ cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
+ outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+ mdelay(10);
- printk(KERN_WARNING "enter:now PCI: NO_PCI_BIOS\n");
- printk(KERN_WARNING "enter:now PCI: unable to config Formula-n enter:now ISDN PCI ab\n");
- return (0);
+ cs->hw.njet.auxd = 0x00; // war 0xc0
+ cs->hw.njet.dmactrl = 0;
-#endif /* CONFIG_PCI */
+ outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
+ outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
+ outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
+}
- bytecnt = 256;
+static int __devinit en_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
cs->hw.njet.base, cs->irq);
if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
printk(KERN_WARNING
- "HiSax: %s config port %lx-%lx already in use\n",
- CardType[card->typ],
- cs->hw.njet.base,
- cs->hw.njet.base + bytecnt);
+ "HiSax: enter:now config port %lx-%lx already in use\n",
+ cs->hw.njet.base,
+ cs->hw.njet.base + bytecnt);
return (0);
}
+
setup_Amd7930(cs);
cs->hw.njet.last_is0 = 0;
/* macro rByteAMD */
@@ -407,5 +381,44 @@ setup_enternow_pci(struct IsdnCard *card)
cs->irq_func = &enpci_interrupt;
cs->irq_flags |= IRQF_SHARED;
- return (1);
+ return (1);
+}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+/* called by config.c */
+int __devinit
+setup_enternow_pci(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+ strcpy(tmp, enternow_pci_rev);
+ printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_ENTERNOW)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = en_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
+ return(0);
+ }
+
+ en_cs_init(card, cs);
+ break;
+ }
+
+ return en_cs_init_rest(card, cs);
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8a48a3ce0a5..077080aca79 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -6,7 +6,7 @@
* based on existing driver for CCD hfc ISA cards
* Copyright by Werner Cornelius <werner@isdn4linux.de>
* by Karsten Keil <keil@isdn4linux.de>
- *
+ *
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
@@ -67,8 +67,6 @@ static const PCI_ENTRY id_list[] =
};
-#ifdef CONFIG_PCI
-
/******************************************/
/* free hardware resources used by driver */
/******************************************/
@@ -237,7 +235,7 @@ static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
if (fifo_state)
cs->hw.hfcpci.fifo_en |= fifo_state;
Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
+}
/***************************************/
/* clear the desired B-channel tx fifo */
@@ -263,7 +261,7 @@ static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
if (fifo_state)
cs->hw.hfcpci.fifo_en |= fifo_state;
Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
+}
/*********************************************/
/* read a complete B-frame out of the buffer */
@@ -511,7 +509,6 @@ main_rec_hfcpci(struct BCState *bcs)
test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
if (count && receive)
goto Begin;
- return;
}
/**************************/
@@ -582,7 +579,6 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
dev_kfree_skb_any(cs->tx_skb);
cs->tx_skb = NULL;
- return;
}
/**************************/
@@ -729,7 +725,6 @@ hfcpci_fill_fifo(struct BCState *bcs)
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- return;
}
/**********************************************/
@@ -924,7 +919,6 @@ receive_emsg(struct IsdnCardState *cs)
test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
if (count && receive)
goto Begin;
- return;
} /* receive_emsg */
/*********************/
@@ -1350,13 +1344,13 @@ mode_hfcpci(struct BCState *bcs, int mode, int bc)
cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
}
if (fifo2) {
- cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
+ cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
cs->hw.hfcpci.ctmt &= ~2;
cs->hw.hfcpci.conn &= ~0x18;
} else {
- cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
+ cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
cs->hw.hfcpci.ctmt &= ~1;
@@ -1642,8 +1636,6 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
/* this variable is used as card index when more than one cards are present */
static struct pci_dev *dev_hfcpci __devinitdata = NULL;
-#endif /* CONFIG_PCI */
-
int __devinit
setup_hfcpci(struct IsdnCard *card)
{
@@ -1656,96 +1648,99 @@ setup_hfcpci(struct IsdnCard *card)
#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
+
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
-#ifdef CONFIG_PCI
+
cs->hw.hfcpci.int_s1 = 0;
cs->dc.hfcpci.ph_state = 0;
cs->hw.hfcpci.fifo = 255;
- if (cs->typ == ISDN_CTYPE_HFC_PCI) {
- i = 0;
- while (id_list[i].vendor_id) {
- tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
- id_list[i].device_id,
- dev_hfcpci);
- i++;
- if (tmp_hfcpci) {
- if (pci_enable_device(tmp_hfcpci))
- continue;
- pci_set_master(tmp_hfcpci);
- if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
- continue;
- else
- break;
- }
- }
-
+ if (cs->typ != ISDN_CTYPE_HFC_PCI)
+ return(0);
+
+ i = 0;
+ while (id_list[i].vendor_id) {
+ tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
+ id_list[i].device_id,
+ dev_hfcpci);
+ i++;
if (tmp_hfcpci) {
- i--;
- dev_hfcpci = tmp_hfcpci; /* old device */
- cs->hw.hfcpci.dev = dev_hfcpci;
- cs->irq = dev_hfcpci->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
- printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
- } else {
- printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
- return (0);
- }
- if (!cs->hw.hfcpci.pci_io) {
- printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return (0);
- }
- /* Allocate memory for FIFOS */
- /* Because the HFC-PCI needs a 32K physical alignment, we */
- /* need to allocate the double mem and align the address */
- if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
- printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
- return 0;
+ if (pci_enable_device(tmp_hfcpci))
+ continue;
+ pci_set_master(tmp_hfcpci);
+ if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
+ continue;
+ else
+ break;
}
- cs->hw.hfcpci.fifos = (void *)
- (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
- pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
- cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
- printk(KERN_INFO
- "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
- cs->hw.hfcpci.pci_io,
- cs->hw.hfcpci.fifos,
- (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
- cs->irq, HZ);
- spin_lock_irqsave(&cs->lock, flags);
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
- cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
- cs->hw.hfcpci.int_m1 = 0;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- /* At this point the needed PCI config is done */
- /* fifos are still not enabled */
- INIT_WORK(&cs->tqueue, hfcpci_bh);
- cs->setstack_d = setstack_hfcpci;
- cs->BC_Send_Data = &hfcpci_send_data;
- cs->readisac = NULL;
- cs->writeisac = NULL;
- cs->readisacfifo = NULL;
- cs->writeisacfifo = NULL;
- cs->BC_Read_Reg = NULL;
- cs->BC_Write_Reg = NULL;
- cs->irq_func = &hfcpci_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
- cs->hw.hfcpci.timer.data = (long) cs;
- init_timer(&cs->hw.hfcpci.timer);
- cs->cardmsg = &hfcpci_card_msg;
- cs->auxcmd = &hfcpci_auxcmd;
- spin_unlock_irqrestore(&cs->lock, flags);
- return (1);
- } else
- return (0); /* no valid card type */
-#else
- printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
- return (0);
-#endif /* CONFIG_PCI */
+ }
+
+ if (!tmp_hfcpci) {
+ printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
+ return (0);
+ }
+
+ i--;
+ dev_hfcpci = tmp_hfcpci; /* old device */
+ cs->hw.hfcpci.dev = dev_hfcpci;
+ cs->irq = dev_hfcpci->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
+ return (0);
+ }
+ cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
+ printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
+
+ if (!cs->hw.hfcpci.pci_io) {
+ printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
+ return (0);
+ }
+ /* Allocate memory for FIFOS */
+ /* Because the HFC-PCI needs a 32K physical alignment, we */
+ /* need to allocate the double mem and align the address */
+ if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
+ printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
+ return 0;
+ }
+ cs->hw.hfcpci.fifos = (void *)
+ (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
+ pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
+ cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
+ printk(KERN_INFO
+ "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
+ cs->hw.hfcpci.pci_io,
+ cs->hw.hfcpci.fifos,
+ (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
+ cs->irq, HZ);
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
+ cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
+ cs->hw.hfcpci.int_m1 = 0;
+ Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
+ Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
+ /* At this point the needed PCI config is done */
+ /* fifos are still not enabled */
+
+ INIT_WORK(&cs->tqueue, hfcpci_bh);
+ cs->setstack_d = setstack_hfcpci;
+ cs->BC_Send_Data = &hfcpci_send_data;
+ cs->readisac = NULL;
+ cs->writeisac = NULL;
+ cs->readisacfifo = NULL;
+ cs->writeisacfifo = NULL;
+ cs->BC_Read_Reg = NULL;
+ cs->BC_Write_Reg = NULL;
+ cs->irq_func = &hfcpci_interrupt;
+ cs->irq_flags |= IRQF_SHARED;
+ cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
+ cs->hw.hfcpci.timer.data = (long) cs;
+ init_timer(&cs->hw.hfcpci.timer);
+ cs->cardmsg = &hfcpci_card_msg;
+ cs->auxcmd = &hfcpci_auxcmd;
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ return (1);
}
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index c09ffb13533..fa2db87667c 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,107 +148,87 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_s(struct IsdnCard *card)
+static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt,cfg;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
+ int cfg;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, NETjet_S_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_S)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+ pci_set_master(dev_netjet);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
+ return(0);
+ }
+ /* the TJ300 and TJ320 must be detected, the IRQ handling is different
+ * unfortunatly the chips use the same device ID, but the TJ320 has
+ * the bit20 in status PCI cfg register set
+ */
+ pci_read_config_dword(dev_netjet, 0x04, &cfg);
+ if (cfg & 0x00100000)
+ cs->subtyp = 1; /* TJ320 */
+ else
+ cs->subtyp = 0; /* TJ300 */
+ /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
+ if ((dev_netjet->subsystem_vendor == 0x55) &&
+ (dev_netjet->subsystem_device == 0x02)) {
+ printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
+ printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
+ return(0);
+ }
+ /* end new code */
-#ifdef CONFIG_PCI
+ return(1);
+}
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
- return(0);
- }
- /* the TJ300 and TJ320 must be detected, the IRQ handling is different
- * unfortunatly the chips use the same device ID, but the TJ320 has
- * the bit20 in status PCI cfg register set
- */
- pci_read_config_dword(dev_netjet, 0x04, &cfg);
- if (cfg & 0x00100000)
- cs->subtyp = 1; /* TJ320 */
- else
- cs->subtyp = 0; /* TJ300 */
- /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
- if ((dev_netjet->subsystem_vendor == 0x55) &&
- (dev_netjet->subsystem_device == 0x02)) {
- printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
- return(0);
- }
- /* end new code */
- } else {
- printk(KERN_WARNING "NETjet-S: No PCI card found\n");
- return(0);
- }
+static int __devinit njs_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
+ cs->hw.njet.auxd = 0xC0;
+ cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+ byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+ byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+ byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
- {
- case 0 :
- break;
+ switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
+ {
+ case 0 :
+ return 1; /* end loop */
- case 3 :
- printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
- continue;
+ case 3 :
+ printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
+ return -1; /* continue looping */
- default :
- printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
- return 0;
- }
- break;
+ default :
+ printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
+ return 0; /* end loop & function */
}
-#else
-
- printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
- printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
- return (0);
-
-#endif /* CONFIG_PCI */
+ return 1; /* end loop */
+}
- bytecnt = 256;
+static int __devinit njs_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"NETjet-S: %s card configured at %#lx IRQ %d\n",
@@ -273,5 +253,47 @@ setup_netjet_s(struct IsdnCard *card)
cs->irq_func = &netjet_s_interrupt;
cs->irq_flags |= IRQF_SHARED;
ISACVersion(cs, "NETjet-S:");
+
return (1);
}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_s(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+ strcpy(tmp, NETjet_S_revision);
+ printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_NETJET_S)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = njs_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "NETjet-S: No PCI card found\n");
+ return(0);
+ }
+
+ ret = njs_cs_init(card, cs);
+ if (!ret)
+ return(0);
+ if (ret > 0)
+ break;
+ /* otherwise, ret < 0, continue looping */
+ }
+
+ return njs_cs_init_rest(card, cs);
+}
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 8202cf34eca..f017d3816b1 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,93 +128,69 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_u(struct IsdnCard *card)
+static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-#ifdef CONFIG_PCI
-#endif
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, NETjet_U_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_U)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
-#ifdef CONFIG_PCI
+ pci_set_master(dev_netjet);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
+ return(0);
+ }
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
- return(0);
- }
- } else {
- printk(KERN_WARNING "NETspider-U: No PCI card found\n");
- return(0);
- }
+ return (1);
+}
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
- mdelay(10);
+static int __devinit nju_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
+ cs->hw.njet.auxd = 0xC0;
+ cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.auxa, 0);
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+ byteout(cs->hw.njet.auxa, 0);
+ byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+ byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+ byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
- {
- case 3 :
- break;
+ switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
+ {
+ case 3 :
+ return 1; /* end loop */
- case 0 :
- printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
- continue;
+ case 0 :
+ printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
+ return -1; /* continue looping */
- default :
- printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
- return 0;
- }
- break;
+ default :
+ printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
+ return 0; /* end loop & function */
}
-#else
-
- printk(KERN_WARNING "NETspider-U: NO_PCI_BIOS\n");
- printk(KERN_WARNING "NETspider-U: unable to config NETspider-U PCI\n");
- return (0);
-
-#endif /* CONFIG_PCI */
+ return 1; /* end loop */
+}
- bytecnt = 256;
+static int __devinit nju_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"NETspider-U: PCI card configured at %#lx IRQ %d\n",
@@ -239,5 +215,48 @@ setup_netjet_u(struct IsdnCard *card)
cs->irq_func = &netjet_u_interrupt;
cs->irq_flags |= IRQF_SHARED;
ICCVersion(cs, "NETspider-U:");
+
return (1);
}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_u(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+ strcpy(tmp, NETjet_U_revision);
+ printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_NETJET_U)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = nju_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "NETspider-U: No PCI card found\n");
+ return(0);
+ }
+
+ ret = nju_cs_init(card, cs);
+ if (!ret)
+ return (0);
+ if (ret > 0)
+ break;
+ /* ret < 0 == continue looping */
+ }
+
+ return nju_cs_init_rest(card, cs);
+}
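
The nj_u.c rework above splits the old open-coded probe loop into helpers whose return value drives the caller: a positive value ends the loop successfully, zero aborts the whole setup, and a negative value means "wrong card, keep scanning". A minimal, self-contained userspace C sketch of that return-code convention follows; the names (probe_one, scan_cards) and the slot logic are invented for illustration and are not part of the driver.

#include <stdio.h>

/*
 * Return convention mirroring the refactor above:
 *    1  -> success, stop scanning
 *    0  -> hard failure, stop scanning and fail the setup
 *   -1  -> uninteresting device, keep scanning
 */
static int probe_one(int slot)
{
	if (slot == 3)
		return 1;	/* the card we are looking for */
	if (slot < 5)
		return -1;	/* some other card: skip it */
	return 0;		/* nothing left to probe */
}

static int scan_cards(void)
{
	int slot;

	for (slot = 0; ; slot++) {
		int ret = probe_one(slot);

		if (ret > 0)
			break;		/* found it: fall through to final init */
		if (ret == 0)
			return 0;	/* no usable card: abort */
		/* ret < 0: continue looping */
	}
	printf("card found, running the rest of the init\n");
	return 1;
}

int main(void)
{
	return scan_cards() ? 0 : 1;
}
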
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 030d1625c5c..ad06f3cc60f 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -451,6 +451,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
spin_unlock_irqrestore(&cs->lock, flags);
return(0);
case CARD_RELEASE:
+ if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+ /* disable all IRQ */
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
spin_lock_irqsave(&cs->lock, flags);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
@@ -468,6 +471,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
+ if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+ /* enable all IRQ */
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
reset_sedlbauer(cs);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
clear_pending_isac_ints(cs);
@@ -667,7 +673,7 @@ setup_sedlbauer(struct IsdnCard *card)
byteout(cs->hw.sedl.cfg_reg, 0xff);
byteout(cs->hw.sedl.cfg_reg, 0x00);
byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
- byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
mdelay(2);
byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 3ef567b99c7..e91c187992d 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -86,7 +86,6 @@ config ISDN_X25
menu "ISDN feature submodules"
- depends on ISDN
config ISDN_DRV_LOOP
tristate "isdnloop support"
@@ -100,7 +99,7 @@ config ISDN_DRV_LOOP
config ISDN_DIVERSION
tristate "Support isdn diversion services"
- depends on ISDN && ISDN_I4L
+ depends on ISDN_I4L
help
This option allows you to use some supplementary diversion
services in conjunction with the HiSax driver on an EURO/DSS1
@@ -120,13 +119,13 @@ config ISDN_DIVERSION
endmenu
comment "ISDN4Linux hardware drivers"
- depends on NET && ISDN && ISDN_I4L
+ depends on ISDN_I4L
source "drivers/isdn/hisax/Kconfig"
menu "Active cards"
- depends on NET && ISDN && ISDN_I4L!=n
+ depends on ISDN_I4L!=n
source "drivers/isdn/icn/Kconfig"
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index e8e37d82647..33fa28a8c19 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,12 +1,17 @@
#
# KVM configuration
#
-menu "Virtualization"
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
depends on X86
+ default y
+
+if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on X86 && EXPERIMENTAL
+ depends on X86_CMPXCHG64 || 64BIT
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -35,4 +40,4 @@ config KVM_AMD
Provides support for KVM on AMD processors equipped with the AMD-V
(SVM) extensions.
-endmenu
+endif # VIRTUALIZATION
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1faf..a7c5e6bee03 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/signal.h>
@@ -18,6 +20,7 @@
#include <linux/kvm_para.h>
#define CR0_PE_MASK (1ULL << 0)
+#define CR0_MP_MASK (1ULL << 1)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
| CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
- (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
+ (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
+ | CR0_MP_MASK)
#define KVM_GUEST_CR4_MASK \
(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
-#define KVM_NUM_MMU_PAGES 256
+#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
#define KVM_PIO_PAGE_OFFSET 1
/*
+ * vcpu->requests bit members
+ */
+#define KVM_TLB_FLUSH 0
+
+/*
* Address types:
*
* gva - guest virtual address
@@ -137,7 +146,7 @@ struct kvm_mmu_page {
gfn_t gfn;
union kvm_mmu_page_role role;
- hpa_t page_hpa;
+ u64 *spt;
unsigned long slot_bitmap; /* One bit set per slot which has memory
* in this shadow page.
*/
@@ -232,6 +241,7 @@ struct kvm_pio_request {
struct page *guest_pages[2];
unsigned guest_page_offset;
int in;
+ int port;
int size;
int string;
int down;
@@ -252,8 +262,70 @@ struct kvm_stat {
u32 halt_exits;
u32 request_irq_exits;
u32 irq_exits;
+ u32 light_exits;
+ u32 efer_reload;
+};
+
+struct kvm_io_device {
+ void (*read)(struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ void *val);
+ void (*write)(struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ const void *val);
+ int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+ void (*destructor)(struct kvm_io_device *this);
+
+ void *private;
+};
+
+static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+ gpa_t addr,
+ int len,
+ void *val)
+{
+ dev->read(dev, addr, len, val);
+}
+
+static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+ gpa_t addr,
+ int len,
+ const void *val)
+{
+ dev->write(dev, addr, len, val);
+}
+
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+{
+ return dev->in_range(dev, addr);
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+ if (dev->destructor)
+ dev->destructor(dev);
+}
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we dont expect many devices to register (famous last words :),
+ * so until then it will suffice. At least its abstracted so we can change
+ * in one place.
+ */
+struct kvm_io_bus {
+ int dev_count;
+#define NR_IOBUS_DEVS 6
+ struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+ struct kvm_io_device *dev);
+
struct kvm_vcpu {
struct kvm *kvm;
union {
@@ -266,6 +338,8 @@ struct kvm_vcpu {
u64 host_tsc;
struct kvm_run *run;
int interrupt_window_open;
+ int guest_mode;
+ unsigned long requests;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@ struct kvm_vcpu {
u64 apic_base;
u64 ia32_misc_enable_msr;
int nmsrs;
+ int save_nmsrs;
+ int msr_offset_efer;
+#ifdef CONFIG_X86_64
+ int msr_offset_kernel_gs_base;
+#endif
struct vmx_msr_entry *guest_msrs;
struct vmx_msr_entry *host_msrs;
- struct list_head free_pages;
- struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
struct kvm_mmu mmu;
struct kvm_mmu_memory_cache mmu_pte_chain_cache;
struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_header_cache;
gfn_t last_pt_write_gfn;
int last_pt_write_count;
@@ -305,6 +384,11 @@ struct kvm_vcpu {
char *guest_fx_image;
int fpu_active;
int guest_fpu_loaded;
+ struct vmx_host_state {
+ int loaded;
+ u16 fs_sel, gs_sel, ldt_sel;
+ int fs_gs_ldt_reload_needed;
+ } vmx_host_state;
int mmio_needed;
int mmio_read_completed;
@@ -331,6 +415,7 @@ struct kvm_vcpu {
u32 ar;
} tr, es, ds, fs, gs;
} rmode;
+ int halt_request; /* real mode on Intel only */
int cpuid_nent;
struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@ struct kvm {
struct list_head active_mmu_pages;
int n_free_mmu_pages;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ int nvcpus;
struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
int memory_config_version;
int busy;
unsigned long rmap_overflow;
struct list_head vm_list;
struct file *filp;
+ struct kvm_io_bus mmio_bus;
+ struct kvm_io_bus pio_bus;
};
struct descriptor_table {
@@ -488,6 +576,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned long count, int string, int down,
gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +600,7 @@ void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_flush_remote_tlbs(struct kvm *kvm);
int kvm_read_guest(struct kvm_vcpu *vcpu,
gva_t addr,
@@ -524,10 +614,12 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
unsigned long segment_base(u16 selector);
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *old, const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -539,6 +631,14 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
return vcpu->mmu.page_fault(vcpu, gva, error_code);
}
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+ if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+ return 0;
+
+ return kvm_mmu_load(vcpu);
+}
+
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
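
The kvm_io_device/kvm_io_bus additions above describe an in-kernel I/O handler as a struct of function pointers plus a small fixed array that is searched linearly by address, exactly the trade-off the comment in kvm.h calls out. Below is a hedged, self-contained userspace sketch of the same shape; every identifier here (io_device, io_bus, the toy_* device) is made up for illustration, there is no locking, and the bus size is fixed much like NR_IOBUS_DEVS.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct io_device {
	void (*read)(struct io_device *dev, uint64_t addr, int len, void *val);
	void (*write)(struct io_device *dev, uint64_t addr, int len, const void *val);
	int  (*in_range)(struct io_device *dev, uint64_t addr);
	void *private;
};

#define NR_BUS_DEVS 6

struct io_bus {
	int dev_count;
	struct io_device *devs[NR_BUS_DEVS];
};

/* Linear search: fine while only a handful of devices ever register. */
static struct io_device *bus_find_dev(struct io_bus *bus, uint64_t addr)
{
	for (int i = 0; i < bus->dev_count; i++)
		if (bus->devs[i]->in_range(bus->devs[i], addr))
			return bus->devs[i];
	return NULL;
}

static void bus_register_dev(struct io_bus *bus, struct io_device *dev)
{
	if (bus->dev_count < NR_BUS_DEVS)
		bus->devs[bus->dev_count++] = dev;
}

/* One toy device backed by a byte array at addresses 0x100..0x1ff. */
static uint8_t toy_mem[0x100];

static int toy_in_range(struct io_device *dev, uint64_t addr)
{
	return addr >= 0x100 && addr < 0x200;
}

static void toy_read(struct io_device *dev, uint64_t addr, int len, void *val)
{
	memcpy(val, &toy_mem[addr - 0x100], len);
}

static void toy_write(struct io_device *dev, uint64_t addr, int len, const void *val)
{
	memcpy(&toy_mem[addr - 0x100], val, len);
}

int main(void)
{
	struct io_bus bus = { 0 };
	struct io_device toy = {
		.read = toy_read, .write = toy_write, .in_range = toy_in_range,
	};
	uint32_t v = 0xdeadbeef, back = 0;

	bus_register_dev(&bus, &toy);

	struct io_device *dev = bus_find_dev(&bus, 0x180);
	if (dev) {
		dev->write(dev, 0x180, sizeof(v), &v);
		dev->read(dev, 0x180, sizeof(back), &back);
	}
	printf("read back %#x\n", (unsigned)back);
	return 0;
}
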
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07adb04..1b206f197c6 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,34 +16,33 @@
*/
#include "kvm.h"
+#include "x86_emulate.h"
+#include "segment_descriptor.h"
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/magic.h>
-#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
-#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
#include <linux/reboot.h>
-#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
-#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/anon_inodes.h>
-#include "x86_emulate.h"
-#include "segment_descriptor.h"
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -51,8 +50,12 @@ MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);
+static cpumask_t cpus_hardware_enabled;
+
struct kvm_arch_ops *kvm_arch_ops;
+static void hardware_disable(void *ignored);
+
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
static struct kvm_stats_debugfs_item {
@@ -72,13 +75,13 @@ static struct kvm_stats_debugfs_item {
{ "halt_exits", STAT_OFFSET(halt_exits) },
{ "request_irq", STAT_OFFSET(request_irq_exits) },
{ "irq_exits", STAT_OFFSET(irq_exits) },
+ { "light_exits", STAT_OFFSET(light_exits) },
+ { "efer_reload", STAT_OFFSET(efer_reload) },
{ NULL }
};
static struct dentry *debugfs_dir;
-struct vfsmount *kvmfs_mnt;
-
#define MAX_IO_MSRS 256
#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -100,55 +103,6 @@ struct segment_descriptor_64 {
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-static struct inode *kvmfs_inode(struct file_operations *fops)
-{
- int error = -ENOMEM;
- struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
-
- if (!inode)
- goto eexit_1;
-
- inode->i_fop = fops;
-
- /*
- * Mark the inode dirty from the very beginning,
- * that way it will never be moved to the dirty
- * list because mark_inode_dirty() will think
- * that it already _is_ on the dirty list.
- */
- inode->i_state = I_DIRTY;
- inode->i_mode = S_IRUSR | S_IWUSR;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- return inode;
-
-eexit_1:
- return ERR_PTR(error);
-}
-
-static struct file *kvmfs_file(struct inode *inode, void *private_data)
-{
- struct file *file = get_empty_filp();
-
- if (!file)
- return ERR_PTR(-ENFILE);
-
- file->f_path.mnt = mntget(kvmfs_mnt);
- file->f_path.dentry = d_alloc_anon(inode);
- if (!file->f_path.dentry)
- return ERR_PTR(-ENOMEM);
- file->f_mapping = inode->i_mapping;
-
- file->f_pos = 0;
- file->f_flags = O_RDWR;
- file->f_op = inode->i_fop;
- file->f_mode = FMODE_READ | FMODE_WRITE;
- file->f_version = 0;
- file->private_data = private_data;
- return file;
-}
-
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
@@ -307,6 +261,48 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
mutex_unlock(&vcpu->mutex);
}
+static void ack_flush(void *_completed)
+{
+ atomic_t *completed = _completed;
+
+ atomic_inc(completed);
+}
+
+void kvm_flush_remote_tlbs(struct kvm *kvm)
+{
+ int i, cpu, needed;
+ cpumask_t cpus;
+ struct kvm_vcpu *vcpu;
+ atomic_t completed;
+
+ atomic_set(&completed, 0);
+ cpus_clear(cpus);
+ needed = 0;
+ for (i = 0; i < kvm->nvcpus; ++i) {
+ vcpu = &kvm->vcpus[i];
+ if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ continue;
+ cpu = vcpu->cpu;
+ if (cpu != -1 && cpu != raw_smp_processor_id())
+ if (!cpu_isset(cpu, cpus)) {
+ cpu_set(cpu, cpus);
+ ++needed;
+ }
+ }
+
+ /*
+ * We really want smp_call_function_mask() here. But that's not
+ * available, so ipi all cpus in parallel and wait for them
+ * to complete.
+ */
+ for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
+ smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
+ while (atomic_read(&completed) != needed) {
+ cpu_relax();
+ barrier();
+ }
+}
+
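
kvm_flush_remote_tlbs() above marks each vcpu with a request bit, IPIs every CPU that may be running a guest, and then spins until the acknowledgement count matches the number of IPIs sent. The following is only a rough userspace analogy of that signal-and-count-acks pattern, using C11 atomics and POSIX threads (compile with -pthread); the worker here merely bumps the counter, whereas the real handler flushes the TLB on the target CPU.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS 4

static atomic_int completed;

/* Userspace stand-in for ack_flush(): each "cpu" bumps the counter
 * once it has acted on the request. */
static void *ack_worker(void *arg)
{
	(void)arg;
	/* ...per-CPU work would happen here... */
	atomic_fetch_add(&completed, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	int needed = 0;

	atomic_store(&completed, 0);
	for (int i = 0; i < NR_WORKERS; i++) {
		pthread_create(&tid[i], NULL, ack_worker, NULL);
		++needed;
	}

	/* Same shape as the wait loop above: busy-wait until every
	 * target has acknowledged. */
	while (atomic_load(&completed) != needed)
		;	/* spin */

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);
	printf("all %d acks received\n", needed);
	return 0;
}
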
static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -315,8 +311,13 @@ static struct kvm *kvm_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ kvm_io_bus_init(&kvm->pio_bus);
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
+ spin_lock(&kvm_lock);
+ list_add(&kvm->vm_list, &vm_list);
+ spin_unlock(&kvm_lock);
+ kvm_io_bus_init(&kvm->mmio_bus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu = &kvm->vcpus[i];
@@ -324,10 +325,6 @@ static struct kvm *kvm_create_vm(void)
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->mmu.root_hpa = INVALID_PAGE;
- INIT_LIST_HEAD(&vcpu->free_pages);
- spin_lock(&kvm_lock);
- list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
}
return kvm;
}
@@ -380,6 +377,16 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
}
}
+static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->vmcs)
+ return;
+
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+}
+
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
if (!vcpu->vmcs)
@@ -400,6 +407,11 @@ static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ /*
+ * Unpin any mmu pages first.
+ */
+ for (i = 0; i < KVM_MAX_VCPUS; ++i)
+ kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i)
kvm_free_vcpu(&kvm->vcpus[i]);
}
@@ -414,6 +426,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
+ kvm_io_bus_destroy(&kvm->pio_bus);
+ kvm_io_bus_destroy(&kvm->mmio_bus);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
kfree(kvm);
@@ -969,7 +983,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memory_slot *memslot = NULL;
+ struct kvm_memory_slot *memslot;
unsigned long rel_gfn;
for (i = 0; i < kvm->nmemslots; ++i) {
@@ -978,7 +992,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages) {
- if (!memslot || !memslot->dirty_bitmap)
+ if (!memslot->dirty_bitmap)
return;
rel_gfn = gfn - memslot->base_gfn;
@@ -1037,12 +1051,31 @@ static int emulator_write_std(unsigned long addr,
return X86EMUL_UNHANDLEABLE;
}
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ /*
+ * Note that it's important to have this wrapper function because
+ * in the very near future we will be checking for MMIOs against
+ * the LAPIC as well as the general MMIO bus
+ */
+ return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+}
+
+static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+}
+
static int emulator_read_emulated(unsigned long addr,
void *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -1051,18 +1084,26 @@ static int emulator_read_emulated(unsigned long addr,
} else if (emulator_read_std(addr, val, bytes, ctxt)
== X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- else {
- gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
- vcpu->mmio_needed = 1;
- vcpu->mmio_phys_addr = gpa;
- vcpu->mmio_size = bytes;
- vcpu->mmio_is_write = 0;
+ gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ if (gpa == UNMAPPED_GVA)
+ return X86EMUL_PROPAGATE_FAULT;
- return X86EMUL_UNHANDLEABLE;
+ /*
+ * Is this MMIO handled locally?
+ */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if (mmio_dev) {
+ kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+ return X86EMUL_CONTINUE;
}
+
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_phys_addr = gpa;
+ vcpu->mmio_size = bytes;
+ vcpu->mmio_is_write = 0;
+
+ return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,18 +1111,20 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
{
struct page *page;
void *virt;
+ unsigned offset = offset_in_page(gpa);
if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
return 0;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
if (!page)
return 0;
- kvm_mmu_pre_write(vcpu, gpa, bytes);
mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
- memcpy(virt + offset_in_page(gpa), val, bytes);
+ if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
+ kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+ memcpy(virt + offset_in_page(gpa), val, bytes);
+ }
kunmap_atomic(virt, KM_USER0);
- kvm_mmu_post_write(vcpu, gpa, bytes);
return 1;
}
@@ -1090,8 +1133,9 @@ static int emulator_write_emulated(unsigned long addr,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
- gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
if (gpa == UNMAPPED_GVA) {
kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1101,6 +1145,15 @@ static int emulator_write_emulated(unsigned long addr,
if (emulator_write_phys(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
+ /*
+ * Is this MMIO handled locally?
+ */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if (mmio_dev) {
+ kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+ return X86EMUL_CONTINUE;
+ }
+
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
vcpu->mmio_size = bytes;
@@ -1269,6 +1322,17 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(emulate_instruction);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->irq_summary)
+ return 1;
+
+ vcpu->run->exit_reason = KVM_EXIT_HLT;
+ ++vcpu->stat.halt_exits;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
@@ -1469,6 +1533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_UCODE_REV:
case MSR_IA32_PERF_STATUS:
+ case MSR_IA32_EBL_CR_POWERON:
/* MTRR registers */
case 0xfe:
case 0x200 ... 0x2ff:
@@ -1727,6 +1792,20 @@ static int complete_pio(struct kvm_vcpu *vcpu)
return 0;
}
+void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu)
+{
+ /* TODO: String I/O for in kernel device */
+
+ if (vcpu->pio.in)
+ kvm_iodevice_read(pio_dev, vcpu->pio.port,
+ vcpu->pio.size,
+ vcpu->pio_data);
+ else
+ kvm_iodevice_write(pio_dev, vcpu->pio.port,
+ vcpu->pio.size,
+ vcpu->pio_data);
+}
+
int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned long count, int string, int down,
gva_t address, int rep, unsigned port)
@@ -1735,6 +1814,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int i;
int nr_pages = 1;
struct page *page;
+ struct kvm_io_device *pio_dev;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -1746,17 +1826,27 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->pio.cur_count = count;
vcpu->pio.size = size;
vcpu->pio.in = in;
+ vcpu->pio.port = port;
vcpu->pio.string = string;
vcpu->pio.down = down;
vcpu->pio.guest_page_offset = offset_in_page(address);
vcpu->pio.rep = rep;
+ pio_dev = vcpu_find_pio_dev(vcpu, port);
if (!string) {
kvm_arch_ops->cache_regs(vcpu);
memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
kvm_arch_ops->decache_regs(vcpu);
+ if (pio_dev) {
+ kernel_pio(pio_dev, vcpu);
+ complete_pio(vcpu);
+ return 1;
+ }
return 0;
}
+ /* TODO: String I/O for in kernel device */
+ if (pio_dev)
+ printk(KERN_ERR "kvm_setup_pio: no string io support\n");
if (!count) {
kvm_arch_ops->skip_emulated_instruction(vcpu);
@@ -2273,34 +2363,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
struct inode *inode;
struct file *file;
+ r = anon_inode_getfd(&fd, &inode, &file,
+ "kvm-vcpu", &kvm_vcpu_fops, vcpu);
+ if (r)
+ return r;
atomic_inc(&vcpu->kvm->filp->f_count);
- inode = kvmfs_inode(&kvm_vcpu_fops);
- if (IS_ERR(inode)) {
- r = PTR_ERR(inode);
- goto out1;
- }
-
- file = kvmfs_file(inode, vcpu);
- if (IS_ERR(file)) {
- r = PTR_ERR(file);
- goto out2;
- }
-
- r = get_unused_fd();
- if (r < 0)
- goto out3;
- fd = r;
- fd_install(fd, file);
-
return fd;
-
-out3:
- fput(file);
-out2:
- iput(inode);
-out1:
- fput(vcpu->kvm->filp);
- return r;
}
/*
@@ -2363,6 +2431,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
if (r < 0)
goto out_free_vcpus;
+ spin_lock(&kvm_lock);
+ if (n >= kvm->nvcpus)
+ kvm->nvcpus = n + 1;
+ spin_unlock(&kvm_lock);
+
return r;
out_free_vcpus:
@@ -2376,6 +2449,27 @@ out:
return r;
}
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+ u64 efer;
+ int i;
+ struct kvm_cpuid_entry *e, *entry;
+
+ rdmsrl(MSR_EFER, efer);
+ entry = NULL;
+ for (i = 0; i < vcpu->cpuid_nent; ++i) {
+ e = &vcpu->cpuid_entries[i];
+ if (e->function == 0x80000001) {
+ entry = e;
+ break;
+ }
+ }
+ if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
+ entry->edx &= ~(1 << 20);
+ printk(KERN_INFO ": guest NX capability removed\n");
+ }
+}
+
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries)
@@ -2390,6 +2484,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out;
vcpu->cpuid_nent = cpuid->nent;
+ cpuid_fix_nx_cap(vcpu);
return 0;
out:
@@ -2738,41 +2833,18 @@ static int kvm_dev_ioctl_create_vm(void)
struct file *file;
struct kvm *kvm;
- inode = kvmfs_inode(&kvm_vm_fops);
- if (IS_ERR(inode)) {
- r = PTR_ERR(inode);
- goto out1;
- }
-
kvm = kvm_create_vm();
- if (IS_ERR(kvm)) {
- r = PTR_ERR(kvm);
- goto out2;
+ if (IS_ERR(kvm))
+ return PTR_ERR(kvm);
+ r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
+ if (r) {
+ kvm_destroy_vm(kvm);
+ return r;
}
- file = kvmfs_file(inode, kvm);
- if (IS_ERR(file)) {
- r = PTR_ERR(file);
- goto out3;
- }
kvm->filp = file;
- r = get_unused_fd();
- if (r < 0)
- goto out4;
- fd = r;
- fd_install(fd, file);
-
return fd;
-
-out4:
- fput(file);
-out3:
- kvm_destroy_vm(kvm);
-out2:
- iput(inode);
-out1:
- return r;
}
static long kvm_dev_ioctl(struct file *filp,
@@ -2862,7 +2934,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
* in vmx root mode.
*/
printk(KERN_INFO "kvm: exiting hardware virtualization\n");
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
}
return NOTIFY_OK;
}
@@ -2905,33 +2977,88 @@ static void decache_vcpus_on_cpu(int cpu)
spin_unlock(&kvm_lock);
}
+static void hardware_enable(void *junk)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (cpu_isset(cpu, cpus_hardware_enabled))
+ return;
+ cpu_set(cpu, cpus_hardware_enabled);
+ kvm_arch_ops->hardware_enable(NULL);
+}
+
+static void hardware_disable(void *junk)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (!cpu_isset(cpu, cpus_hardware_enabled))
+ return;
+ cpu_clear(cpu, cpus_hardware_enabled);
+ decache_vcpus_on_cpu(cpu);
+ kvm_arch_ops->hardware_disable(NULL);
+}
+
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
switch (val) {
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
- decache_vcpus_on_cpu(cpu);
- smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
- NULL, 0, 1);
+ smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
- NULL, 0, 1);
+ smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
break;
}
return NOTIFY_OK;
}
+void kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+ memset(bus, 0, sizeof(*bus));
+}
+
+void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < bus->dev_count; i++) {
+ struct kvm_io_device *pos = bus->devs[i];
+
+ kvm_iodevice_destructor(pos);
+ }
+}
+
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+ int i;
+
+ for (i = 0; i < bus->dev_count; i++) {
+ struct kvm_io_device *pos = bus->devs[i];
+
+ if (pos->in_range(pos, addr))
+ return pos;
+ }
+
+ return NULL;
+}
+
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+ BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
+
+ bus->devs[bus->dev_count++] = dev;
+}
+
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
.priority = 20, /* must be > scheduler priority */
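
hardware_enable()/hardware_disable() above track state in cpus_hardware_enabled so the per-CPU enable and disable paths are idempotent: only the first enable and the matching disable actually touch the hardware. A single-threaded userspace sketch of that guard follows; the bitmask and printf calls are just stand-ins, since the real code uses a cpumask and runs on each CPU via smp_call_function_single().

#include <stdio.h>

static unsigned long enabled_mask;	/* one bit per "cpu" */

/* Idempotent enable: only touch the hardware the first time a given
 * cpu asks, mirroring the cpus_hardware_enabled check above. */
static void toy_hardware_enable(int cpu)
{
	if (enabled_mask & (1UL << cpu))
		return;
	enabled_mask |= 1UL << cpu;
	printf("cpu%d: virtualization enabled\n", cpu);
}

static void toy_hardware_disable(int cpu)
{
	if (!(enabled_mask & (1UL << cpu)))
		return;
	enabled_mask &= ~(1UL << cpu);
	printf("cpu%d: virtualization disabled\n", cpu);
}

int main(void)
{
	toy_hardware_enable(0);
	toy_hardware_enable(0);		/* second call is a no-op */
	toy_hardware_disable(0);
	toy_hardware_disable(1);	/* never enabled: also a no-op */
	return 0;
}
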
@@ -2983,14 +3110,13 @@ static void kvm_exit_debug(void)
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
- decache_vcpus_on_cpu(raw_smp_processor_id());
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ hardware_disable(NULL);
return 0;
}
static int kvm_resume(struct sys_device *dev)
{
- on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+ hardware_enable(NULL);
return 0;
}
@@ -3007,18 +3133,6 @@ static struct sys_device kvm_sysdev = {
hpa_t bad_page_address;
-static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type kvm_fs_type = {
- .name = "kvmfs",
- .get_sb = kvmfs_get_sb,
- .kill_sb = kill_anon_super,
-};
-
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
int r;
@@ -3043,7 +3157,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
if (r < 0)
goto out;
- on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+ on_each_cpu(hardware_enable, NULL, 0, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_1;
@@ -3075,7 +3189,7 @@ out_free_2:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_ops->hardware_unsetup();
out:
kvm_arch_ops = NULL;
@@ -3089,7 +3203,7 @@ void kvm_exit_arch(void)
sysdev_class_unregister(&kvm_sysdev_class);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_ops->hardware_unsetup();
kvm_arch_ops = NULL;
}
@@ -3103,14 +3217,6 @@ static __init int kvm_init(void)
if (r)
goto out4;
- r = register_filesystem(&kvm_fs_type);
- if (r)
- goto out3;
-
- kvmfs_mnt = kern_mount(&kvm_fs_type);
- r = PTR_ERR(kvmfs_mnt);
- if (IS_ERR(kvmfs_mnt))
- goto out2;
kvm_init_debug();
kvm_init_msr_list();
@@ -3127,10 +3233,6 @@ static __init int kvm_init(void)
out:
kvm_exit_debug();
- mntput(kvmfs_mnt);
-out2:
- unregister_filesystem(&kvm_fs_type);
-out3:
kvm_mmu_module_exit();
out4:
return r;
@@ -3140,8 +3242,6 @@ static __exit void kvm_exit(void)
{
kvm_exit_debug();
__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
- mntput(kvmfs_mnt);
- unregister_filesystem(&kvm_fs_type);
kvm_mmu_module_exit();
}
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e8e228118de..b297a6b111a 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -16,15 +16,18 @@
* the COPYING file in the top-level directory.
*
*/
+
+#include "vmx.h"
+#include "kvm.h"
+
#include <linux/types.h>
#include <linux/string.h>
-#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
-#include "vmx.h"
-#include "kvm.h"
+#include <asm/page.h>
+#include <asm/cmpxchg.h>
#undef MMU_DEBUG
@@ -90,25 +93,11 @@ static int dbg = 1;
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
-#define PT32_PTE_COPY_MASK \
- (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
-
-#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
-#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
-#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
-
-#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
-#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
-
-#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
-
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define PT64_LEVEL_BITS 9
@@ -165,6 +154,8 @@ struct kvm_rmap_desc {
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_cache;
+static struct kmem_cache *mmu_page_header_cache;
static int is_write_protection(struct kvm_vcpu *vcpu)
{
@@ -202,6 +193,15 @@ static int is_rmap_pte(u64 pte)
== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
+static void set_shadow_pte(u64 *sptep, u64 spte)
+{
+#ifdef CONFIG_X86_64
+ set_64bit((unsigned long *)sptep, spte);
+#else
+ set_64bit((unsigned long long *)sptep, spte);
+#endif
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min,
gfp_t gfp_flags)
@@ -235,6 +235,14 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
goto out;
r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
rmap_desc_cache, 1, gfp_flags);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
+ mmu_page_cache, 4, gfp_flags);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+ mmu_page_header_cache, 4, gfp_flags);
out:
return r;
}
@@ -258,6 +266,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+ mmu_free_memory_cache(&vcpu->mmu_page_cache);
+ mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -433,19 +443,18 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
BUG_ON(!(*spte & PT_WRITABLE_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
rmap_remove(vcpu, spte);
- kvm_arch_ops->tlb_flush(vcpu);
- *spte &= ~(u64)PT_WRITABLE_MASK;
+ set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
}
#ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
{
u64 *pos;
u64 *end;
- for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
- pos != end; pos++)
+ for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (*pos != 0) {
printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
pos, *pos);
@@ -455,13 +464,13 @@ static int is_empty_shadow_page(hpa_t page_hpa)
}
#endif
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page_head)
{
- struct kvm_mmu_page *page_head = page_header(page_hpa);
-
- ASSERT(is_empty_shadow_page(page_hpa));
- page_head->page_hpa = page_hpa;
- list_move(&page_head->link, &vcpu->free_pages);
+ ASSERT(is_empty_shadow_page(page_head->spt));
+ list_del(&page_head->link);
+ mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+ mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
++vcpu->kvm->n_free_mmu_pages;
}
@@ -475,12 +484,15 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
{
struct kvm_mmu_page *page;
- if (list_empty(&vcpu->free_pages))
+ if (!vcpu->kvm->n_free_mmu_pages)
return NULL;
- page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
- list_move(&page->link, &vcpu->kvm->active_mmu_pages);
- ASSERT(is_empty_shadow_page(page->page_hpa));
+ page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+ sizeof *page);
+ page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+ set_page_private(virt_to_page(page->spt), (unsigned long)page);
+ list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+ ASSERT(is_empty_shadow_page(page->spt));
page->slot_bitmap = 0;
page->multimapped = 0;
page->parent_pte = parent_pte;
@@ -638,7 +650,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
u64 *pt;
u64 ent;
- pt = __va(page->page_hpa);
+ pt = page->spt;
if (page->role.level == PT_PAGE_TABLE_LEVEL) {
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -646,7 +658,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
rmap_remove(vcpu, &pt[i]);
pt[i] = 0;
}
- kvm_arch_ops->tlb_flush(vcpu);
+ kvm_flush_remote_tlbs(vcpu->kvm);
return;
}
@@ -659,6 +671,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
ent &= PT64_BASE_ADDR_MASK;
mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
}
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -685,12 +698,12 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
}
BUG_ON(!parent_pte);
kvm_mmu_put_page(vcpu, page, parent_pte);
- *parent_pte = 0;
+ set_shadow_pte(parent_pte, 0);
}
kvm_mmu_page_unlink_children(vcpu, page);
if (!page->root_count) {
hlist_del(&page->hash_link);
- kvm_mmu_free_page(vcpu, page->page_hpa);
+ kvm_mmu_free_page(vcpu, page);
} else
list_move(&page->link, &vcpu->kvm->active_mmu_pages);
}
@@ -717,6 +730,17 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
return r;
}
+static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct kvm_mmu_page *page;
+
+ while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+ pgprintk("%s: zap %lx %x\n",
+ __FUNCTION__, gfn, page->role.word);
+ kvm_mmu_zap_page(vcpu, page);
+ }
+}
+
static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -805,7 +829,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
return -ENOMEM;
}
- table[index] = new_table->page_hpa | PT_PRESENT_MASK
+ table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
}
table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -817,11 +841,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
int i;
struct kvm_mmu_page *page;
+ if (!VALID_PAGE(vcpu->mmu.root_hpa))
+ return;
#ifdef CONFIG_X86_64
if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->mmu.root_hpa;
- ASSERT(VALID_PAGE(root));
page = page_header(root);
--page->root_count;
vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -832,7 +857,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->mmu.pae_root[i];
if (root) {
- ASSERT(VALID_PAGE(root));
root &= PT64_BASE_ADDR_MASK;
page = page_header(root);
--page->root_count;
@@ -857,7 +881,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
page = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, 0, 0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.root_hpa = root;
return;
@@ -878,7 +902,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
@@ -928,9 +952,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
context->free = nonpaging_free;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -944,59 +966,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
mmu_free_roots(vcpu);
- if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
- kvm_mmu_free_some_pages(vcpu);
- mmu_alloc_roots(vcpu);
- kvm_mmu_flush_tlb(vcpu);
- kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
-}
-
-static inline void set_pte_common(struct kvm_vcpu *vcpu,
- u64 *shadow_pte,
- gpa_t gaddr,
- int dirty,
- u64 access_bits,
- gfn_t gfn)
-{
- hpa_t paddr;
-
- *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
- if (!dirty)
- access_bits &= ~PT_WRITABLE_MASK;
-
- paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
-
- *shadow_pte |= access_bits;
-
- if (is_error_hpa(paddr)) {
- *shadow_pte |= gaddr;
- *shadow_pte |= PT_SHADOW_IO_MARK;
- *shadow_pte &= ~PT_PRESENT_MASK;
- return;
- }
-
- *shadow_pte |= paddr;
-
- if (access_bits & PT_WRITABLE_MASK) {
- struct kvm_mmu_page *shadow;
-
- shadow = kvm_mmu_lookup_page(vcpu, gfn);
- if (shadow) {
- pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
- access_bits &= ~PT_WRITABLE_MASK;
- if (is_writeble_pte(*shadow_pte)) {
- *shadow_pte &= ~PT_WRITABLE_MASK;
- kvm_arch_ops->tlb_flush(vcpu);
- }
- }
- }
-
- if (access_bits & PT_WRITABLE_MASK)
- mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
-
- page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
- rmap_add(vcpu, shadow_pte);
}
static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1006,23 +975,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}
-static inline int fix_read_pf(u64 *shadow_ent)
-{
- if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
- !(*shadow_ent & PT_USER_MASK)) {
- /*
- * If supervisor write protect is disabled, we shadow kernel
- * pages as user pages so we can trap the write access.
- */
- *shadow_ent |= PT_USER_MASK;
- *shadow_ent &= ~PT_WRITABLE_MASK;
-
- return 1;
-
- }
- return 0;
-}
-
static void paging_free(struct kvm_vcpu *vcpu)
{
nonpaging_free(vcpu);
@@ -1047,10 +999,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
context->free = paging_free;
context->root_level = level;
context->shadow_root_level = level;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -1069,10 +1018,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
context->free = paging_free;
context->root_level = PT32_ROOT_LEVEL;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -1107,18 +1053,33 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
+ destroy_kvm_mmu(vcpu);
+ return init_kvm_mmu(vcpu);
+}
+
+int kvm_mmu_load(struct kvm_vcpu *vcpu)
+{
int r;
- destroy_kvm_mmu(vcpu);
- r = init_kvm_mmu(vcpu);
- if (r < 0)
- goto out;
+ spin_lock(&vcpu->kvm->lock);
r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ goto out;
+ mmu_alloc_roots(vcpu);
+ kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+ kvm_mmu_flush_tlb(vcpu);
out:
+ spin_unlock(&vcpu->kvm->lock);
return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
+
+void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+{
+ mmu_free_roots(vcpu);
+}
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page,
u64 *spte)
{
@@ -1135,9 +1096,25 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
}
}
*spte = 0;
+ kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page,
+ u64 *spte,
+ const void *new, int bytes)
+{
+ if (page->role.level != PT_PAGE_TABLE_LEVEL)
+ return;
+
+ if (page->role.glevels == PT32_ROOT_LEVEL)
+ paging32_update_pte(vcpu, page, spte, new, bytes);
+ else
+ paging64_update_pte(vcpu, page, spte, new, bytes);
}
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *old, const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *page;
@@ -1149,6 +1126,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
unsigned pte_size;
unsigned page_offset;
unsigned misaligned;
+ unsigned quadrant;
int level;
int flooded = 0;
int npte;
@@ -1169,6 +1147,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
continue;
pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+ misaligned |= bytes < 4;
if (misaligned || flooded) {
/*
* Misaligned accesses are too much trouble to fix
@@ -1200,21 +1179,20 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
page_offset <<= 1;
npte = 2;
}
+ quadrant = page_offset >> PAGE_SHIFT;
page_offset &= ~PAGE_MASK;
+ if (quadrant != page->role.quadrant)
+ continue;
}
- spte = __va(page->page_hpa);
- spte += page_offset / sizeof(*spte);
+ spte = &page->spt[page_offset / sizeof(*spte)];
while (npte--) {
- mmu_pre_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
++spte;
}
}
}
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1243,13 +1221,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu, page);
}
- while (!list_empty(&vcpu->free_pages)) {
- page = list_entry(vcpu->free_pages.next,
- struct kvm_mmu_page, link);
- list_del(&page->link);
- __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
- page->page_hpa = INVALID_PAGE;
- }
free_page((unsigned long)vcpu->mmu.pae_root);
}
@@ -1260,18 +1231,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
ASSERT(vcpu);
- for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
- struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
- INIT_LIST_HEAD(&page_header->link);
- if ((page = alloc_page(GFP_KERNEL)) == NULL)
- goto error_1;
- set_page_private(page, (unsigned long)page_header);
- page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
- memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
- list_add(&page_header->link, &vcpu->free_pages);
- ++vcpu->kvm->n_free_mmu_pages;
- }
+ vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1296,7 +1256,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(list_empty(&vcpu->free_pages));
return alloc_mmu_pages(vcpu);
}
@@ -1305,7 +1264,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(!list_empty(&vcpu->free_pages));
return init_kvm_mmu(vcpu);
}
@@ -1331,7 +1289,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
if (!test_bit(slot, &page->slot_bitmap))
continue;
- pt = __va(page->page_hpa);
+ pt = page->spt;
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
/* avoid RMW */
if (pt[i] & PT_WRITABLE_MASK) {
@@ -1354,7 +1312,7 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
}
mmu_free_memory_caches(vcpu);
- kvm_arch_ops->tlb_flush(vcpu);
+ kvm_flush_remote_tlbs(vcpu->kvm);
init_kvm_mmu(vcpu);
}
@@ -1364,6 +1322,10 @@ void kvm_mmu_module_exit(void)
kmem_cache_destroy(pte_chain_cache);
if (rmap_desc_cache)
kmem_cache_destroy(rmap_desc_cache);
+ if (mmu_page_cache)
+ kmem_cache_destroy(mmu_page_cache);
+ if (mmu_page_header_cache)
+ kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
@@ -1379,6 +1341,18 @@ int kvm_mmu_module_init(void)
if (!rmap_desc_cache)
goto nomem;
+ mmu_page_cache = kmem_cache_create("kvm_mmu_page",
+ PAGE_SIZE,
+ PAGE_SIZE, 0, NULL, NULL);
+ if (!mmu_page_cache)
+ goto nomem;
+
+ mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+ sizeof(struct kvm_mmu_page),
+ 0, 0, NULL, NULL);
+ if (!mmu_page_header_cache)
+ goto nomem;
+
return 0;
nomem:
@@ -1482,7 +1456,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
int i;
list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
- u64 *pt = __va(page->page_hpa);
+ u64 *pt = page->spt;
if (page->role.level != PT_PAGE_TABLE_LEVEL)
continue;
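
The mmu.c changes above move shadow-page allocation onto per-vcpu memory caches (mmu_page_cache, mmu_page_header_cache) that are topped up before entering a section where allocation must not fail. A small userspace sketch of that top-up-then-consume pattern follows; obj_cache and the sizes used here are illustrative only, and the real caches are backed by kmem_cache allocations rather than malloc().

#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX 8

struct obj_cache {
	int nobjs;
	void *objects[CACHE_MAX];
};

/* Fill the cache up to `min` entries while allocation is still allowed
 * (the kernel code does this before taking its lock). */
static int cache_topup(struct obj_cache *cache, size_t size, int min)
{
	while (cache->nobjs < min) {
		void *obj = malloc(size);

		if (!obj)
			return -1;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

/* Grab a preallocated object; never calls malloc, so it cannot fail
 * inside the critical section (caller must have topped up). */
static void *cache_alloc(struct obj_cache *cache)
{
	return cache->objects[--cache->nobjs];
}

static void cache_free(struct obj_cache *cache, void *obj)
{
	if (cache->nobjs < CACHE_MAX)
		cache->objects[cache->nobjs++] = obj;
	else
		free(obj);
}

int main(void)
{
	struct obj_cache cache = { 0 };

	if (cache_topup(&cache, 64, 4))
		return 1;

	/* "critical section": these allocations are guaranteed to succeed */
	void *a = cache_alloc(&cache);
	void *b = cache_alloc(&cache);

	cache_free(&cache, b);
	cache_free(&cache, a);
	printf("%d objects left in cache\n", cache.nobjs);

	while (cache.nobjs)
		free(cache.objects[--cache.nobjs]);
	return 0;
}
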
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbffb109..a7c5cb0319e 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,6 @@
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
- #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#else
@@ -46,7 +45,6 @@
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
- #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#define PT_MAX_FULL_LEVELS 2
#else
#error Invalid PTTYPE value
@@ -192,40 +190,143 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
- u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
+ u64 *shadow_pte,
+ gpa_t gaddr,
+ pt_element_t *gpte,
+ u64 access_bits,
+ int user_fault,
+ int write_fault,
+ int *ptwrite,
+ struct guest_walker *walker,
+ gfn_t gfn)
{
- ASSERT(*shadow_pte == 0);
- access_bits &= guest_pte;
- *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
- set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
- guest_pte & PT_DIRTY_MASK, access_bits, gfn);
+ hpa_t paddr;
+ int dirty = *gpte & PT_DIRTY_MASK;
+ u64 spte = *shadow_pte;
+ int was_rmapped = is_rmap_pte(spte);
+
+ pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
+ " user_fault %d gfn %lx\n",
+ __FUNCTION__, spte, (u64)*gpte, access_bits,
+ write_fault, user_fault, gfn);
+
+ if (write_fault && !dirty) {
+ *gpte |= PT_DIRTY_MASK;
+ dirty = 1;
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
+ }
+
+ spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+ spte |= *gpte & PT64_NX_MASK;
+ if (!dirty)
+ access_bits &= ~PT_WRITABLE_MASK;
+
+ paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+
+ spte |= PT_PRESENT_MASK;
+ if (access_bits & PT_USER_MASK)
+ spte |= PT_USER_MASK;
+
+ if (is_error_hpa(paddr)) {
+ spte |= gaddr;
+ spte |= PT_SHADOW_IO_MARK;
+ spte &= ~PT_PRESENT_MASK;
+ set_shadow_pte(shadow_pte, spte);
+ return;
+ }
+
+ spte |= paddr;
+
+ if ((access_bits & PT_WRITABLE_MASK)
+ || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+ struct kvm_mmu_page *shadow;
+
+ spte |= PT_WRITABLE_MASK;
+ if (user_fault) {
+ mmu_unshadow(vcpu, gfn);
+ goto unshadowed;
+ }
+
+ shadow = kvm_mmu_lookup_page(vcpu, gfn);
+ if (shadow) {
+ pgprintk("%s: found shadow page for %lx, marking ro\n",
+ __FUNCTION__, gfn);
+ access_bits &= ~PT_WRITABLE_MASK;
+ if (is_writeble_pte(spte)) {
+ spte &= ~PT_WRITABLE_MASK;
+ kvm_arch_ops->tlb_flush(vcpu);
+ }
+ if (write_fault)
+ *ptwrite = 1;
+ }
+ }
+
+unshadowed:
+
+ if (access_bits & PT_WRITABLE_MASK)
+ mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+
+ set_shadow_pte(shadow_pte, spte);
+ page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+ if (!was_rmapped)
+ rmap_add(vcpu, shadow_pte);
}
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
- u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
+ u64 *shadow_pte, u64 access_bits,
+ int user_fault, int write_fault, int *ptwrite,
+ struct guest_walker *walker, gfn_t gfn)
+{
+ access_bits &= *gpte;
+ FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
+ gpte, access_bits, user_fault, write_fault,
+ ptwrite, walker, gfn);
+}
+
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+ u64 *spte, const void *pte, int bytes)
+{
+ pt_element_t gpte;
+
+ if (bytes < sizeof(pt_element_t))
+ return;
+ gpte = *(const pt_element_t *)pte;
+ if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+ return;
+ pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+ FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+ 0, NULL, NULL,
+ (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+}
+
+static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
+ u64 *shadow_pte, u64 access_bits,
+ int user_fault, int write_fault, int *ptwrite,
+ struct guest_walker *walker, gfn_t gfn)
{
gpa_t gaddr;
- ASSERT(*shadow_pte == 0);
- access_bits &= guest_pde;
+ access_bits &= *gpde;
gaddr = (gpa_t)gfn << PAGE_SHIFT;
if (PTTYPE == 32 && is_cpuid_PSE36())
- gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
+ gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
(32 - PT32_DIR_PSE36_SHIFT);
- *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
- set_pte_common(vcpu, shadow_pte, gaddr,
- guest_pde & PT_DIRTY_MASK, access_bits, gfn);
+ FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
+ gpde, access_bits, user_fault, write_fault,
+ ptwrite, walker, gfn);
}
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
- struct guest_walker *walker)
+ struct guest_walker *walker,
+ int user_fault, int write_fault, int *ptwrite)
{
hpa_t shadow_addr;
int level;
+ u64 *shadow_ent;
u64 *prev_shadow_ent = NULL;
pt_element_t *guest_ent = walker->ptep;
@@ -242,37 +343,23 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
for (; ; level--) {
u32 index = SHADOW_PT_INDEX(addr, level);
- u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
struct kvm_mmu_page *shadow_page;
u64 shadow_pte;
int metaphysical;
gfn_t table_gfn;
unsigned hugepage_access = 0;
+ shadow_ent = ((u64 *)__va(shadow_addr)) + index;
if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
if (level == PT_PAGE_TABLE_LEVEL)
- return shadow_ent;
+ break;
shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
prev_shadow_ent = shadow_ent;
continue;
}
- if (level == PT_PAGE_TABLE_LEVEL) {
-
- if (walker->level == PT_DIRECTORY_LEVEL) {
- if (prev_shadow_ent)
- *prev_shadow_ent |= PT_SHADOW_PS_MARK;
- FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
- walker->inherited_ar,
- walker->gfn);
- } else {
- ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
- FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
- walker->inherited_ar,
- walker->gfn);
- }
- return shadow_ent;
- }
+ if (level == PT_PAGE_TABLE_LEVEL)
+ break;
if (level - 1 == PT_PAGE_TABLE_LEVEL
&& walker->level == PT_DIRECTORY_LEVEL) {
@@ -289,90 +376,24 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
metaphysical, hugepage_access,
shadow_ent);
- shadow_addr = shadow_page->page_hpa;
+ shadow_addr = __pa(shadow_page->spt);
shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
*shadow_ent = shadow_pte;
prev_shadow_ent = shadow_ent;
}
-}
-/*
- * The guest faulted for write. We need to
- *
- * - check write permissions
- * - update the guest pte dirty bit
- * - update our own dirty page tracking structures
- */
-static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
- u64 *shadow_ent,
- struct guest_walker *walker,
- gva_t addr,
- int user,
- int *write_pt)
-{
- pt_element_t *guest_ent;
- int writable_shadow;
- gfn_t gfn;
- struct kvm_mmu_page *page;
-
- if (is_writeble_pte(*shadow_ent))
- return !user || (*shadow_ent & PT_USER_MASK);
-
- writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
- if (user) {
- /*
- * User mode access. Fail if it's a kernel page or a read-only
- * page.
- */
- if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
- return 0;
- ASSERT(*shadow_ent & PT_USER_MASK);
- } else
- /*
- * Kernel mode access. Fail if it's a read-only page and
- * supervisor write protection is enabled.
- */
- if (!writable_shadow) {
- if (is_write_protection(vcpu))
- return 0;
- *shadow_ent &= ~PT_USER_MASK;
- }
-
- guest_ent = walker->ptep;
-
- if (!is_present_pte(*guest_ent)) {
- *shadow_ent = 0;
- return 0;
+ if (walker->level == PT_DIRECTORY_LEVEL) {
+ FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
+ walker->inherited_ar, user_fault, write_fault,
+ ptwrite, walker, walker->gfn);
+ } else {
+ ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
+ FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
+ walker->inherited_ar, user_fault, write_fault,
+ ptwrite, walker, walker->gfn);
}
-
- gfn = walker->gfn;
-
- if (user) {
- /*
- * Usermode page faults won't be for page table updates.
- */
- while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
- pgprintk("%s: zap %lx %x\n",
- __FUNCTION__, gfn, page->role.word);
- kvm_mmu_zap_page(vcpu, page);
- }
- } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
- pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
- mark_page_dirty(vcpu->kvm, gfn);
- FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
- *guest_ent |= PT_DIRTY_MASK;
- *write_pt = 1;
- return 0;
- }
- mark_page_dirty(vcpu->kvm, gfn);
- *shadow_ent |= PT_WRITABLE_MASK;
- FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
- *guest_ent |= PT_DIRTY_MASK;
- rmap_add(vcpu, shadow_ent);
-
- return 1;
+ return shadow_ent;
}
/*
@@ -397,7 +418,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
u64 *shadow_pte;
- int fixed;
int write_pt = 0;
int r;
@@ -421,27 +441,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
pgprintk("%s: guest page fault\n", __FUNCTION__);
inject_page_fault(vcpu, addr, walker.error_code);
FNAME(release_walker)(&walker);
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
return 0;
}
- shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
- pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
- shadow_pte, *shadow_pte);
-
- /*
- * Update the shadow pte.
- */
- if (write_fault)
- fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
- user_fault, &write_pt);
- else
- fixed = fix_read_pf(shadow_pte);
-
- pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
- shadow_pte, *shadow_pte);
+ shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+ &write_pt);
+ pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+ shadow_pte, *shadow_pte, write_pt);
FNAME(release_walker)(&walker);
+ if (!write_pt)
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
+
/*
* mmio: emulate if accessible, otherwise its a guest fault.
*/
@@ -478,7 +491,5 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
-#undef PT_PTE_COPY_MASK
-#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa17d6d4f0c..bc818cc126e 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -14,16 +14,17 @@
*
*/
+#include "kvm_svm.h"
+#include "x86_emulate.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <asm/desc.h>
-#include "kvm_svm.h"
-#include "x86_emulate.h"
+#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -378,7 +379,7 @@ static __init int svm_hardware_setup(void)
int cpu;
struct page *iopm_pages;
struct page *msrpm_pages;
- void *msrpm_va;
+ void *iopm_va, *msrpm_va;
int r;
kvm_emulator_want_group7_invlpg();
@@ -387,8 +388,10 @@ static __init int svm_hardware_setup(void)
if (!iopm_pages)
return -ENOMEM;
- memset(page_address(iopm_pages), 0xff,
- PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+
+ iopm_va = page_address(iopm_pages);
+ memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+ clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
@@ -579,7 +582,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
goto out2;
vcpu->svm->vmcb = page_address(page);
- memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+ clear_page(vcpu->svm->vmcb);
vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
vcpu->svm->asid_generation = 0;
memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
@@ -587,9 +590,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
fx_init(vcpu);
vcpu->fpu_active = 1;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
return 0;
@@ -955,7 +958,7 @@ static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
*/
- memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+ clear_page(vcpu->svm->vmcb);
init_vmcb(vcpu->svm->vmcb);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1113,12 +1116,7 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1473,6 +1471,11 @@ static void load_db_regs(unsigned long *db_regs)
asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
+static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ force_new_asid(vcpu);
+}
+
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u16 fs_selector;
@@ -1481,11 +1484,20 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int r;
again:
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ return r;
+
if (!vcpu->mmio_read_completed)
do_interrupt_requests(vcpu, kvm_run);
clgi();
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ svm_flush_tlb(vcpu);
+
pre_svm_run(vcpu);
save_host_msrs(vcpu);
@@ -1617,6 +1629,8 @@ again:
#endif
: "cc", "memory" );
+ vcpu->guest_mode = 0;
+
if (vcpu->fpu_active) {
fx_save(vcpu->guest_fx_image);
fx_restore(vcpu->host_fx_image);
@@ -1681,11 +1695,6 @@ again:
return r;
}
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
-{
- force_new_asid(vcpu);
-}
-
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
vcpu->svm->vmcb->save.cr3 = root;
@@ -1727,6 +1736,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
static int is_disabled(void)
{
+ u64 vm_cr;
+
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
+ return 1;
+
return 0;
}
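For illustration only (this is not part of the patch): the svm_hardware_setup() hunk above fills the I/O permission map with 1s, intercepting every port, and then clears the single bit for port 0x80 so that guest writes to the PC debug port, commonly used for I/O delays, skip the vmexit path. A minimal sketch of the same bitmap manipulation, using plain byte arithmetic instead of the kernel's clear_bit() and an assumed IOPM size:

#include <stdint.h>
#include <string.h>

#define IOPM_BYTES (12 * 1024)	/* assumed size: SVM's I/O permission map spans three pages */

/* Sketch: intercept every I/O port, then pass port 0x80 straight through. */
static void init_iopm(uint8_t *iopm)
{
	memset(iopm, 0xff, IOPM_BYTES);			/* 1 = intercept the port */
	iopm[0x80 / 8] &= (uint8_t)~(1u << (0x80 % 8));	/* clear the bit for port 0x80 */
}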
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 5e93814400c..3b1b0f35b6c 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,8 +175,11 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_CPUID_FUNC 0x8000000a
#define MSR_EFER_SVME_MASK (1ULL << 12)
+#define MSR_VM_CR 0xc0010114
#define MSR_VM_HSAVE_PA 0xc0010117ULL
+#define SVM_VM_CR_SVM_DISABLE 4
+
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c1ac106ace8..80628f69916 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -17,28 +17,35 @@
#include "kvm.h"
#include "vmx.h"
+#include "segment_descriptor.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
+
#include <asm/io.h>
#include <asm/desc.h>
-#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int init_rmode_tss(struct kvm *kvm);
+
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+static struct page *vmx_io_bitmap_a;
+static struct page *vmx_io_bitmap_b;
+
#ifdef CONFIG_X86_64
#define HOST_IS_64 1
#else
#define HOST_IS_64 0
#endif
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
static struct vmcs_descriptor {
int size;
@@ -82,18 +89,17 @@ static const u32 vmx_msr_index[] = {
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+ return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+ int efer_offset = vcpu->msr_offset_efer;
+ return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+ msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
static inline int is_page_fault(u32 intr_info)
{
@@ -115,13 +121,23 @@ static inline int is_external_interrupt(u32 intr_info)
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
int i;
for (i = 0; i < vcpu->nmsrs; ++i)
if (vcpu->guest_msrs[i].index == msr)
- return &vcpu->guest_msrs[i];
+ return i;
+ return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+ int i;
+
+ i = __find_msr_index(vcpu, msr);
+ if (i >= 0)
+ return &vcpu->guest_msrs[i];
return NULL;
}
@@ -147,6 +163,7 @@ static void __vcpu_clear(void *arg)
vmcs_clear(vcpu->vmcs);
if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
+ rdtscll(vcpu->host_tsc);
}
static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -234,6 +251,127 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
vmcs_writel(field, vmcs_readl(field) | mask);
}
+static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ u32 eb;
+
+ eb = 1u << PF_VECTOR;
+ if (!vcpu->fpu_active)
+ eb |= 1u << NM_VECTOR;
+ if (vcpu->guest_debug.enabled)
+ eb |= 1u << 1;
+ if (vcpu->rmode.active)
+ eb = ~0;
+ vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
+static void reload_tss(void)
+{
+#ifndef CONFIG_X86_64
+
+ /*
+ * VT restores TR but not its size. Useless.
+ */
+ struct descriptor_table gdt;
+ struct segment_descriptor *descs;
+
+ get_gdt(&gdt);
+ descs = (void *)gdt.base;
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+ load_TR_desc();
+#endif
+}
+
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+ u64 trans_efer;
+ int efer_offset = vcpu->msr_offset_efer;
+
+ trans_efer = vcpu->host_msrs[efer_offset].data;
+ trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+ trans_efer |= msr_efer_save_restore_bits(
+ vcpu->guest_msrs[efer_offset]);
+ wrmsrl(MSR_EFER, trans_efer);
+ vcpu->stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+ if (hs->loaded)
+ return;
+
+ hs->loaded = 1;
+ /*
+ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
+ * allow segment selectors with cpl > 0 or ti == 1.
+ */
+ hs->ldt_sel = read_ldt();
+ hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
+ hs->fs_sel = read_fs();
+ if (!(hs->fs_sel & 7))
+ vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
+ else {
+ vmcs_write16(HOST_FS_SELECTOR, 0);
+ hs->fs_gs_ldt_reload_needed = 1;
+ }
+ hs->gs_sel = read_gs();
+ if (!(hs->gs_sel & 7))
+ vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
+ else {
+ vmcs_write16(HOST_GS_SELECTOR, 0);
+ hs->fs_gs_ldt_reload_needed = 1;
+ }
+
+#ifdef CONFIG_X86_64
+ vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+ vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
+#else
+ vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
+ vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
+#endif
+
+#ifdef CONFIG_X86_64
+ if (is_long_mode(vcpu)) {
+ save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+ }
+#endif
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_transition_efer(vcpu);
+}
+
+static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+ if (!hs->loaded)
+ return;
+
+ hs->loaded = 0;
+ if (hs->fs_gs_ldt_reload_needed) {
+ load_ldt(hs->ldt_sel);
+ load_fs(hs->fs_sel);
+ /*
+ * If we have to reload gs, we must take care to
+ * preserve our gs base.
+ */
+ local_irq_disable();
+ load_gs(hs->gs_sel);
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+ local_irq_enable();
+
+ reload_tss();
+ }
+ save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
@@ -242,6 +380,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
u64 phys_addr = __pa(vcpu->vmcs);
int cpu;
+ u64 tsc_this, delta;
cpu = get_cpu();
@@ -275,15 +414,43 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+ /*
+ * Make sure the time stamp counter is monotonic.
+ */
+ rdtscll(tsc_this);
+ delta = vcpu->host_tsc - tsc_this;
+ vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
}
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
+ vmx_load_host_state(vcpu);
kvm_put_guest_fpu(vcpu);
put_cpu();
}
+static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 1;
+ vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ if (vcpu->cr0 & CR0_TS_MASK)
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
+static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 0;
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
vcpu_clear(vcpu);
@@ -332,41 +499,61 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
}
/*
+ * Swap MSR entry in host/guest MSR entry array.
+ */
+void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+{
+ struct vmx_msr_entry tmp;
+ tmp = vcpu->guest_msrs[to];
+ vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
+ vcpu->guest_msrs[from] = tmp;
+ tmp = vcpu->host_msrs[to];
+ vcpu->host_msrs[to] = vcpu->host_msrs[from];
+ vcpu->host_msrs[from] = tmp;
+}
+
+/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
static void setup_msrs(struct kvm_vcpu *vcpu)
{
- int nr_skip, nr_good_msrs;
-
- if (is_long_mode(vcpu))
- nr_skip = NR_BAD_MSRS;
- else
- nr_skip = NR_64BIT_MSRS;
- nr_good_msrs = vcpu->nmsrs - nr_skip;
+ int save_nmsrs;
- /*
- * MSR_K6_STAR is only needed on long mode guests, and only
- * if efer.sce is enabled.
- */
- if (find_msr_entry(vcpu, MSR_K6_STAR)) {
- --nr_good_msrs;
+ save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
- ++nr_good_msrs;
-#endif
+ if (is_long_mode(vcpu)) {
+ int index;
+
+ index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_LSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_CSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ /*
+ * MSR_K6_STAR is only needed on long mode guests, and only
+ * if efer.sce is enabled.
+ */
+ index = __find_msr_index(vcpu, MSR_K6_STAR);
+ if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
+ move_msr_up(vcpu, index, save_nmsrs++);
}
+#endif
+ vcpu->save_nmsrs = save_nmsrs;
- vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->host_msrs + nr_skip));
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+#ifdef CONFIG_X86_64
+ vcpu->msr_offset_kernel_gs_base =
+ __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+#endif
+ vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}
/*
@@ -394,23 +581,6 @@ static void guest_write_tsc(u64 guest_tsc)
vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}
-static void reload_tss(void)
-{
-#ifndef CONFIG_X86_64
-
- /*
- * VT restores TR but not its size. Useless.
- */
- struct descriptor_table gdt;
- struct segment_descriptor *descs;
-
- get_gdt(&gdt);
- descs = (void *)gdt.base;
- descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
- load_TR_desc();
-#endif
-}
-
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -470,10 +640,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vmx_msr_entry *msr;
+ int ret = 0;
+
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
- return kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ if (vcpu->vmx_host_state.loaded)
+ load_transition_efer(vcpu);
+ break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
@@ -497,14 +672,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
msr = find_msr_entry(vcpu, msr_index);
if (msr) {
msr->data = data;
+ if (vcpu->vmx_host_state.loaded)
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
break;
}
- return kvm_set_msr_common(vcpu, msr_index, data);
- msr->data = data;
- break;
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
}
- return 0;
+ return ret;
}
/*
@@ -530,10 +705,8 @@ static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
unsigned long dr7 = 0x400;
- u32 exception_bitmap;
int old_singlestep;
- exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
old_singlestep = vcpu->guest_debug.singlestep;
vcpu->guest_debug.enabled = dbg->enabled;
@@ -549,13 +722,9 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
dr7 |= 0 << (i*4+16); /* execution breakpoint */
}
- exception_bitmap |= (1u << 1); /* Trap debug exceptions */
-
vcpu->guest_debug.singlestep = dbg->singlestep;
- } else {
- exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+ } else
vcpu->guest_debug.singlestep = 0;
- }
if (old_singlestep && !vcpu->guest_debug.singlestep) {
unsigned long flags;
@@ -565,7 +734,7 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
vmcs_writel(GUEST_RFLAGS, flags);
}
- vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+ update_exception_bitmap(vcpu);
vmcs_writel(GUEST_DR7, dr7);
return 0;
@@ -679,14 +848,6 @@ static __exit void hardware_unsetup(void)
free_kvm_area();
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
- if (vcpu->rmode.active)
- vmcs_write32(EXCEPTION_BITMAP, ~0);
- else
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
-}
-
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -793,6 +954,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
+ init_rmode_tss(vcpu->kvm);
}
#ifdef CONFIG_X86_64
@@ -837,6 +1000,8 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
+ vmx_fpu_deactivate(vcpu);
+
if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
enter_pmode(vcpu);
@@ -852,26 +1017,20 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
#endif
- if (!(cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
- }
-
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0,
(cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
vcpu->cr0 = cr0;
+
+ if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+ vmx_fpu_activate(vcpu);
}
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
vmcs_writel(GUEST_CR3, cr3);
-
- if (!(vcpu->cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 0;
- vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
- vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- }
+ if (vcpu->cr0 & CR0_PE_MASK)
+ vmx_fpu_deactivate(vcpu);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -937,23 +1096,11 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->unusable = (ar >> 16) & 1;
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
- struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
u32 ar;
- vmcs_writel(sf->base, var->base);
- vmcs_write32(sf->limit, var->limit);
- vmcs_write16(sf->selector, var->selector);
- if (vcpu->rmode.active && var->s) {
- /*
- * Hack real-mode segments into vm86 compatibility.
- */
- if (var->base == 0xffff0000 && var->selector == 0xf000)
- vmcs_writel(sf->base, 0xf0000);
- ar = 0xf3;
- } else if (var->unusable)
+ if (var->unusable)
ar = 1 << 16;
else {
ar = var->type & 15;
@@ -967,6 +1114,35 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
}
if (ar == 0) /* a 0 value means unusable */
ar = AR_UNUSABLE_MASK;
+
+ return ar;
+}
+
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ u32 ar;
+
+ if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+ vcpu->rmode.tr.selector = var->selector;
+ vcpu->rmode.tr.base = var->base;
+ vcpu->rmode.tr.limit = var->limit;
+ vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+ return;
+ }
+ vmcs_writel(sf->base, var->base);
+ vmcs_write32(sf->limit, var->limit);
+ vmcs_write16(sf->selector, var->selector);
+ if (vcpu->rmode.active && var->s) {
+ /*
+ * Hack real-mode segments into vm86 compatibility.
+ */
+ if (var->base == 0xffff0000 && var->selector == 0xf000)
+ vmcs_writel(sf->base, 0xf0000);
+ ar = 0xf3;
+ } else
+ ar = vmx_segment_access_rights(var);
vmcs_write32(sf->ar_bytes, ar);
}
@@ -1018,16 +1194,16 @@ static int init_rmode_tss(struct kvm* kvm)
}
page = kmap_atomic(p1, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p2, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p3, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
kunmap_atomic(page, KM_USER0);
@@ -1066,7 +1242,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
struct descriptor_table dt;
int i;
int ret = 0;
- extern asmlinkage void kvm_vmx_return(void);
+ unsigned long kvm_vmx_return;
if (!init_rmode_tss(vcpu->kvm)) {
ret = -ENOMEM;
@@ -1076,9 +1252,9 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
memset(vcpu->regs, 0, sizeof(vcpu->regs));
vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
fx_init(vcpu);
@@ -1129,8 +1305,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
/* I/O */
- vmcs_write64(IO_BITMAP_A, 0);
- vmcs_write64(IO_BITMAP_B, 0);
+ vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+ vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
guest_write_tsc(0);
@@ -1150,12 +1326,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
CPU_BASED_HLT_EXITING /* 20.6.2 */
| CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
| CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
- | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
+ | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */
| CPU_BASED_MOV_DR_EXITING
| CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
);
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
@@ -1185,8 +1360,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
get_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
-
- vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
+ asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+ vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -1210,10 +1388,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->host_msrs[j].reserved = 0;
vcpu->host_msrs[j].data = data;
vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
- if (index == MSR_KERNEL_GS_BASE)
- msr_offset_kernel_gs_base = j;
-#endif
++vcpu->nmsrs;
}
@@ -1241,6 +1415,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
vmx_set_efer(vcpu, 0);
#endif
+ vmx_fpu_activate(vcpu);
+ update_exception_bitmap(vcpu);
return 0;
@@ -1365,7 +1541,11 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (!vcpu->rmode.active)
return 0;
- if (vec == GP_VECTOR && err_code == 0)
+ /*
+ * An instruction with the address-size override prefix (opcode 0x67)
+ * causes a #SS fault with error code 0 in VM86 mode.
+ */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
return 1;
return 0;
@@ -1400,10 +1580,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
if (is_no_device(intr_info)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- if (!(vcpu->cr0 & CR0_TS_MASK))
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_activate(vcpu);
return 1;
}
@@ -1445,8 +1622,13 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->rmode.active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code))
+ error_code)) {
+ if (vcpu->halt_request) {
+ vcpu->halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
return 1;
+ }
if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -1595,11 +1777,10 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
break;
case 2: /* clts */
vcpu_load_rsp_rip(vcpu);
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_deactivate(vcpu);
vcpu->cr0 &= ~CR0_TS_MASK;
vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+ vmx_fpu_activate(vcpu);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
@@ -1734,12 +1915,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1770,7 +1946,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
};
static const int kvm_vmx_max_exit_handlers =
- sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
* The guest has exited. See if we can fix it or if we need userspace
@@ -1810,61 +1986,44 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u8 fail;
- u16 fs_sel, gs_sel, ldt_sel;
- int fs_gs_ldt_reload_needed;
int r;
-again:
- /*
- * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
- * allow segment selectors with cpl > 0 or ti == 1.
- */
- fs_sel = read_fs();
- gs_sel = read_gs();
- ldt_sel = read_ldt();
- fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
- if (!fs_gs_ldt_reload_needed) {
- vmcs_write16(HOST_FS_SELECTOR, fs_sel);
- vmcs_write16(HOST_GS_SELECTOR, gs_sel);
- } else {
- vmcs_write16(HOST_FS_SELECTOR, 0);
- vmcs_write16(HOST_GS_SELECTOR, 0);
- }
-
-#ifdef CONFIG_X86_64
- vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
- vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
-#else
- vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
- vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
-#endif
+preempted:
+ if (vcpu->guest_debug.enabled)
+ kvm_guest_debug_pre(vcpu);
+again:
if (!vcpu->mmio_read_completed)
do_interrupt_requests(vcpu, kvm_run);
- if (vcpu->guest_debug.enabled)
- kvm_guest_debug_pre(vcpu);
-
+ vmx_save_host_state(vcpu);
kvm_load_guest_fpu(vcpu);
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ goto out;
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
- load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- }
-#endif
+ local_irq_disable();
+
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ vmx_flush_tlb(vcpu);
asm (
/* Store host registers */
- "pushf \n\t"
#ifdef CONFIG_X86_64
"push %%rax; push %%rbx; push %%rdx;"
"push %%rsi; push %%rdi; push %%rbp;"
@@ -1909,12 +2068,11 @@ again:
"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
/* Enter guest mode */
- "jne launched \n\t"
+ "jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t"
- "jmp kvm_vmx_return \n\t"
- "launched: " ASM_VMX_VMRESUME "\n\t"
- ".globl kvm_vmx_return \n\t"
- "kvm_vmx_return: "
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+ ".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
"xchg %3, (%%rsp) \n\t"
@@ -1957,7 +2115,6 @@ again:
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- "popf \n\t"
: "=q" (fail)
: "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
@@ -1981,84 +2138,61 @@ again:
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
- /*
- * Reload segment selectors ASAP. (it's needed for a functional
- * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
- * relies on having 0 in %gs for the CPU PDA to work.)
- */
- if (fs_gs_ldt_reload_needed) {
- load_ldt(ldt_sel);
- load_fs(fs_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_disable();
- load_gs(gs_sel);
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
- local_irq_enable();
+ vcpu->guest_mode = 0;
+ local_irq_enable();
- reload_tss();
- }
++vcpu->stat.exits;
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
- }
-#endif
-
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
- if (fail) {
+ if (unlikely(fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
r = 0;
- } else {
- /*
- * Profile KVM exit RIPs:
- */
- if (unlikely(prof_on == KVM_PROFILING))
- profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
- vcpu->launched = 1;
- r = kvm_handle_exit(kvm_run, vcpu);
- if (r > 0) {
- /* Give scheduler a change to reschedule. */
- if (signal_pending(current)) {
- ++vcpu->stat.signal_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
- ++vcpu->stat.request_irq_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- kvm_resched(vcpu);
+ goto out;
+ }
+ /*
+ * Profile KVM exit RIPs:
+ */
+ if (unlikely(prof_on == KVM_PROFILING))
+ profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+ vcpu->launched = 1;
+ r = kvm_handle_exit(kvm_run, vcpu);
+ if (r > 0) {
+ /* Give the scheduler a chance to reschedule. */
+ if (signal_pending(current)) {
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.signal_exits;
+ goto out;
+ }
+
+ if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.request_irq_exits;
+ goto out;
+ }
+ if (!need_resched()) {
+ ++vcpu->stat.light_exits;
goto again;
}
}
+out:
+ if (r > 0) {
+ kvm_resched(vcpu);
+ goto preempted;
+ }
+
post_kvm_run_save(vcpu, kvm_run);
return r;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
- vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
unsigned long addr,
u32 err_code)
@@ -2122,7 +2256,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmcs_clear(vmcs);
vcpu->vmcs = vmcs;
vcpu->launched = 0;
- vcpu->fpu_active = 1;
return 0;
@@ -2188,11 +2321,50 @@ static struct kvm_arch_ops vmx_arch_ops = {
static int __init vmx_init(void)
{
- return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ void *iova;
+ int r;
+
+ vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_a)
+ return -ENOMEM;
+
+ vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_b) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Allow direct access to the PC debug port (it is often used for I/O
+ * delays, but the vmexits simply slow things down).
+ */
+ iova = kmap(vmx_io_bitmap_a);
+ memset(iova, 0xff, PAGE_SIZE);
+ clear_bit(0x80, iova);
+ kunmap(vmx_io_bitmap_a);
+
+ iova = kmap(vmx_io_bitmap_b);
+ memset(iova, 0xff, PAGE_SIZE);
+ kunmap(vmx_io_bitmap_b);
+
+ r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ if (r)
+ goto out1;
+
+ return 0;
+
+out1:
+ __free_page(vmx_io_bitmap_b);
+out:
+ __free_page(vmx_io_bitmap_a);
+ return r;
}
static void __exit vmx_exit(void)
{
+ __free_page(vmx_io_bitmap_b);
+ __free_page(vmx_io_bitmap_a);
+
kvm_exit_arch();
}
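For illustration only (not part of the patch): the vmx_vcpu_load() hunk above keeps the guest's view of the TSC monotonic when a vcpu migrates between CPUs by folding the difference between the TSC sampled at vcpu_clear time and the new CPU's TSC into TSC_OFFSET. A minimal sketch of that adjustment, with plain parameters standing in for the VMCS field and the saved host TSC:

#include <stdint.h>

/* Sketch: compute the new TSC offset after a vcpu moves to another CPU.
 * 'tsc_offset' stands in for the VMCS TSC_OFFSET field, 'host_tsc_at_save'
 * for the TSC read when the vcpu last left a CPU; unsigned wrap-around is
 * intentional, the arithmetic is modular. */
static uint64_t migrate_tsc_offset(uint64_t tsc_offset,
				   uint64_t host_tsc_at_save,
				   uint64_t tsc_now)
{
	return tsc_offset + (host_tsc_at_save - tsc_now);
}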
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7ade09086aa..f60012d6261 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -98,8 +98,11 @@ static u8 opcode_table[256] = {
0, 0, 0, 0,
/* 0x40 - 0x4F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x50 - 0x5F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x50 - 0x57 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x58 - 0x5F */
+ ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+ ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x60 - 0x6F */
0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -128,9 +131,9 @@ static u8 opcode_table[256] = {
/* 0xB0 - 0xBF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xC0 - 0xC7 */
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0,
- 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov,
- DstMem | SrcImm | ModRM | Mov,
+ ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+ 0, ImplicitOps, 0, 0,
+ ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
/* 0xC8 - 0xCF */
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xD0 - 0xD7 */
@@ -143,7 +146,8 @@ static u8 opcode_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+ ImplicitOps, 0,
+ ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
/* 0xF8 - 0xFF */
0, 0, 0, 0,
0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
@@ -152,7 +156,7 @@ static u8 opcode_table[256] = {
static u16 twobyte_table[256] = {
/* 0x00 - 0x0F */
0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
- 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+ 0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
/* 0x20 - 0x2F */
@@ -481,6 +485,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
int mode = ctxt->mode;
unsigned long modrm_ea;
int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
+ int no_wb = 0;
/* Shadow copy of register state. Committed on successful emulation. */
unsigned long _regs[NR_VCPU_REGS];
@@ -1047,7 +1052,7 @@ done_prefixes:
_regs[VCPU_REGS_RSP]),
&dst.val, dst.bytes, ctxt)) != 0)
goto done;
- dst.val = dst.orig_val; /* skanky: disable writeback */
+ no_wb = 1;
break;
default:
goto cannot_emulate;
@@ -1056,7 +1061,7 @@ done_prefixes:
}
writeback:
- if ((d & Mov) || (dst.orig_val != dst.val)) {
+ if (!no_wb) {
switch (dst.type) {
case OP_REG:
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -1149,6 +1154,23 @@ special_insn:
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
goto cannot_emulate;
+ case 0xf4: /* hlt */
+ ctxt->vcpu->halt_request = 1;
+ goto done;
+ case 0xc3: /* ret */
+ dst.ptr = &_eip;
+ goto pop_instruction;
+ case 0x58 ... 0x5f: /* pop reg */
+ dst.ptr = (unsigned long *)&_regs[b & 0x7];
+
+pop_instruction:
+ if ((rc = ops->read_std(register_address(ctxt->ss_base,
+ _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
+ goto done;
+
+ register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
+ no_wb = 1; /* Disable writeback. */
+ break;
}
goto writeback;
@@ -1302,8 +1324,10 @@ twobyte_insn:
twobyte_special_insn:
/* Disable writeback. */
- dst.orig_val = dst.val;
+ no_wb = 1;
switch (b) {
+ case 0x09: /* wbinvd */
+ break;
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
break;
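For illustration only (not part of the patch): the new pop/ret cases above read op_bytes from the emulated stack into the destination and then advance RSP. A self-contained sketch of that pattern, with a hypothetical read_mem() callback rather than the emulator's read_std operation:

#include <stdint.h>
#include <stddef.h>

/* read_mem() is a hypothetical callback, not the kvm x86_emulate interface. */
typedef int (*read_mem_t)(uint64_t addr, void *dst, size_t len, void *ctx);

static int emulate_pop(uint64_t *rsp, void *dst, size_t op_bytes,
		       read_mem_t read_mem, void *ctx)
{
	int rc = read_mem(*rsp, dst, op_bytes, ctx);

	if (rc)
		return rc;	/* propagate the read fault */
	*rsp += op_bytes;	/* advance the emulated stack pointer */
	return 0;
}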
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index bd55e6ab99f..f25685b9b7c 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -335,6 +335,7 @@ static int monitor_task(void *arg)
{
struct thermostat* th = arg;
+ set_freezable();
while(!kthread_should_stop()) {
try_to_freeze();
msleep_interruptible(2000);
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 4fcb245ba18..e18d265d5d3 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -92,6 +92,7 @@ static int wf_thread_func(void *data)
DBG("wf: thread started\n");
+ set_freezable();
while(!kthread_should_stop()) {
if (time_after_eq(jiffies, next)) {
wf_notify(WF_EVENT_TICK, NULL);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 64bf3a81db9..531d4d17d01 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -2,19 +2,17 @@
# Block device driver configuration
#
-if BLOCK
-
-menu "Multi-device support (RAID and LVM)"
-
-config MD
+menuconfig MD
bool "Multiple devices driver support (RAID and LVM)"
+ depends on BLOCK
help
Support multiple physical spindles through a single logical device.
Required for RAID and logical volume management.
+if MD
+
config BLK_DEV_MD
tristate "RAID support"
- depends on MD
---help---
This driver lets you combine several hard disk partitions into one
logical block device. This can be used to simply append one
@@ -191,7 +189,6 @@ config MD_FAULTY
config BLK_DEV_DM
tristate "Device mapper support"
- depends on MD
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
@@ -279,6 +276,4 @@ config DM_DELAY
If unsure, say N.
-endmenu
-
-endif
+endif # MD
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9620d452d03..927cb34c480 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -268,6 +268,31 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
bdev_hardsect_size(rdev->bdev));
+ /* Just make sure we aren't corrupting data or
+ * metadata
+ */
+ if (bitmap->offset < 0) {
+ /* DATA BITMAP METADATA */
+ if (bitmap->offset
+ + page->index * (PAGE_SIZE/512)
+ + size/512 > 0)
+ /* bitmap runs into metadata */
+ return -EINVAL;
+ if (rdev->data_offset + mddev->size*2
+ > rdev->sb_offset*2 + bitmap->offset)
+ /* data runs into bitmap */
+ return -EINVAL;
+ } else if (rdev->sb_offset*2 < rdev->data_offset) {
+ /* METADATA BITMAP DATA */
+ if (rdev->sb_offset*2
+ + bitmap->offset
+ + page->index*(PAGE_SIZE/512) + size/512
+ > rdev->data_offset)
+ /* bitmap runs into data */
+ return -EINVAL;
+ } else {
+ /* DATA METADATA BITMAP - no problems */
+ }
md_super_write(mddev, rdev,
(rdev->sb_offset<<1) + bitmap->offset
+ page->index * (PAGE_SIZE/512),
@@ -280,32 +305,38 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
return 0;
}
+static void bitmap_file_kick(struct bitmap *bitmap);
/*
* write out a page to a file
*/
-static int write_page(struct bitmap *bitmap, struct page *page, int wait)
+static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
struct buffer_head *bh;
- if (bitmap->file == NULL)
- return write_sb_page(bitmap, page, wait);
+ if (bitmap->file == NULL) {
+ switch (write_sb_page(bitmap, page, wait)) {
+ case -EINVAL:
+ bitmap->flags |= BITMAP_WRITE_ERROR;
+ }
+ } else {
- bh = page_buffers(page);
+ bh = page_buffers(page);
- while (bh && bh->b_blocknr) {
- atomic_inc(&bitmap->pending_writes);
- set_buffer_locked(bh);
- set_buffer_mapped(bh);
- submit_bh(WRITE, bh);
- bh = bh->b_this_page;
- }
+ while (bh && bh->b_blocknr) {
+ atomic_inc(&bitmap->pending_writes);
+ set_buffer_locked(bh);
+ set_buffer_mapped(bh);
+ submit_bh(WRITE, bh);
+ bh = bh->b_this_page;
+ }
- if (wait) {
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
+ if (wait) {
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ }
}
- return 0;
+ if (bitmap->flags & BITMAP_WRITE_ERROR)
+ bitmap_file_kick(bitmap);
}
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -425,17 +456,17 @@ out:
*/
/* update the event counter and sync the superblock to disk */
-int bitmap_update_sb(struct bitmap *bitmap)
+void bitmap_update_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
unsigned long flags;
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
- return 0;
+ return;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* no superblock */
spin_unlock_irqrestore(&bitmap->lock, flags);
- return 0;
+ return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -443,7 +474,7 @@ int bitmap_update_sb(struct bitmap *bitmap)
if (!bitmap->mddev->degraded)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
kunmap_atomic(sb, KM_USER0);
- return write_page(bitmap, bitmap->sb_page, 1);
+ write_page(bitmap, bitmap->sb_page, 1);
}
/* print out the bitmap file superblock */
@@ -572,20 +603,22 @@ enum bitmap_mask_op {
MASK_UNSET
};
-/* record the state of the bitmap in the superblock */
-static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
- enum bitmap_mask_op op)
+/* record the state of the bitmap in the superblock. Return the old value */
+static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
+ enum bitmap_mask_op op)
{
bitmap_super_t *sb;
unsigned long flags;
+ int old;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* can't set the state */
spin_unlock_irqrestore(&bitmap->lock, flags);
- return;
+ return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
break;
@@ -594,6 +627,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
default: BUG();
}
kunmap_atomic(sb, KM_USER0);
+ return old;
}
/*
@@ -687,18 +721,23 @@ static void bitmap_file_kick(struct bitmap *bitmap)
{
char *path, *ptr = NULL;
- bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
- bitmap_update_sb(bitmap);
+ if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
+ bitmap_update_sb(bitmap);
- if (bitmap->file) {
- path = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (path)
- ptr = file_path(bitmap->file, path, PAGE_SIZE);
+ if (bitmap->file) {
+ path = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (path)
+ ptr = file_path(bitmap->file, path, PAGE_SIZE);
- printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), ptr ? ptr : "");
+ printk(KERN_ALERT
+ "%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), ptr ? ptr : "");
- kfree(path);
+ kfree(path);
+ } else
+ printk(KERN_ALERT
+ "%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
bitmap_file_put(bitmap);
@@ -769,16 +808,15 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
-int bitmap_unplug(struct bitmap *bitmap)
+void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i, flags;
int dirty, need_write;
struct page *page;
int wait = 0;
- int err;
if (!bitmap)
- return 0;
+ return;
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -786,7 +824,7 @@ int bitmap_unplug(struct bitmap *bitmap)
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->filemap) {
spin_unlock_irqrestore(&bitmap->lock, flags);
- return 0;
+ return;
}
page = bitmap->filemap[i];
dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -798,7 +836,7 @@ int bitmap_unplug(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
if (dirty | need_write)
- err = write_page(bitmap, page, 0);
+ write_page(bitmap, page, 0);
}
if (wait) { /* if any writes were performed, we need to wait on them */
if (bitmap->file)
@@ -809,7 +847,6 @@ int bitmap_unplug(struct bitmap *bitmap)
}
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
- return 0;
}
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -858,21 +895,21 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bmname(bitmap),
(unsigned long) i_size_read(file->f_mapping->host),
bytes + sizeof(bitmap_super_t));
- goto out;
+ goto err;
}
ret = -ENOMEM;
bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
if (!bitmap->filemap)
- goto out;
+ goto err;
/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
bitmap->filemap_attr = kzalloc(
roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
GFP_KERNEL);
if (!bitmap->filemap_attr)
- goto out;
+ goto err;
oldindex = ~0L;
@@ -905,7 +942,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
if (IS_ERR(page)) { /* read error */
ret = PTR_ERR(page);
- goto out;
+ goto err;
}
oldindex = index;
@@ -920,11 +957,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
memset(paddr + offset, 0xff,
PAGE_SIZE - offset);
kunmap_atomic(paddr, KM_USER0);
- ret = write_page(bitmap, page, 1);
- if (ret) {
+ write_page(bitmap, page, 1);
+
+ ret = -EIO;
+ if (bitmap->flags & BITMAP_WRITE_ERROR) {
/* release, page not in filemap yet */
put_page(page);
- goto out;
+ goto err;
}
}
@@ -956,11 +995,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
md_wakeup_thread(bitmap->mddev->thread);
}
-out:
printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu/%lu pages, set %lu bits, status: %d\n",
- bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
+ "read %lu/%lu pages, set %lu bits\n",
+ bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+
+ return 0;
+ err:
+ printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -997,19 +1040,18 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
* out to disk
*/
-int bitmap_daemon_work(struct bitmap *bitmap)
+void bitmap_daemon_work(struct bitmap *bitmap)
{
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
- int err = 0;
int blocks;
void *paddr;
if (bitmap == NULL)
- return 0;
+ return;
if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
- return 0;
+ return;
bitmap->daemon_lastrun = jiffies;
for (j = 0; j < bitmap->chunks; j++) {
@@ -1032,14 +1074,8 @@ int bitmap_daemon_work(struct bitmap *bitmap)
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (need_write) {
- switch (write_page(bitmap, page, 0)) {
- case 0:
- break;
- default:
- bitmap_file_kick(bitmap);
- }
- }
+ if (need_write)
+ write_page(bitmap, page, 0);
continue;
}
@@ -1048,13 +1084,11 @@ int bitmap_daemon_work(struct bitmap *bitmap)
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- err = write_page(bitmap, lastpage, 0);
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- if (err)
- bitmap_file_kick(bitmap);
} else
spin_unlock_irqrestore(&bitmap->lock, flags);
lastpage = page;
@@ -1097,14 +1131,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- err = write_page(bitmap, lastpage, 0);
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
}
- return err;
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1517,7 +1550,9 @@ int bitmap_create(mddev_t *mddev)
mddev->thread->timeout = bitmap->daemon_sleep * HZ;
- return bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
+
+ return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
error:
bitmap_free(bitmap);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4f7d35561a..846614e676c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -161,9 +161,7 @@ static void local_exit(void)
{
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
-
- if (unregister_blkdev(_major, _name) < 0)
- DMERR("unregister_blkdev failed");
+ unregister_blkdev(_major, _name);
_major = 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 33beaa7da08..65ddc887dfd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1640,7 +1640,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
static void md_update_sb(mddev_t * mddev, int force_change)
{
- int err;
struct list_head *tmp;
mdk_rdev_t *rdev;
int sync_req;
@@ -1727,7 +1726,7 @@ repeat:
"md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev),mddev->in_sync);
- err = bitmap_update_sb(mddev->bitmap);
+ bitmap_update_sb(mddev->bitmap);
ITERATE_RDEV(mddev,rdev,tmp) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
@@ -2073,9 +2072,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s has invalid sb, not importing!\n",
- bdevname(rdev->bdev,b));
+ printk(KERN_WARNING
+ "md: %s does not have a valid v%d.%d "
+ "superblock, not importing!\n",
+ bdevname(rdev->bdev,b),
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
@@ -3174,13 +3175,33 @@ static int do_md_run(mddev_t * mddev)
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
- * Also find largest hardsector size
*/
ITERATE_RDEV(mddev,rdev,tmp) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
+
+ /* perform some consistency tests on the device.
+ * We don't want the data to overlap the metadata;
+ * internal bitmap issues are handled elsewhere.
+ */
+ if (rdev->data_offset < rdev->sb_offset) {
+ if (mddev->size &&
+ rdev->data_offset + mddev->size*2
+ > rdev->sb_offset*2) {
+ printk("md: %s: data overlaps metadata\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ } else {
+ if (rdev->sb_offset*2 + rdev->sb_size/512
+ > rdev->data_offset) {
+ printk("md: %s: metadata overlaps data\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ }
}
md_probe(mddev->unit, NULL, NULL);
@@ -4642,7 +4663,6 @@ static int md_thread(void * arg)
* many dirty RAID5 blocks.
*/
- current->flags |= PF_NOFREEZE;
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
@@ -5090,7 +5110,7 @@ static int is_mddev_idle(mddev_t *mddev)
mdk_rdev_t * rdev;
struct list_head *tmp;
int idle;
- unsigned long curr_events;
+ long curr_events;
idle = 1;
ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5098,20 +5118,29 @@ static int is_mddev_idle(mddev_t *mddev)
curr_events = disk_stat_read(disk, sectors[0]) +
disk_stat_read(disk, sectors[1]) -
atomic_read(&disk->sync_io);
- /* The difference between curr_events and last_events
- * will be affected by any new non-sync IO (making
- * curr_events bigger) and any difference in the amount of
- * in-flight syncio (making current_events bigger or smaller)
- * The amount in-flight is currently limited to
- * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
- * which is at most 4096 sectors.
- * These numbers are fairly fragile and should be made
- * more robust, probably by enforcing the
- * 'window size' that md_do_sync sort-of uses.
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+ * So resync activity will cause curr_events to be smaller than
+ * when there was no such activity.
+ * non-sync IO will cause disk_stat to increase without
+ * increasing sync_io so curr_events will (eventually)
+ * be larger than it was before. Once it becomes
+ * substantially larger, the test below will cause
+ * the array to appear non-idle, and resync will slow
+ * down.
+ * If there is a lot of outstanding resync activity when
+ * we set last_events to curr_events, then all that activity
+ * completing might cause the array to appear non-idle
+ * and resync will be slowed down even though there might
+ * not have been non-resync activity. This will only
+ * happen once though. 'last_events' will soon reflect
+ * the state where there are few or no outstanding
+ * resync requests, and further resync activity will
+ * always make curr_events less than last_events.
*
- * Note: the following is an unsigned comparison.
*/
- if ((long)curr_events - (long)rdev->last_events > 4096) {
+ if (curr_events - rdev->last_events > 4096) {
rdev->last_events = curr_events;
idle = 0;
}
@@ -5772,7 +5801,7 @@ static void autostart_arrays(int part)
for (i = 0; i < dev_cnt; i++) {
dev_t dev = detected_devices[i];
- rdev = md_import_device(dev,0, 0);
+ rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
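For illustration only (not part of the patch): the do_md_run() hunk above rejects layouts where data and metadata overlap; when the data precedes the superblock it must end before it, and when the superblock precedes the data the superblock must end before the data (the internal bitmap case is checked separately in bitmap.c). A sketch of the same sector arithmetic with stand-in names:

/* Sketch of the overlap test, everything converted to 512-byte sectors.
 * size_kb mirrors mddev->size and sb_offset_kb mirrors rdev->sb_offset
 * (both in KiB, hence the *2); these names are stand-ins, not the md API. */
static int layout_is_sane(unsigned long long data_offset_sect,
			  unsigned long long sb_offset_kb,
			  unsigned long long sb_size_bytes,
			  unsigned long long size_kb)
{
	if (data_offset_sect < sb_offset_kb * 2)
		/* data precedes the superblock: it must end before it */
		return data_offset_sect + size_kb * 2 <= sb_offset_kb * 2;
	/* superblock precedes the data: it must end before the data */
	return sb_offset_kb * 2 + sb_size_bytes / 512 <= data_offset_sect;
}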
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46677d7d998..00c78b77b13 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1526,8 +1526,7 @@ static void raid1d(mddev_t *mddev)
blk_remove_plug(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- if (bitmap_unplug(mddev->bitmap) != 0)
- printk("%s: bitmap file write failed!\n", mdname(mddev));
+ bitmap_unplug(mddev->bitmap);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9eb66c1b523..a95ada1cfac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1510,8 +1510,7 @@ static void raid10d(mddev_t *mddev)
blk_remove_plug(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- if (bitmap_unplug(mddev->bitmap) != 0)
- printk("%s: bitmap file write failed!\n", mdname(mddev));
+ bitmap_unplug(mddev->bitmap);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f4e4ca2dcad..b6c7f6610ec 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -523,6 +523,7 @@ static int dvb_frontend_thread(void *data)
dvb_frontend_init(fe);
+ set_freezable();
while (1) {
up(&fepriv->sem); /* is locked when we enter the thread... */
restart:
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 259ea08e784..1cc2d286a1c 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -906,6 +906,7 @@ int cx88_audio_thread(void *data)
u32 mode = 0;
dprintk("cx88: tvaudio thread started\n");
+ set_freezable();
for (;;) {
msleep_interruptible(1000);
if (kthread_should_stop())
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index e1821eb82fb..d5ee2629121 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/freezer.h>
#include <linux/videodev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -468,6 +469,7 @@ int msp3400c_thread(void *data)
v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
msp_sleep(state, -1);
@@ -646,7 +648,7 @@ int msp3410d_thread(void *data)
int val, i, std, count;
v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
-
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
msp_sleep(state,-1);
@@ -940,7 +942,7 @@ int msp34xxg_thread(void *data)
int val, i;
v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
-
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
msp_sleep(state, -1);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c9bf9dbc2ea..9da338dc4f3 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -271,7 +271,7 @@ static int chip_thread(void *data)
struct CHIPDESC *desc = chiplist + chip->type;
v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
-
+ set_freezable();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index fcc5467e763..e617925ba31 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -47,6 +47,7 @@ static int videobuf_dvb_thread(void *data)
int err;
dprintk("dvb thread started\n");
+ set_freezable();
videobuf_read_start(&dvb->dvbq);
for (;;) {
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index f7e1d191037..3ef4d0159c3 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -573,6 +573,7 @@ static int vivi_thread(void *data)
dprintk(1,"thread started\n");
mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT);
+ set_freezable();
for (;;) {
vivi_sleep(dma_q);
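
All of the media threads above follow the same pattern: with the reworked freezer in this series, kernel threads are nonfreezable by default, so any thread that should be frozen across suspend opts in with set_freezable() once before entering its main loop and then parks in try_to_freeze(). A minimal sketch of such a loop (the work body and sleep interval are illustrative, not taken from any driver in this patch):

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/delay.h>

    static int example_thread(void *data)
    {
        set_freezable();                 /* opt in to the freezer */

        while (!kthread_should_stop()) {
            try_to_freeze();             /* park here during suspend/resume */
            msleep_interruptible(1000);
            /* ... periodic work goes here ... */
        }
        return 0;
    }
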
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 8abe45e49ad..ce62d8bfe1c 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -24,7 +24,7 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
return; // No status in this reply
- printk(KERN_DEBUG "%s%s: ", severity, str);
+ printk("%s%s: ", severity, str);
if (cmd < 0x1F) // Utility cmd
i2o_report_util_cmd(cmd);
@@ -32,7 +32,7 @@ void i2o_report_status(const char *severity, const char *str,
else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
i2o_report_exec_cmd(cmd);
else
- printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds
+ printk("Cmd = %0#2x, ", cmd); // Other cmds
if (msg[0] & MSG_FAIL) {
i2o_report_fail_status(req_status, msg);
@@ -44,7 +44,7 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
i2o_report_common_dsc(detailed_status);
else
- printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ printk(" / DetailedStatus = %0#4x.\n",
detailed_status);
}
@@ -89,10 +89,10 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
};
if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
- printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
+ printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
req_status);
else
- printk(KERN_DEBUG "TRANSPORT_%s.\n",
+ printk("TRANSPORT_%s.\n",
FAIL_STATUS[req_status & 0x0F]);
/* Dump some details */
@@ -104,7 +104,7 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
msg[5] >> 16, msg[5] & 0xFFF);
- printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
+ printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
if (msg[4] & (1 << 16))
printk(KERN_DEBUG "(FormatError), "
"this msg can never be delivered/processed.\n");
@@ -142,9 +142,9 @@ static void i2o_report_common_status(u8 req_status)
};
if (req_status >= ARRAY_SIZE(REPLY_STATUS))
- printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
+ printk("RequestStatus = %0#2x", req_status);
else
- printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
+ printk("%s", REPLY_STATUS[req_status]);
}
/*
@@ -187,10 +187,10 @@ static void i2o_report_common_dsc(u16 detailed_status)
};
if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
- printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ printk(" / DetailedStatus = %0#4x.\n",
detailed_status);
else
- printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
+ printk(" / %s.\n", COMMON_DSC[detailed_status]);
}
/*
@@ -200,49 +200,49 @@ static void i2o_report_util_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_UTIL_NOP:
- printk(KERN_DEBUG "UTIL_NOP, ");
+ printk("UTIL_NOP, ");
break;
case I2O_CMD_UTIL_ABORT:
- printk(KERN_DEBUG "UTIL_ABORT, ");
+ printk("UTIL_ABORT, ");
break;
case I2O_CMD_UTIL_CLAIM:
- printk(KERN_DEBUG "UTIL_CLAIM, ");
+ printk("UTIL_CLAIM, ");
break;
case I2O_CMD_UTIL_RELEASE:
- printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
+ printk("UTIL_CLAIM_RELEASE, ");
break;
case I2O_CMD_UTIL_CONFIG_DIALOG:
- printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
+ printk("UTIL_CONFIG_DIALOG, ");
break;
case I2O_CMD_UTIL_DEVICE_RESERVE:
- printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
+ printk("UTIL_DEVICE_RESERVE, ");
break;
case I2O_CMD_UTIL_DEVICE_RELEASE:
- printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
+ printk("UTIL_DEVICE_RELEASE, ");
break;
case I2O_CMD_UTIL_EVT_ACK:
- printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
+ printk("UTIL_EVENT_ACKNOWLEDGE, ");
break;
case I2O_CMD_UTIL_EVT_REGISTER:
- printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
+ printk("UTIL_EVENT_REGISTER, ");
break;
case I2O_CMD_UTIL_LOCK:
- printk(KERN_DEBUG "UTIL_LOCK, ");
+ printk("UTIL_LOCK, ");
break;
case I2O_CMD_UTIL_LOCK_RELEASE:
- printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
+ printk("UTIL_LOCK_RELEASE, ");
break;
case I2O_CMD_UTIL_PARAMS_GET:
- printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
+ printk("UTIL_PARAMS_GET, ");
break;
case I2O_CMD_UTIL_PARAMS_SET:
- printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
+ printk("UTIL_PARAMS_SET, ");
break;
case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
- printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
+ printk("UTIL_REPLY_FAULT_NOTIFY, ");
break;
default:
- printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
+ printk("Cmd = %0#2x, ", cmd);
}
}
@@ -253,106 +253,106 @@ static void i2o_report_exec_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_ADAPTER_ASSIGN:
- printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
+ printk("EXEC_ADAPTER_ASSIGN, ");
break;
case I2O_CMD_ADAPTER_READ:
- printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
+ printk("EXEC_ADAPTER_READ, ");
break;
case I2O_CMD_ADAPTER_RELEASE:
- printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
+ printk("EXEC_ADAPTER_RELEASE, ");
break;
case I2O_CMD_BIOS_INFO_SET:
- printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
+ printk("EXEC_BIOS_INFO_SET, ");
break;
case I2O_CMD_BOOT_DEVICE_SET:
- printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
+ printk("EXEC_BOOT_DEVICE_SET, ");
break;
case I2O_CMD_CONFIG_VALIDATE:
- printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
+ printk("EXEC_CONFIG_VALIDATE, ");
break;
case I2O_CMD_CONN_SETUP:
- printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
+ printk("EXEC_CONN_SETUP, ");
break;
case I2O_CMD_DDM_DESTROY:
- printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
+ printk("EXEC_DDM_DESTROY, ");
break;
case I2O_CMD_DDM_ENABLE:
- printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
+ printk("EXEC_DDM_ENABLE, ");
break;
case I2O_CMD_DDM_QUIESCE:
- printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
+ printk("EXEC_DDM_QUIESCE, ");
break;
case I2O_CMD_DDM_RESET:
- printk(KERN_DEBUG "EXEC_DDM_RESET, ");
+ printk("EXEC_DDM_RESET, ");
break;
case I2O_CMD_DDM_SUSPEND:
- printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
+ printk("EXEC_DDM_SUSPEND, ");
break;
case I2O_CMD_DEVICE_ASSIGN:
- printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
+ printk("EXEC_DEVICE_ASSIGN, ");
break;
case I2O_CMD_DEVICE_RELEASE:
- printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
+ printk("EXEC_DEVICE_RELEASE, ");
break;
case I2O_CMD_HRT_GET:
- printk(KERN_DEBUG "EXEC_HRT_GET, ");
+ printk("EXEC_HRT_GET, ");
break;
case I2O_CMD_ADAPTER_CLEAR:
- printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
+ printk("EXEC_IOP_CLEAR, ");
break;
case I2O_CMD_ADAPTER_CONNECT:
- printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
+ printk("EXEC_IOP_CONNECT, ");
break;
case I2O_CMD_ADAPTER_RESET:
- printk(KERN_DEBUG "EXEC_IOP_RESET, ");
+ printk("EXEC_IOP_RESET, ");
break;
case I2O_CMD_LCT_NOTIFY:
- printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
+ printk("EXEC_LCT_NOTIFY, ");
break;
case I2O_CMD_OUTBOUND_INIT:
- printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
+ printk("EXEC_OUTBOUND_INIT, ");
break;
case I2O_CMD_PATH_ENABLE:
- printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
+ printk("EXEC_PATH_ENABLE, ");
break;
case I2O_CMD_PATH_QUIESCE:
- printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
+ printk("EXEC_PATH_QUIESCE, ");
break;
case I2O_CMD_PATH_RESET:
- printk(KERN_DEBUG "EXEC_PATH_RESET, ");
+ printk("EXEC_PATH_RESET, ");
break;
case I2O_CMD_STATIC_MF_CREATE:
- printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
+ printk("EXEC_STATIC_MF_CREATE, ");
break;
case I2O_CMD_STATIC_MF_RELEASE:
- printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
+ printk("EXEC_STATIC_MF_RELEASE, ");
break;
case I2O_CMD_STATUS_GET:
- printk(KERN_DEBUG "EXEC_STATUS_GET, ");
+ printk("EXEC_STATUS_GET, ");
break;
case I2O_CMD_SW_DOWNLOAD:
- printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
+ printk("EXEC_SW_DOWNLOAD, ");
break;
case I2O_CMD_SW_UPLOAD:
- printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
+ printk("EXEC_SW_UPLOAD, ");
break;
case I2O_CMD_SW_REMOVE:
- printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
+ printk("EXEC_SW_REMOVE, ");
break;
case I2O_CMD_SYS_ENABLE:
- printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
+ printk("EXEC_SYS_ENABLE, ");
break;
case I2O_CMD_SYS_MODIFY:
- printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
+ printk("EXEC_SYS_MODIFY, ");
break;
case I2O_CMD_SYS_QUIESCE:
- printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
+ printk("EXEC_SYS_QUIESCE, ");
break;
case I2O_CMD_SYS_TAB_SET:
- printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
+ printk("EXEC_SYS_TAB_SET, ");
break;
default:
- printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
+ printk("Cmd = %#02x, ", cmd);
}
}
@@ -361,28 +361,28 @@ void i2o_debug_state(struct i2o_controller *c)
printk(KERN_INFO "%s: State = ", c->name);
switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
case 0x01:
- printk(KERN_DEBUG "INIT\n");
+ printk("INIT\n");
break;
case 0x02:
- printk(KERN_DEBUG "RESET\n");
+ printk("RESET\n");
break;
case 0x04:
- printk(KERN_DEBUG "HOLD\n");
+ printk("HOLD\n");
break;
case 0x05:
- printk(KERN_DEBUG "READY\n");
+ printk("READY\n");
break;
case 0x08:
- printk(KERN_DEBUG "OPERATIONAL\n");
+ printk("OPERATIONAL\n");
break;
case 0x10:
- printk(KERN_DEBUG "FAILED\n");
+ printk("FAILED\n");
break;
case 0x11:
- printk(KERN_DEBUG "FAULTED\n");
+ printk("FAULTED\n");
break;
default:
- printk(KERN_DEBUG "%x (unknown !!)\n",
+ printk("%x (unknown !!)\n",
((i2o_status_block *) c->status_block.virt)->iop_state);
}
};
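
The KERN_DEBUG removals above are not a log-level change: each of these printk() calls continues a line that was already opened with an explicit level, and a level prefix is only meaningful at the start of a message. A sketch of the intended pattern (the state values and strings are illustrative):

    #include <linux/kernel.h>

    static void report_state(int state)
    {
        printk(KERN_INFO "controller state = ");   /* level set once, line left open */
        if (state == 0x08)
            printk("OPERATIONAL\n");               /* continuation: no level prefix */
        else
            printk("%x (unknown)\n", state);
    }
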
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index c13b9321e7a..8c83ee3b092 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -131,8 +131,10 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
int rc = 0;
wait = i2o_exec_wait_alloc();
- if (!wait)
+ if (!wait) {
+ i2o_msg_nop(c, msg);
return -ENOMEM;
+ }
if (tcntxt == 0xffffffff)
tcntxt = 0x80000000;
@@ -337,6 +339,8 @@ static int i2o_exec_probe(struct device *dev)
rc = device_create_file(dev, &dev_attr_product_id);
if (rc) goto err_vid;
+ i2o_dev->iop->exec = i2o_dev;
+
return 0;
err_vid:
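
The extra i2o_msg_nop() matters because i2o_msg_post_wait_mem() is handed an inbound message frame that the IOP expects back; when the wait descriptor cannot be allocated, the frame is now returned as a NOP instead of being leaked. A reduced sketch of that pattern, assuming the i2o_msg_nop() helper from include/linux/i2o.h (the kzalloc stands in for i2o_exec_wait_alloc()):

    #include <linux/i2o.h>
    #include <linux/slab.h>

    /* sketch: never leak the inbound frame on an early error path */
    static int example_post_wait(struct i2o_controller *c, struct i2o_message *msg)
    {
        void *wait = kzalloc(64, GFP_KERNEL);   /* stand-in for i2o_exec_wait_alloc() */

        if (!wait) {
            i2o_msg_nop(c, msg);                /* hand the unused frame back to the IOP */
            return -ENOMEM;
        }
        /* ... post the message and sleep on the wait object ... */
        kfree(wait);
        return 0;
    }
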
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2bc9e..64a52bd7544 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -215,7 +215,7 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
struct i2o_message *msg;
msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
- if (IS_ERR(msg) == I2O_QUEUE_EMPTY)
+ if (IS_ERR(msg))
return PTR_ERR(msg);
msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
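
The old test compared IS_ERR()'s boolean result against I2O_QUEUE_EMPTY, so a failed i2o_msg_get_wait() was effectively never detected. The usual idiom for pointer-or-error returns looks like this (a generic sketch, not specific to the i2o code):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing { int id; };

    static struct thing *thing_get(int id)
    {
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
            return ERR_PTR(-ENOMEM);    /* encode an errno in the pointer */
        t->id = id;
        return t;
    }

    static int thing_use(int id)
    {
        struct thing *t = thing_get(id);

        if (IS_ERR(t))                  /* true for any ERR_PTR() value */
            return PTR_ERR(t);          /* recover the errno itself */
        /* ... use t ... */
        kfree(t);
        return 0;
    }
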
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 8ba275a1277..84e046e94f5 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -554,8 +554,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
return -ENXIO;
}
- msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
sb = c->status_block.virt;
if (get_user(size, &user_msg[0])) {
@@ -573,24 +571,30 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
size <<= 2; // Convert to bytes
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size)) {
osm_warn("unable to copy user message\n");
- return -EFAULT;
+ goto out;
}
i2o_dump_message(msg);
if (get_user(reply_size, &user_reply[0]) < 0)
- return -EFAULT;
+ goto out;
reply_size >>= 16;
reply_size <<= 2;
+ rcode = -ENOMEM;
reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
- return -ENOMEM;
+ goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -661,13 +665,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
- u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+ u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
@@ -675,7 +680,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
- memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
@@ -684,7 +689,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
- if (copy_from_user(msg, user_msg, size)) {
+ if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
@@ -692,7 +697,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
- sg = (struct sg_simple_element *)(msg + sg_offset);
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
@@ -714,7 +719,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- sg_list_cleanup:
+sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -723,7 +728,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
"%s: Could not copy message context FROM user\n",
c->name);
rcode = -EFAULT;
- goto sg_list_cleanup;
}
if (copy_to_user(user_reply, reply, reply_size)) {
printk(KERN_WARNING
@@ -731,12 +735,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
rcode = -EFAULT;
}
}
-
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
- cleanup:
+cleanup:
kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
return rcode;
}
@@ -793,8 +799,6 @@ static int i2o_cfg_passthru(unsigned long arg)
return -ENXIO;
}
- msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
sb = c->status_block.virt;
if (get_user(size, &user_msg[0]))
@@ -810,12 +814,17 @@ static int i2o_cfg_passthru(unsigned long arg)
size <<= 2; // Convert to bytes
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size))
- return -EFAULT;
+ goto out;
if (get_user(reply_size, &user_reply[0]) < 0)
- return -EFAULT;
+ goto out;
reply_size >>= 16;
reply_size <<= 2;
@@ -824,7 +833,8 @@ static int i2o_cfg_passthru(unsigned long arg)
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
- return -ENOMEM;
+ rcode = -ENOMEM;
+ goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -891,13 +901,14 @@ static int i2o_cfg_passthru(unsigned long arg)
}
rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
- u32 msg[128];
+ u32 rmsg[128];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
@@ -905,7 +916,7 @@ static int i2o_cfg_passthru(unsigned long arg)
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
- memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
@@ -914,7 +925,7 @@ static int i2o_cfg_passthru(unsigned long arg)
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
- if (copy_from_user(msg, user_msg, size)) {
+ if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
@@ -922,7 +933,7 @@ static int i2o_cfg_passthru(unsigned long arg)
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
- sg = (struct sg_simple_element *)(msg + sg_offset);
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
@@ -944,7 +955,7 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- sg_list_cleanup:
+sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -964,8 +975,11 @@ static int i2o_cfg_passthru(unsigned long arg)
for (i = 0; i < sg_index; i++)
kfree(sg_list[i]);
- cleanup:
+cleanup:
kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
return rcode;
}
#endif
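
Beyond the relabelled goto targets, both passthru paths now funnel every failure through one unwind sequence: the reply buffer is freed at cleanup: and an unsent message frame is handed back at out: (the frame is set to NULL once i2o_msg_post_wait() has consumed it). The shape of that pattern, reduced to its essentials with illustrative names:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* sketch: one exit label per resource, unwound in reverse order */
    static int example_ioctl(void __user *ubuf, size_t len)
    {
        void *req, *reply;
        int rc;

        rc = -ENOMEM;
        req = kzalloc(len, GFP_KERNEL);
        if (!req)
            goto out;

        reply = kzalloc(len, GFP_KERNEL);
        if (!reply)
            goto out_free_req;

        rc = -EFAULT;
        if (copy_from_user(req, ubuf, len))
            goto out_free_reply;

        rc = 0;                          /* ... submit req, fill reply ... */

    out_free_reply:
        kfree(reply);
    out_free_req:
        kfree(req);
    out:
        return rc;
    }
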
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38e815a2e87..fdbaa776f24 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -209,6 +209,7 @@ static int ucb1x00_thread(void *_ts)
DECLARE_WAITQUEUE(wait, tsk);
int valid = 0;
+ set_freezable();
add_wait_queue(&ts->irq_wait, &wait);
while (!kthread_should_stop()) {
unsigned int x, y, p;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a92b8728b90..1d516f24ba5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -10,7 +10,7 @@ if MISC_DEVICES
config IBM_ASM
tristate "Device driver for IBM RSA service processor"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
---help---
This option enables device driver support for in-band access to the
IBM RSA (Condor) service processor in eServer xSeries systems.
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 07a085ccbd5..b5df347c81b 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -72,7 +72,7 @@ struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_s
static void free_command(struct kobject *kobj)
{
struct command *cmd = to_command(kobj);
-
+
list_del(&cmd->queue_node);
atomic_dec(&command_count);
dbg("command count: %d\n", atomic_read(&command_count));
@@ -113,14 +113,14 @@ static inline void do_exec_command(struct service_processor *sp)
exec_next_command(sp);
}
}
-
+
/**
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
* is sent to the service processor. Once the interrupt handler gets a
* message of type command_response, the message is copied into
- * the current commands buffer,
+ * the current commands buffer,
*/
void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
{
@@ -160,7 +160,7 @@ static void exec_next_command(struct service_processor *sp)
}
}
-/**
+/**
* Sleep until a command has failed or a response has been received
* and the command status been updated by the interrupt handler.
* (see receive_response).
@@ -182,8 +182,8 @@ void ibmasm_receive_command_response(struct service_processor *sp, void *respons
{
struct command *cmd = sp->current_command;
- if (!sp->current_command)
- return;
+ if (!sp->current_command)
+ return;
memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
cmd->status = IBMASM_CMD_COMPLETE;
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 13c52f866e2..3dd2dfb8da1 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -44,11 +44,11 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
size = message_size;
switch (header->type) {
- case sp_event:
+ case sp_event:
ibmasm_receive_event(sp, message, size);
break;
case sp_command_response:
- ibmasm_receive_command_response(sp, message, size);
+ ibmasm_receive_command_response(sp, message, size);
break;
case sp_heartbeat:
ibmasm_receive_heartbeat(sp, message, size);
@@ -95,7 +95,7 @@ int ibmasm_send_driver_vpd(struct service_processor *sp)
strcat(vpd_data, IBMASM_DRIVER_VPD);
vpd_data[10] = 0;
vpd_data[15] = 0;
-
+
ibmasm_exec_command(sp, command);
ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
@@ -118,7 +118,7 @@ struct os_state_command {
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
* driver.
- * During driver exit the function is called with os state "down",
+ * During driver exit the function is called with os state "down",
* causing the service processor to stop the heartbeats.
*/
int ibmasm_send_os_state(struct service_processor *sp, int os_state)
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 2d21c2741b6..6cbba1afef3 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fe1e819235a..fda6a4d3bf2 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -51,7 +51,7 @@ static void wake_up_event_readers(struct service_processor *sp)
* event readers.
* There is no reader marker in the buffer, therefore readers are
* responsible for keeping up with the writer, or they will loose events.
- */
+ */
void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
{
struct event_buffer *buffer = sp->event_buffer;
@@ -77,13 +77,13 @@ void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int
static inline int event_available(struct event_buffer *b, struct event_reader *r)
{
- return (r->next_serial_number < b->next_serial_number);
+ return (r->next_serial_number < b->next_serial_number);
}
/**
* get_next_event
* Called by event readers (initiated from user space through the file
- * system).
+ * system).
* Sleeps until a new event is available.
*/
int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 7fd7a43e38d..3036e785b3e 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index 958c957a5e7..bf2c738d2b7 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -26,9 +26,9 @@ struct i2o_header {
u8 version;
u8 message_flags;
u16 message_size;
- u8 target;
+ u8 target;
u8 initiator_and_target;
- u8 initiator;
+ u8 initiator;
u8 function;
u32 initiator_context;
};
@@ -64,12 +64,12 @@ static inline unsigned short outgoing_message_size(unsigned int data_size)
size = sizeof(struct i2o_header) + data_size;
i2o_size = size / sizeof(u32);
-
+
if (size % sizeof(u32))
i2o_size++;
return i2o_size;
-}
+}
static inline u32 incoming_data_size(struct i2o_message *i2o_message)
{
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 48d5abebfc3..de860bc6d3f 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -58,8 +58,8 @@ static inline char *get_timestamp(char *buf)
return buf;
}
-#define IBMASM_CMD_PENDING 0
-#define IBMASM_CMD_COMPLETE 1
+#define IBMASM_CMD_PENDING 0
+#define IBMASM_CMD_COMPLETE 1
#define IBMASM_CMD_FAILED 2
#define IBMASM_CMD_TIMEOUT_NORMAL 45
@@ -163,55 +163,55 @@ struct service_processor {
};
/* command processing */
-extern struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
-extern void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
-extern void ibmasm_wait_for_response(struct command *cmd, int timeout);
-extern void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
+void ibmasm_wait_for_response(struct command *cmd, int timeout);
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
/* event processing */
-extern int ibmasm_event_buffer_init(struct service_processor *sp);
-extern void ibmasm_event_buffer_exit(struct service_processor *sp);
-extern void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
-extern void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
-extern int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_cancel_next_event(struct event_reader *reader);
+int ibmasm_event_buffer_init(struct service_processor *sp);
+void ibmasm_event_buffer_exit(struct service_processor *sp);
+void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
+void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
+int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_cancel_next_event(struct event_reader *reader);
/* heartbeat - from SP to OS */
-extern void ibmasm_register_panic_notifier(void);
-extern void ibmasm_unregister_panic_notifier(void);
-extern int ibmasm_heartbeat_init(struct service_processor *sp);
-extern void ibmasm_heartbeat_exit(struct service_processor *sp);
-extern void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
+void ibmasm_register_panic_notifier(void);
+void ibmasm_unregister_panic_notifier(void);
+int ibmasm_heartbeat_init(struct service_processor *sp);
+void ibmasm_heartbeat_exit(struct service_processor *sp);
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
/* reverse heartbeat - from OS to SP */
-extern void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
/* dot commands */
-extern void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
-extern int ibmasm_send_driver_vpd(struct service_processor *sp);
-extern int ibmasm_send_os_state(struct service_processor *sp, int os_state);
+void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
+int ibmasm_send_driver_vpd(struct service_processor *sp);
+int ibmasm_send_os_state(struct service_processor *sp, int os_state);
/* low level message processing */
-extern int ibmasm_send_i2o_message(struct service_processor *sp);
-extern irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
+int ibmasm_send_i2o_message(struct service_processor *sp);
+irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
/* remote console */
-extern void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
-extern int ibmasm_init_remote_input_dev(struct service_processor *sp);
-extern void ibmasm_free_remote_input_dev(struct service_processor *sp);
+void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
+int ibmasm_init_remote_input_dev(struct service_processor *sp);
+void ibmasm_free_remote_input_dev(struct service_processor *sp);
/* file system */
-extern int ibmasmfs_register(void);
-extern void ibmasmfs_unregister(void);
-extern void ibmasmfs_add_sp(struct service_processor *sp);
+int ibmasmfs_register(void);
+void ibmasmfs_unregister(void);
+void ibmasmfs_add_sp(struct service_processor *sp);
/* uart */
#ifdef CONFIG_SERIAL_8250
-extern void ibmasm_register_uart(struct service_processor *sp);
-extern void ibmasm_unregister_uart(struct service_processor *sp);
+void ibmasm_register_uart(struct service_processor *sp);
+void ibmasm_unregister_uart(struct service_processor *sp);
#else
#define ibmasm_register_uart(sp) do { } while(0)
#define ibmasm_unregister_uart(sp) do { } while(0)
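
The ibmasm.h changes are purely cosmetic: extern is implicit on function declarations, so dropping it changes nothing about linkage. For example, using one of the prototypes above:

    struct service_processor;       /* opaque here */

    /* these two declarations mean exactly the same thing */
    extern int ibmasm_heartbeat_init(struct service_processor *sp);
    int ibmasm_heartbeat_init(struct service_processor *sp);
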
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c436d3de8b8..eb7b073734b 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,12 +17,12 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
/*
- * Parts of this code are based on an article by Jonathan Corbet
+ * Parts of this code are based on an article by Jonathan Corbet
* that appeared in Linux Weekly News.
*/
@@ -55,22 +55,22 @@
* For each service processor the following files are created:
*
* command: execute dot commands
- * write: execute a dot command on the service processor
- * read: return the result of a previously executed dot command
+ * write: execute a dot command on the service processor
+ * read: return the result of a previously executed dot command
*
* events: listen for service processor events
- * read: sleep (interruptible) until an event occurs
+ * read: sleep (interruptible) until an event occurs
* write: wakeup sleeping event listener
*
* reverse_heartbeat: send a heartbeat to the service processor
- * read: sleep (interruptible) until the reverse heartbeat fails
+ * read: sleep (interruptible) until the reverse heartbeat fails
* write: wakeup sleeping heartbeat listener
*
* remote_video/width
* remote_video/height
* remote_video/width: control remote display settings
- * write: set value
- * read: read value
+ * write: set value
+ * read: read value
*/
#include <linux/fs.h>
@@ -155,7 +155,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
static struct dentry *ibmasmfs_create_file (struct super_block *sb,
struct dentry *parent,
- const char *name,
+ const char *name,
const struct file_operations *fops,
void *data,
int mode)
@@ -261,7 +261,7 @@ static int command_file_close(struct inode *inode, struct file *file)
struct ibmasmfs_command_data *command_data = file->private_data;
if (command_data->command)
- command_put(command_data->command);
+ command_put(command_data->command);
kfree(command_data);
return 0;
@@ -348,7 +348,7 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
static int event_file_open(struct inode *inode, struct file *file)
{
struct ibmasmfs_event_data *event_data;
- struct service_processor *sp;
+ struct service_processor *sp;
if (!inode->i_private)
return -ENODEV;
@@ -573,7 +573,7 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
kfree(buff);
return -EFAULT;
}
-
+
value = simple_strtoul(buff, NULL, 10);
writel(value, address);
kfree(buff);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index a3c589b7cbf..4b2398e27fd 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index e5ed59c589a..766766523a6 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -48,9 +48,9 @@
#define INTR_CONTROL_REGISTER 0x13A4
#define SCOUT_COM_A_BASE 0x0000
-#define SCOUT_COM_B_BASE 0x0100
-#define SCOUT_COM_C_BASE 0x0200
-#define SCOUT_COM_D_BASE 0x0300
+#define SCOUT_COM_B_BASE 0x0100
+#define SCOUT_COM_C_BASE 0x0200
+#define SCOUT_COM_D_BASE 0x0300
static inline int sp_interrupt_pending(void __iomem *base_address)
{
@@ -86,12 +86,12 @@ static inline void disable_sp_interrupts(void __iomem *base_address)
static inline void enable_uart_interrupts(void __iomem *base_address)
{
- ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
+ ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
}
static inline void disable_uart_interrupts(void __iomem *base_address)
{
- ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
+ ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
}
#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE )
@@ -111,7 +111,7 @@ static inline u32 get_mfa_outbound(void __iomem *base_address)
static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
{
- writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
+ writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
}
static inline u32 get_mfa_inbound(void __iomem *base_address)
@@ -126,7 +126,7 @@ static inline u32 get_mfa_inbound(void __iomem *base_address)
static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
{
- writel(mfa, base_address + INBOUND_QUEUE_PORT);
+ writel(mfa, base_address + INBOUND_QUEUE_PORT);
}
static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 2f3bddfab93..fb03a853fac 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,9 +18,9 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
- * This driver is based on code originally written by Pete Reynolds
+ * This driver is based on code originally written by Pete Reynolds
* and others.
*
*/
@@ -30,13 +30,13 @@
*
* 1) When loaded it sends a message to the service processor,
* indicating that an OS is * running. This causes the service processor
- * to send periodic heartbeats to the OS.
+ * to send periodic heartbeats to the OS.
*
* 2) Answers the periodic heartbeats sent by the service processor.
* Failure to do so would result in system reboot.
*
* 3) Acts as a pass through for dot commands sent from user applications.
- * The interface for this is the ibmasmfs file system.
+ * The interface for this is the ibmasmfs file system.
*
* 4) Allows user applications to register for event notification. Events
* are sent to the driver through interrupts. They can be read from user
@@ -105,7 +105,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
}
sp->irq = pdev->irq;
- sp->base_address = ioremap(pci_resource_start(pdev, 0),
+ sp->base_address = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (sp->base_address == 0) {
dev_err(sp->dev, "Failed to ioremap pci memory\n");
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index f8fdb2d5417..bec9e2c44be 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -36,10 +36,10 @@ static struct {
unsigned char command[3];
} rhb_dot_cmd = {
.header = {
- .type = sp_read,
+ .type = sp_read,
.command_size = 3,
.data_size = 0,
- .status = 0
+ .status = 0
},
.command = { 4, 3, 6 }
};
@@ -76,9 +76,9 @@ int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_
if (cmd->status != IBMASM_CMD_COMPLETE)
times_failed++;
- wait_event_interruptible_timeout(rhb->wait,
+ wait_event_interruptible_timeout(rhb->wait,
rhb->stopped,
- REVERSE_HEARTBEAT_TIMEOUT * HZ);
+ REVERSE_HEARTBEAT_TIMEOUT * HZ);
if (signal_pending(current) || rhb->stopped) {
result = -EINTR;
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index a40fda6c402..0550ce075fc 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -28,11 +28,10 @@
#include "ibmasm.h"
#include "remote.h"
-static int xmax = 1600;
-static int ymax = 1200;
+#define MOUSE_X_MAX 1600
+#define MOUSE_Y_MAX 1200
-
-static unsigned short xlate_high[XLATE_SIZE] = {
+static const unsigned short xlate_high[XLATE_SIZE] = {
[KEY_SYM_ENTER & 0xff] = KEY_ENTER,
[KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
[KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
@@ -81,7 +80,8 @@ static unsigned short xlate_high[XLATE_SIZE] = {
[KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
[KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
};
-static unsigned short xlate[XLATE_SIZE] = {
+
+static const unsigned short xlate[XLATE_SIZE] = {
[NO_KEYCODE] = KEY_RESERVED,
[KEY_SYM_SPACE] = KEY_SPACE,
[KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
@@ -133,19 +133,16 @@ static unsigned short xlate[XLATE_SIZE] = {
[KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
};
-static char remote_mouse_name[] = "ibmasm RSA I remote mouse";
-static char remote_keybd_name[] = "ibmasm RSA I remote keyboard";
-
static void print_input(struct remote_input *input)
{
if (input->type == INPUT_TYPE_MOUSE) {
unsigned char buttons = input->mouse_buttons;
dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
input->data.mouse.x, input->data.mouse.y,
- (buttons)?" -- buttons:":"",
- (buttons & REMOTE_BUTTON_LEFT)?"left ":"",
- (buttons & REMOTE_BUTTON_MIDDLE)?"middle ":"",
- (buttons & REMOTE_BUTTON_RIGHT)?"right":""
+ (buttons) ? " -- buttons:" : "",
+ (buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
+ (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
+ (buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
);
} else {
dbg("remote keypress (code, flag, down):"
@@ -180,7 +177,7 @@ static void send_keyboard_event(struct input_dev *dev,
key = xlate_high[code & 0xff];
else
key = xlate[code];
- input_report_key(dev, key, (input->data.keyboard.key_down) ? 1 : 0);
+ input_report_key(dev, key, input->data.keyboard.key_down);
input_sync(dev);
}
@@ -228,20 +225,22 @@ int ibmasm_init_remote_input_dev(struct service_processor *sp)
mouse_dev->id.vendor = pdev->vendor;
mouse_dev->id.product = pdev->device;
mouse_dev->id.version = 1;
+ mouse_dev->dev.parent = sp->dev;
mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) |
BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
set_bit(BTN_TOUCH, mouse_dev->keybit);
- mouse_dev->name = remote_mouse_name;
- input_set_abs_params(mouse_dev, ABS_X, 0, xmax, 0, 0);
- input_set_abs_params(mouse_dev, ABS_Y, 0, ymax, 0, 0);
+ mouse_dev->name = "ibmasm RSA I remote mouse";
+ input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
+ input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
- mouse_dev->id.bustype = BUS_PCI;
+ keybd_dev->id.bustype = BUS_PCI;
keybd_dev->id.vendor = pdev->vendor;
keybd_dev->id.product = pdev->device;
- mouse_dev->id.version = 2;
+ keybd_dev->id.version = 2;
+ keybd_dev->dev.parent = sp->dev;
keybd_dev->evbit[0] = BIT(EV_KEY);
- keybd_dev->name = remote_keybd_name;
+ keybd_dev->name = "ibmasm RSA I remote keyboard";
for (i = 0; i < XLATE_SIZE; i++) {
if (xlate_high[i])
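
Besides replacing the file-scope limits and name strings, this hunk fixes a copy-and-paste slip (the keyboard's bustype and version were being written into mouse_dev) and sets dev.parent on both input devices so they appear under the adapter in sysfs. A sketch of registering two input devices with a common parent (error handling trimmed to the essentials; names are illustrative):

    #include <linux/input.h>
    #include <linux/pci.h>
    #include <linux/bitops.h>

    static int example_register_inputs(struct pci_dev *pdev)
    {
        struct input_dev *mouse, *keybd;
        int rc = -ENOMEM;

        mouse = input_allocate_device();
        keybd = input_allocate_device();
        if (!mouse || !keybd)
            goto err_free;

        mouse->name = "example remote mouse";
        mouse->id.bustype = BUS_PCI;
        mouse->dev.parent = &pdev->dev;          /* visible under the PCI device */
        set_bit(EV_KEY, mouse->evbit);
        set_bit(EV_ABS, mouse->evbit);
        input_set_abs_params(mouse, ABS_X, 0, 1600, 0, 0);
        input_set_abs_params(mouse, ABS_Y, 0, 1200, 0, 0);

        keybd->name = "example remote keyboard";
        keybd->id.bustype = BUS_PCI;             /* keybd, not mouse: the old slip */
        keybd->dev.parent = &pdev->dev;
        set_bit(EV_KEY, keybd->evbit);

        rc = input_register_device(mouse);
        if (rc)
            goto err_free;
        rc = input_register_device(keybd);
        if (rc) {
            input_unregister_device(mouse);
            mouse = NULL;                        /* unregister drops our reference */
            goto err_free;
        }
        return 0;

    err_free:
        input_free_device(mouse);                /* both helpers accept NULL */
        input_free_device(keybd);
        return rc;
    }
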
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index b7076a8442d..72acf5af7a2 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* Orignally written by Pete Reynolds
*/
@@ -73,7 +73,7 @@ struct keyboard_input {
-struct remote_input {
+struct remote_input {
union {
struct mouse_input mouse;
struct keyboard_input keyboard;
@@ -85,7 +85,7 @@ struct remote_input {
unsigned char pad3;
};
-#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
+#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
@@ -93,7 +93,7 @@ struct remote_input {
#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
-#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 9783caf4969..93baa350d69 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4fb2089dc69..b53dac8d1b6 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mmc/card.h>
@@ -44,11 +45,7 @@ static int mmc_queue_thread(void *d)
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
- /*
- * Set iothread to ensure that we aren't put to sleep by
- * the process freezing. We handle suspension ourselves.
- */
- current->flags |= PF_MEMALLOC|PF_NOFREEZE;
+ current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 51bc7e2f1f2..ef89780eb9d 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -16,6 +16,7 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -80,7 +81,7 @@ static int mtd_blktrans_thread(void *arg)
struct request_queue *rq = tr->blkcore_priv->rq;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+ current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
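
Dropping PF_NOFREEZE in the mmc and mtd threads is the flip side of the set_freezable() additions elsewhere in the series: kthreads now start out nonfreezable, so a thread that must keep running across suspend simply does nothing, while PF_MEMALLOC is still requested by hand on the writeback path. Roughly (a sketch, not either driver's actual loop):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_io_thread(void *data)
    {
        /* No PF_NOFREEZE needed: kthreads are not frozen unless they call
         * set_freezable().  PF_MEMALLOC is still set explicitly because this
         * thread can be driving I/O needed for memory reclaim. */
        current->flags |= PF_MEMALLOC;

        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);  /* stand-in for the real work */
        return 0;
    }
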
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 74002945b71..7c6b223b3f8 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -368,7 +368,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
struct ubi_vid_hdr *vid_hdr;
struct ubi_volume *vol = ubi->volumes[idx];
- uint32_t crc, crc1;
+ uint32_t uninitialized_var(crc);
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
@@ -451,7 +451,7 @@ retry:
}
if (check) {
- crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
if (crc1 != crc) {
ubi_warn("CRC error: calculated %#08x, must be %#08x",
crc1, crc);
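
The eba.c change (like the r8169 and smctr ones further down) silences a false-positive "may be used uninitialized" warning: crc is only compared on the check path, where it has already been initialised, but gcc cannot prove that. uninitialized_var() documents the situation without masking real bugs, and crc1 is narrowed to the block that uses it. A reduced example of the same shape:

    #include <linux/compiler.h>

    int example(int check, int err)
    {
        int uninitialized_var(val);     /* assigned on every path that reads it */

        if (check)
            val = err * 2;
        /* ... other work that can fail ... */
        if (check)
            return val;                 /* gcc cannot see this, we can */
        return 0;
    }
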
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9ecaf77eca9..ab2174a56bc 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1346,6 +1346,7 @@ static int ubi_thread(void *u)
ubi_msg("background thread \"%s\" started, PID %d",
ubi->bgt_name, current->pid);
+ set_freezable();
for (;;) {
int err;
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 4a18b881ae9..fd1e156f174 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -75,6 +75,7 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/mii.h>
+#include <linux/interrupt.h>
#include <net/checksum.h>
#include <asm/atomic.h>
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9afa47edfc5..3c54014acec 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata (pdev);
struct speedo_private *sp = netdev_priv(dev);
void __iomem *ioaddr = sp->regs;
+ int rc;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
pci_set_master(pdev);
if (!netif_running(dev))
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 3450051ae56..6bb48ba8096 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
#define NATSEMI_CREATE_FILE(_dev, _name) \
device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
- device_create_file(&_dev->dev, &dev_attr_##_name)
+ device_remove_file(&_dev->dev, &dev_attr_##_name)
NATSEMI_ATTR(dspcfg_workaround);
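
The natsemi fix is a one-word copy-and-paste bug: the REMOVE macro was calling device_create_file(), so teardown created the sysfs attribute a second time instead of removing it. Create and remove must mirror each other, as in this generic sketch:

    #include <linux/kernel.h>
    #include <linux/device.h>

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
        return sprintf(buf, "42\n");
    }
    static DEVICE_ATTR(foo, 0444, foo_show, NULL);

    static int example_probe(struct device *dev)
    {
        return device_create_file(dev, &dev_attr_foo);
    }

    static void example_remove(struct device *dev)
    {
        device_remove_file(dev, &dev_attr_foo);  /* pairs with the create above */
    }
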
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5d406..cfdeaf7aa16 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
static int ne2k_pci_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
+ int rc;
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
NS8390_init(dev, 1);
netif_device_attach(dev);
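
Both network resume paths now check pci_enable_device(), which can legitimately fail after a resume, instead of pressing on with a dead device. The resulting shape of a resume handler (a sketch with minimal error unwinding; hardware reinit omitted):

    #include <linux/pci.h>
    #include <linux/netdevice.h>

    static int example_resume(struct pci_dev *pdev)
    {
        struct net_device *dev = pci_get_drvdata(pdev);
        int rc;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        rc = pci_enable_device(pdev);
        if (rc)
            return rc;                   /* leave the interface detached on failure */
        pci_set_master(pdev);

        /* ... reinitialise the hardware ... */
        netif_device_attach(dev);
        return 0;
    }
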
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a9010c7a..bb6896ae315 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
- struct TxDesc *txd;
+ struct TxDesc * uninitialized_var(txd);
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d452f..f83bb5cb0d3 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
__u16 rcode, correlator;
int err = 0;
__u8 xframe = 1;
- __u16 tx_fstatus;
rmf->vl = SWAP_BYTES(rmf->vl);
if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
}
break;
- case TX_FORWARD:
+ case TX_FORWARD: {
+ __u16 uninitialized_var(tx_fstatus);
+
if((rcode = smctr_rcv_tx_forward(dev, rmf))
!= POSITIVE_ACK)
{
@@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
}
}
break;
+ }
/* Received MAC Frames Processed by CRS/REM/RPS. */
case RSP:
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index ec1c556a47c..5d8c78ee2cd 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
int br, tc;
int br_pwr, error;
+ *br_io = 0;
+
if (rate == 0)
return (0);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 35eded7ffb2..1cc18e787a6 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -595,8 +595,8 @@ recv_frame( struct net_device *dev )
u32 crc = CRC32_INITIAL;
- unsigned framelen, frameno, ack;
- unsigned is_first, frame_ok;
+ unsigned framelen = 0, frameno, ack;
+ unsigned is_first, frame_ok = 0;
if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
frame_ok = framelen > 4
@@ -604,8 +604,7 @@ recv_frame( struct net_device *dev )
: skip_tail( ioaddr, framelen, crc );
if( frame_ok )
interpret_ack( dev, ack );
- } else
- frame_ok = 0;
+ }
outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
if( frame_ok ) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1c54908fdc4..ee1cc14db38 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3086,7 +3086,8 @@ static int airo_thread(void *data) {
struct net_device *dev = data;
struct airo_info *ai = dev->priv;
int locked;
-
+
+ set_freezable();
while(1) {
/* make swsusp happy with our thread */
try_to_freeze();
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4a59306a3f0..9f366242c39 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -613,6 +613,7 @@ static int wlan_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
+ set_freezable();
for (;;) {
lbs_deb_thread( "main-thread 111: intcounter=%d "
"currenttxskb=%p dnld_sent=%d\n",
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a68b3b3761a..a728a7cd2fc 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <asm/io.h>
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 50cad3a59a6..7c93a108f9b 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -651,6 +651,7 @@ static int pccardd(void *__skt)
add_wait_queue(&skt->thread_wait, &wait);
complete(&skt->thread_done);
+ set_freezable();
for (;;) {
unsigned long flags;
unsigned int events;
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 3b40f9623cc..3c45142c40b 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -113,7 +113,7 @@ MODULE_LICENSE("Dual MPL/GPL");
#define CONFIG_PCMCIA_SLOT_B
#endif
-#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
+#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
@@ -146,9 +146,9 @@ MODULE_LICENSE("Dual MPL/GPL");
/* ------------------------------------------------------------------------- */
-#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
-#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
-#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
+#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
+#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
+#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
/* ------------------------------------------------------------------------- */
static int pcmcia_schlvl;
@@ -169,8 +169,8 @@ static u32 *m8xx_pgcrx[2];
*/
struct pcmcia_win {
- u32 br;
- u32 or;
+ u32 br;
+ u32 or;
};
/*
@@ -214,7 +214,7 @@ struct pcmcia_win {
/* we keep one lookup table per socket to check flags */
-#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
+#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
struct event_table {
u32 regbit;
@@ -224,8 +224,8 @@ struct event_table {
static const char driver_name[] = "m8xx-pcmcia";
struct socket_info {
- void (*handler)(void *info, u32 events);
- void *info;
+ void (*handler) (void *info, u32 events);
+ void *info;
u32 slot;
pcmconf8xx_t *pcmcia;
@@ -234,7 +234,7 @@ struct socket_info {
socket_state_t state;
struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
- struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
+ struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
struct event_table events[PCMCIA_EVENTS_MAX];
struct pcmcia_socket socket;
};
@@ -248,8 +248,7 @@ static struct socket_info socket[PCMCIA_SOCKETS_NO];
#define M8XX_SIZES_NO 32
-static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
-{
+static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
0x00000001, 0x00000002, 0x00000008, 0x00000004,
0x00000080, 0x00000040, 0x00000010, 0x00000020,
0x00008000, 0x00004000, 0x00001000, 0x00002000,
@@ -265,7 +264,7 @@ static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
static irqreturn_t m8xx_interrupt(int irq, void *dev);
-#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
+#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
/* ------------------------------------------------------------------------- */
/* board specific stuff: */
@@ -289,8 +288,9 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u32 reg = 0;
- switch(vcc) {
- case 0: break;
+ switch (vcc) {
+ case 0:
+ break;
case 33:
reg |= BCSR1_PCVCTL4;
break;
@@ -301,11 +301,12 @@ static int voltage_set(int slot, int vcc, int vpp)
return 1;
}
- switch(vpp) {
- case 0: break;
+ switch (vpp) {
+ case 0:
+ break;
case 33:
case 50:
- if(vcc == vpp)
+ if (vcc == vpp)
reg |= BCSR1_PCVCTL6;
else
return 1;
@@ -316,25 +317,29 @@ static int voltage_set(int slot, int vcc, int vpp)
return 1;
}
- if(!((vcc == 50) || (vcc == 0)))
+ if (!((vcc == 50) || (vcc == 0)))
return 1;
/* first, turn off all power */
- out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7));
+ out_be32(((u32 *) RPX_CSR_ADDR),
+ in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
+ BCSR1_PCVCTL5 |
+ BCSR1_PCVCTL6 |
+ BCSR1_PCVCTL7));
/* enable new powersettings */
- out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg);
+ out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
return 0;
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
-#endif /* CONFIG_RPXCLASSIC */
+#endif /* CONFIG_RPXCLASSIC */
/* FADS Boards from Motorola */
@@ -346,43 +351,45 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u32 reg = 0;
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= BCSR1_PCCVCC0;
- break;
- case 50:
- reg |= BCSR1_PCCVCC1;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= BCSR1_PCCVCC0;
+ break;
+ case 50:
+ reg |= BCSR1_PCCVCC1;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= BCSR1_PCCVPP1;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= BCSR1_PCCVPP0;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= BCSR1_PCCVPP1;
+ else
return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= BCSR1_PCCVPP0;
+ else
+ return 1;
+ default:
+ return 1;
}
/* first, turn off all power */
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
+ out_be32((u32 *) BCSR1,
+ in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
+ BCSR1_PCCVPP_MASK));
/* enable new powersettings */
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | reg);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
return 0;
}
@@ -391,12 +398,12 @@ static int voltage_set(int slot, int vcc, int vpp)
static void hardware_enable(int slot)
{
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~BCSR1_PCCEN);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
}
static void hardware_disable(int slot)
{
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | BCSR1_PCCEN);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
}
#endif
@@ -410,7 +417,7 @@ static void hardware_disable(int slot)
static inline void hardware_enable(int slot)
{
- m8xx_pcmcia_ops.hw_ctrl(slot, 1);
+ m8xx_pcmcia_ops.hw_ctrl(slot, 1);
}
static inline void hardware_disable(int slot)
@@ -436,52 +443,53 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u8 reg = 0;
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= CSR2_VCC_33;
- break;
- case 50:
- reg |= CSR2_VCC_50;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= CSR2_VCC_33;
+ break;
+ case 50:
+ reg |= CSR2_VCC_50;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= CSR2_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= CSR2_VPP_12;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= CSR2_VPP_VCC;
+ else
+ return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= CSR2_VPP_12;
+ else
return 1;
+ default:
+ return 1;
}
/* first, turn off all power */
- out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
+ out_8((u8 *) MBX_CSR2_ADDR,
+ in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
/* enable new powersettings */
- out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) | reg);
+ out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
return 0;
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
-#endif /* CONFIG_MBX */
+#endif /* CONFIG_MBX */
#if defined(CONFIG_PRxK)
#include <asm/cpld.h>
@@ -495,43 +503,46 @@ static int voltage_set(int slot, int vcc, int vpp)
u8 regread;
cpld_regs *ccpld = get_cpld();
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= PCMCIA_VCC_33;
- break;
- case 50:
- reg |= PCMCIA_VCC_50;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= PCMCIA_VCC_33;
+ break;
+ case 50:
+ reg |= PCMCIA_VCC_50;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= PCMCIA_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= PCMCIA_VPP_12;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= PCMCIA_VPP_VCC;
+ else
return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= PCMCIA_VPP_12;
+ else
+ return 1;
+ default:
+ return 1;
}
reg = reg >> (slot << 2);
regread = in_8(&ccpld->fpga_pc_ctl);
- if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
+ if (reg !=
+ (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
/* enable new powersettings */
- regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2));
+ regread =
+ regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
+ (slot << 2));
out_8(&ccpld->fpga_pc_ctl, reg | regread);
msleep(100);
}
@@ -540,10 +551,10 @@ static int voltage_set(int slot, int vcc, int vpp)
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
-#endif /* CONFIG_PRxK */
+#endif /* CONFIG_PRxK */
static u32 pending_events[PCMCIA_SOCKETS_NO];
static DEFINE_SPINLOCK(pending_event_lock);
@@ -553,7 +564,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
struct socket_info *s;
struct event_table *e;
unsigned int i, events, pscr, pipr, per;
- pcmconf8xx_t *pcmcia = socket[0].pcmcia;
+ pcmconf8xx_t *pcmcia = socket[0].pcmcia;
dprintk("Interrupt!\n");
/* get interrupt sources */
@@ -562,16 +573,16 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
pipr = in_be32(&pcmcia->pcmc_pipr);
per = in_be32(&pcmcia->pcmc_per);
- for(i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
s = &socket[i];
e = &s->events[0];
events = 0;
- while(e->regbit) {
- if(pscr & e->regbit)
+ while (e->regbit) {
+ if (pscr & e->regbit)
events |= e->eventbit;
- e++;
+ e++;
}
/*
@@ -579,13 +590,11 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
* not too nice done,
* we depend on that CD2 is the bit to the left of CD1...
*/
- if(events & SS_DETECT)
- if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
- (pipr & M8XX_PCMCIA_CD1(i)))
- {
+ if (events & SS_DETECT)
+ if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
+ (pipr & M8XX_PCMCIA_CD1(i))) {
events &= ~SS_DETECT;
}
-
#ifdef PCMCIA_GLITCHY_CD
/*
* I've experienced CD problems with my ADS board.
@@ -593,24 +602,23 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
* real change of Card detection.
*/
- if((events & SS_DETECT) &&
- ((pipr &
- (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
- (s->state.Vcc | s->state.Vpp)) {
+ if ((events & SS_DETECT) &&
+ ((pipr &
+ (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
+ (s->state.Vcc | s->state.Vpp)) {
events &= ~SS_DETECT;
/*printk( "CD glitch workaround - CD = 0x%08x!\n",
- (pipr & (M8XX_PCMCIA_CD2(i)
- | M8XX_PCMCIA_CD1(i))));*/
+ (pipr & (M8XX_PCMCIA_CD2(i)
+ | M8XX_PCMCIA_CD1(i)))); */
}
#endif
/* call the handler */
dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
- "pipr = 0x%08x\n",
- i, events, pscr, pipr);
+ "pipr = 0x%08x\n", i, events, pscr, pipr);
- if(events) {
+ if (events) {
spin_lock(&pending_event_lock);
pending_events[i] |= events;
spin_unlock(&pending_event_lock);
@@ -643,11 +651,11 @@ static u32 m8xx_get_graycode(u32 size)
{
u32 k;
- for(k = 0; k < M8XX_SIZES_NO; k++)
- if(m8xx_size_to_gray[k] == size)
+ for (k = 0; k < M8XX_SIZES_NO; k++)
+ if (m8xx_size_to_gray[k] == size)
break;
- if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
+ if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
k = -1;
return k;
@@ -657,7 +665,7 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
{
u32 reg, clocks, psst, psl, psht;
- if(!ns) {
+ if (!ns) {
/*
* We get called with IO maps setup to 0ns
@@ -665,10 +673,10 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
* They should be 255ns.
*/
- if(is_io)
+ if (is_io)
ns = 255;
else
- ns = 100; /* fast memory if 0 */
+ ns = 100; /* fast memory if 0 */
}
/*
@@ -679,23 +687,23 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
/* how we want to adjust the timing - in percent */
-#define ADJ 180 /* 80 % longer accesstime - to be sure */
+#define ADJ 180 /* 80 % longer accesstime - to be sure */
clocks = ((bus_freq / 1000) * ns) / 1000;
- clocks = (clocks * ADJ) / (100*1000);
- if(clocks >= PCMCIA_BMT_LIMIT) {
- printk( "Max access time limit reached\n");
- clocks = PCMCIA_BMT_LIMIT-1;
+ clocks = (clocks * ADJ) / (100 * 1000);
+ if (clocks >= PCMCIA_BMT_LIMIT) {
+ printk("Max access time limit reached\n");
+ clocks = PCMCIA_BMT_LIMIT - 1;
}
- psst = clocks / 7; /* setup time */
- psht = clocks / 7; /* hold time */
- psl = (clocks * 5) / 7; /* strobe length */
+ psst = clocks / 7; /* setup time */
+ psht = clocks / 7; /* hold time */
+ psl = (clocks * 5) / 7; /* strobe length */
psst += clocks - (psst + psht + psl);
- reg = psst << 12;
- reg |= psl << 7;
+ reg = psst << 12;
+ reg |= psl << 7;
reg |= psht << 16;
return reg;
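A quick worked example of the timing arithmetic in m8xx_get_speed() above (illustrative numbers, not part of the patch): with a 48 MHz bus clock and the 255 ns I/O default, ((48000000 / 1000) * 255) / 1000 = 12240, and (12240 * ADJ) / (100 * 1000) = (12240 * 180) / 100000, i.e. about 22 clocks; the divide by 100 takes out the percentage and the divide by 1000 undoes the scaling left over from the first step. That clock count is then split into roughly 1/7 setup, 5/7 strobe and 1/7 hold, which is the psst/psl/psht computation just above.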
@@ -710,8 +718,8 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
pipr = in_be32(&pcmcia->pcmc_pipr);
- *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
- | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
+ *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
+ | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
*value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
if (s->state.flags & SS_IOCARD)
@@ -795,16 +803,16 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
/* read out VS1 and VS2 */
reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
- >> M8XX_PCMCIA_VS_SHIFT(lsock);
+ >> M8XX_PCMCIA_VS_SHIFT(lsock);
- if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
- switch(reg) {
+ if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
+ switch (reg) {
case 1:
*value |= SS_3VCARD;
- break; /* GND, NC - 3.3V only */
+ break; /* GND, NC - 3.3V only */
case 2:
*value |= SS_XVCARD;
- break; /* NC. GND - x.xV only */
+ break; /* NC. GND - x.xV only */
};
}
@@ -812,7 +820,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
return 0;
}
-static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
+static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
{
int lsock = container_of(sock, struct socket_info, socket)->slot;
struct socket_info *s = &socket[lsock];
@@ -821,20 +829,20 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
unsigned long flags;
pcmconf8xx_t *pcmcia = socket[0].pcmcia;
- dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
- "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
- state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
/* First, set voltage - bail out if invalid */
- if(voltage_set(lsock, state->Vcc, state->Vpp))
+ if (voltage_set(lsock, state->Vcc, state->Vpp))
return -EINVAL;
-
/* Take care of reset... */
- if(state->flags & SS_RESET)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
+ if (state->flags & SS_RESET)
+ out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
/* ... and output enable. */
@@ -846,10 +854,11 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
no pullups are present -> the cards act wierd.
So right now the buffers are enabled if the power is on. */
- if(state->Vcc || state->Vpp)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
+ if (state->Vcc || state->Vpp)
+ out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
/*
* We'd better turn off interrupts before
@@ -866,17 +875,17 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
e = &s->events[0];
reg = 0;
- if(state->csc_mask & SS_DETECT) {
+ if (state->csc_mask & SS_DETECT) {
e->eventbit = SS_DETECT;
reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
| M8XX_PCMCIA_CD1(lsock));
e++;
}
- if(state->flags & SS_IOCARD) {
+ if (state->flags & SS_IOCARD) {
/*
* I/O card
*/
- if(state->csc_mask & SS_STSCHG) {
+ if (state->csc_mask & SS_STSCHG) {
e->eventbit = SS_STSCHG;
reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
e++;
@@ -884,9 +893,10 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
/*
* If io_irq is non-zero we should enable irq.
*/
- if(state->io_irq) {
+ if (state->io_irq) {
out_be32(M8XX_PGCRX(lsock),
- in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(s->hwirq) << 24);
+ in_be32(M8XX_PGCRX(lsock)) |
+ mk_int_int_mask(s->hwirq) << 24);
/*
* Strange thing here:
* The manual does not tell us which interrupt
@@ -897,33 +907,32 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
* have to be cleared in PSCR in the interrupt handler.
*/
reg |= M8XX_PCMCIA_RDY_L(lsock);
- }
- else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
- }
- else {
+ } else
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
+ } else {
/*
* Memory card
*/
- if(state->csc_mask & SS_BATDEAD) {
+ if (state->csc_mask & SS_BATDEAD) {
e->eventbit = SS_BATDEAD;
reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
e++;
}
- if(state->csc_mask & SS_BATWARN) {
+ if (state->csc_mask & SS_BATWARN) {
e->eventbit = SS_BATWARN;
reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
e++;
}
/* What should I trigger on - low/high,raise,fall? */
- if(state->csc_mask & SS_READY) {
+ if (state->csc_mask & SS_READY) {
e->eventbit = SS_READY;
- reg |= e->regbit = 0; //??
+ reg |= e->regbit = 0; //??
e++;
}
}
- e->regbit = 0; /* terminate list */
+ e->regbit = 0; /* terminate list */
/*
* Clear the status changed .
@@ -940,7 +949,9 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
* Ones will enable the interrupt.
*/
- reg |= in_be32(&pcmcia->pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
+ reg |=
+ in_be32(&pcmcia->
+ pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
out_be32(&pcmcia->pcmc_per, reg);
spin_unlock_irqrestore(&events_lock, flags);
@@ -961,67 +972,66 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
unsigned int reg, winnr;
pcmconf8xx_t *pcmcia = s->pcmcia;
-
#define M8XX_SIZE (io->stop - io->start + 1)
#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
- dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
- io->speed, io->start, io->stop);
+ dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
+ "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
+ io->speed, io->start, io->stop);
if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
|| (io->stop > 0xffff) || (io->stop < io->start))
return -EINVAL;
- if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
+ if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
return -EINVAL;
- if(io->flags & MAP_ACTIVE) {
+ if (io->flags & MAP_ACTIVE) {
- dprintk( "io->flags & MAP_ACTIVE\n");
+ dprintk("io->flags & MAP_ACTIVE\n");
winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
+ + (lsock * PCMCIA_IO_WIN_NO) + io->map;
/* setup registers */
- w = (void *) &pcmcia->pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
- out_be32(&w->or, 0); /* turn off window first */
+ out_be32(&w->or, 0); /* turn off window first */
out_be32(&w->br, M8XX_BASE);
reg <<= 27;
- reg |= M8XX_PCMCIA_POR_IO |(lsock << 2);
+ reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
- if(io->flags & MAP_WRPROT)
+ if (io->flags & MAP_WRPROT)
reg |= M8XX_PCMCIA_POR_WRPROT;
- /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ))*/
- if(io->flags & MAP_16BIT)
+ /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
+ if (io->flags & MAP_16BIT)
reg |= M8XX_PCMCIA_POR_16BIT;
- if(io->flags & MAP_ACTIVE)
+ if (io->flags & MAP_ACTIVE)
reg |= M8XX_PCMCIA_POR_VALID;
out_be32(&w->or, reg);
dprintk("Socket %u: Mapped io window %u at %#8.8x, "
- "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
+ "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
} else {
/* shutdown IO window */
winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
+ + (lsock * PCMCIA_IO_WIN_NO) + io->map;
/* setup registers */
- w = (void *) &pcmcia->pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
- out_be32(&w->or, 0); /* turn off window */
- out_be32(&w->br, 0); /* turn off base address */
+ out_be32(&w->or, 0); /* turn off window */
+ out_be32(&w->br, 0); /* turn off base address */
dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
"OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
@@ -1029,15 +1039,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
/* copy the struct and modify the copy */
s->io_win[io->map] = *io;
- s->io_win[io->map].flags &= (MAP_WRPROT
- | MAP_16BIT
- | MAP_ACTIVE);
+ s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
dprintk("SetIOMap exit\n");
return 0;
}
-static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
+static int m8xx_set_mem_map(struct pcmcia_socket *sock,
+ struct pccard_mem_map *mem)
{
int lsock = container_of(sock, struct socket_info, socket)->slot;
struct socket_info *s = &socket[lsock];
@@ -1046,19 +1055,19 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
unsigned int reg, winnr;
pcmconf8xx_t *pcmcia = s->pcmcia;
- dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, mem->static_start, mem->card_start);
+ dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
+ "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+ mem->speed, mem->static_start, mem->card_start);
if ((mem->map >= PCMCIA_MEM_WIN_NO)
-// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
+// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
|| (mem->card_start >= 0x04000000)
- || (mem->static_start & 0xfff) /* 4KByte resolution */
- || (mem->card_start & 0xfff))
+ || (mem->static_start & 0xfff) /* 4KByte resolution */
+ ||(mem->card_start & 0xfff))
return -EINVAL;
- if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
- printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
+ if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
+ printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
return -EINVAL;
}
reg <<= 27;
@@ -1067,50 +1076,47 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
/* Setup the window in the pcmcia controller */
- w = (void *) &pcmcia->pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
reg |= lsock << 2;
reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
- if(mem->flags & MAP_ATTRIB)
- reg |= M8XX_PCMCIA_POR_ATTRMEM;
+ if (mem->flags & MAP_ATTRIB)
+ reg |= M8XX_PCMCIA_POR_ATTRMEM;
- if(mem->flags & MAP_WRPROT)
+ if (mem->flags & MAP_WRPROT)
reg |= M8XX_PCMCIA_POR_WRPROT;
- if(mem->flags & MAP_16BIT)
+ if (mem->flags & MAP_16BIT)
reg |= M8XX_PCMCIA_POR_16BIT;
- if(mem->flags & MAP_ACTIVE)
+ if (mem->flags & MAP_ACTIVE)
reg |= M8XX_PCMCIA_POR_VALID;
out_be32(&w->or, reg);
dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
- "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
+ "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
- if(mem->flags & MAP_ACTIVE) {
+ if (mem->flags & MAP_ACTIVE) {
/* get the new base address */
mem->static_start = PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE * winnr)
- + mem->card_start;
+ (PCMCIA_MEM_WIN_SIZE * winnr)
+ + mem->card_start;
}
dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, mem->static_start, mem->card_start);
+ "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+ mem->speed, mem->static_start, mem->card_start);
/* copy the struct and modify the copy */
old = &s->mem_win[mem->map];
*old = *mem;
- old->flags &= (MAP_ATTRIB
- | MAP_WRPROT
- | MAP_16BIT
- | MAP_ACTIVE);
+ old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
return 0;
}
@@ -1121,7 +1127,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
pccard_io_map io = { 0, 0, 0, 0, 1 };
pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
- dprintk( "sock_init(%d)\n", s);
+ dprintk("sock_init(%d)\n", s);
m8xx_set_socket(sock, &dead_socket);
for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
@@ -1143,7 +1149,7 @@ static int m8xx_sock_suspend(struct pcmcia_socket *sock)
}
static struct pccard_operations m8xx_services = {
- .init = m8xx_sock_init,
+ .init = m8xx_sock_init,
.suspend = m8xx_sock_suspend,
.get_status = m8xx_get_status,
.set_socket = m8xx_set_socket,
@@ -1151,7 +1157,8 @@ static struct pccard_operations m8xx_services = {
.set_mem_map = m8xx_set_mem_map,
};
-static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id *match)
+static int __init m8xx_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
{
struct pcmcia_win *w;
unsigned int i, m, hwirq;
@@ -1162,49 +1169,50 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
pcmcia_info("%s\n", version);
pcmcia = of_iomap(np, 0);
- if(pcmcia == NULL)
+ if (pcmcia == NULL)
return -EINVAL;
pcmcia_schlvl = irq_of_parse_and_map(np, 0);
- hwirq = irq_map[pcmcia_schlvl].hwirq;
+ hwirq = irq_map[pcmcia_schlvl].hwirq;
if (pcmcia_schlvl < 0)
return -EINVAL;
m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
-
pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
" with IRQ %u (%d). \n", pcmcia_schlvl, hwirq);
/* Configure Status change interrupt */
- if(request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
- driver_name, socket)) {
+ if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
+ driver_name, socket)) {
pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
pcmcia_schlvl);
return -1;
}
- w = (void *) &pcmcia->pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
- out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1));
+ out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
/* connect interrupt and disable CxOE */
- out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
- out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
+ out_be32(M8XX_PGCRX(0),
+ M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
+ out_be32(M8XX_PGCRX(1),
+ M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
/* intialize the fixed memory windows */
- for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE
- * (m + i * PCMCIA_MEM_WIN_NO)));
+ (PCMCIA_MEM_WIN_SIZE
+ * (m + i * PCMCIA_MEM_WIN_NO)));
- out_be32(&w->or, 0); /* set to not valid */
+ out_be32(&w->or, 0); /* set to not valid */
w++;
}
@@ -1218,10 +1226,11 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
hardware_enable(0);
hardware_enable(1);
- for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) {
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
socket[i].slot = i;
socket[i].socket.owner = THIS_MODULE;
- socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
+ socket[i].socket.features =
+ SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
socket[i].socket.irq_mask = 0x000;
socket[i].socket.map_size = 0x1000;
socket[i].socket.io_offset = 0;
@@ -1234,7 +1243,6 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
socket[i].bus_freq = ppc_proc_freq;
socket[i].hwirq = hwirq;
-
}
for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
@@ -1246,25 +1254,25 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
return 0;
}
-static int m8xx_remove(struct of_device* ofdev)
+static int m8xx_remove(struct of_device *ofdev)
{
u32 m, i;
struct pcmcia_win *w;
pcmconf8xx_t *pcmcia = socket[0].pcmcia;
for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- w = (void *) &pcmcia->pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
out_be32(&pcmcia->pcmc_per,
- in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
+ in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
/* turn off interrupt and disable CxOE */
out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
/* turn off memory windows */
for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
- out_be32(&w->or, 0); /* set to not valid */
+ out_be32(&w->or, 0); /* set to not valid */
w++;
}
@@ -1299,21 +1307,21 @@ static int m8xx_resume(struct platform_device *pdev)
static struct of_device_id m8xx_pcmcia_match[] = {
{
- .type = "pcmcia",
- .compatible = "fsl,pq-pcmcia",
- },
+ .type = "pcmcia",
+ .compatible = "fsl,pq-pcmcia",
+ },
{},
};
MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
static struct of_platform_driver m8xx_pcmcia_driver = {
- .name = (char *) driver_name,
- .match_table = m8xx_pcmcia_match,
- .probe = m8xx_probe,
- .remove = m8xx_remove,
- .suspend = m8xx_suspend,
- .resume = m8xx_resume,
+ .name = (char *)driver_name,
+ .match_table = m8xx_pcmcia_match,
+ .probe = m8xx_probe,
+ .remove = m8xx_remove,
+ .suspend = m8xx_suspend,
+ .resume = m8xx_resume,
};
static int __init m8xx_init(void)
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 3a201b77b96..03baf1c64a2 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -160,6 +160,7 @@ static int pnp_dock_thread(void * unused)
{
static struct pnp_docking_station_info now;
int docked = -1, d = 0;
+ set_freezable();
while (!unloading)
{
int status;
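The single added line above opts the pnpbios dock thread into the reworked freezer: kernel threads now have to declare themselves freezable explicitly before suspend can stop them. A minimal sketch of the general pattern (illustrative only; the thread body and names are made up, this is not the pnpbios code):

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
	set_freezable();		/* opt in to the freezer */
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the system suspends */
		/* ... poll the hardware ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}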
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 83b071b6ece..cea401feb0f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -10,7 +10,6 @@ config RTC_LIB
config RTC_CLASS
tristate "RTC class"
- depends on EXPERIMENTAL
default n
select RTC_LIB
help
@@ -119,7 +118,7 @@ config RTC_DRV_TEST
will be called rtc-test.
comment "I2C RTC drivers"
- depends on RTC_CLASS
+ depends on RTC_CLASS && I2C
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00"
@@ -160,11 +159,11 @@ config RTC_DRV_MAX6900
will be called rtc-max6900.
config RTC_DRV_RS5C372
- tristate "Ricoh RS5C372A/B"
+ tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
depends on RTC_CLASS && I2C
help
If you say yes here you get support for the
- Ricoh RS5C372A and RS5C372B RTC chips.
+ Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rs5c372.
@@ -213,12 +212,40 @@ config RTC_DRV_PCF8583
This driver can also be built as a module. If so, the module
will be called rtc-pcf8583.
+config RTC_DRV_M41T80
+ tristate "ST M41T80 series RTC"
+ depends on RTC_CLASS && I2C
+ help
+ If you say Y here you will get support for the
+	  ST M41T80 RTC chip series. Currently the following chips are
+	  supported: M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85,
+	  and M41ST87.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-m41t80.
+
+config RTC_DRV_M41T80_WDT
+ bool "ST M41T80 series RTC watchdog timer"
+ depends on RTC_DRV_M41T80
+ help
+ If you say Y here you will get support for the
+	  watchdog timer in the ST M41T80 RTC chip series.
+
+config RTC_DRV_TWL92330
+ boolean "TI TWL92330/Menelaus"
+ depends on RTC_CLASS && I2C && MENELAUS
+ help
+ If you say yes here you get support for the RTC on the
+	  TWL92330 "Menelaus" power management chip, used with OMAP2
+ platforms. The support is integrated with the rest of
+	  the Menelaus driver; it's not a separate module.
+
comment "SPI RTC drivers"
- depends on RTC_CLASS
+ depends on RTC_CLASS && SPI_MASTER
config RTC_DRV_RS5C348
tristate "Ricoh RS5C348A/B"
- depends on RTC_CLASS && SPI
+ depends on RTC_CLASS && SPI_MASTER
help
If you say yes here you get support for the
Ricoh RS5C348A and RS5C348B RTC chips.
@@ -228,7 +255,7 @@ config RTC_DRV_RS5C348
config RTC_DRV_MAX6902
tristate "Maxim 6902"
- depends on RTC_CLASS && SPI
+ depends on RTC_CLASS && SPI_MASTER
help
If you say yes here you will get support for the
Maxim MAX6902 SPI RTC chip.
@@ -262,6 +289,12 @@ config RTC_DRV_CMOS
This driver can also be built as a module. If so, the module
will be called rtc-cmos.
+config RTC_DRV_DS1216
+ tristate "Dallas DS1216"
+ depends on RTC_CLASS && SNI_RM
+ help
+ If you say yes here you get support for the Dallas DS1216 RTC chips.
+
config RTC_DRV_DS1553
tristate "Dallas DS1553"
depends on RTC_CLASS
@@ -292,6 +325,16 @@ config RTC_DRV_M48T86
This driver can also be built as a module. If so, the module
will be called rtc-m48t86.
+config RTC_DRV_M48T59
+ tristate "ST M48T59"
+ depends on RTC_CLASS
+ help
+ If you say Y here you will get support for the
+ ST M48T59 RTC chip.
+
+	  This driver can also be built as a module; if so, the module
+ will be called "rtc-m48t59".
+
config RTC_DRV_V3020
tristate "EM Microelectronic V3020"
depends on RTC_CLASS
@@ -379,6 +422,13 @@ config RTC_DRV_PL031
To compile this driver as a module, choose M here: the
module will be called rtc-pl031.
+config RTC_DRV_AT32AP700X
+ tristate "AT32AP700X series RTC"
+ depends on RTC_CLASS && PLATFORM_AT32AP
+ help
+ Driver for the internal RTC (Realtime Clock) on Atmel AVR32
+ AT32AP700x family processors.
+
config RTC_DRV_AT91RM9200
tristate "AT91RM9200"
depends on RTC_CLASS && ARCH_AT91RM9200
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a1afbc23607..3109af9a165 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_AT32AP700X) += rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
+obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
@@ -41,3 +43,5 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
+obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
new file mode 100644
index 00000000000..2999214ca53
--- /dev/null
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -0,0 +1,317 @@
+/*
+ * An RTC driver for the AVR32 AT32AP700x processor series.
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+/*
+ * This is a bare-bones RTC. It runs during most system sleep states, but has
+ * no battery backup and gets reset during system restart. It must be
+ * initialized from an external clock (network, I2C, etc) before it can be of
+ * much use.
+ *
+ * The alarm functionality is limited by the hardware, not supporting
+ * periodic interrupts.
+ */
+
+#define RTC_CTRL 0x00
+#define RTC_CTRL_EN 0
+#define RTC_CTRL_PCLR 1
+#define RTC_CTRL_TOPEN 2
+#define RTC_CTRL_PSEL 8
+
+#define RTC_VAL 0x04
+
+#define RTC_TOP 0x08
+
+#define RTC_IER 0x10
+#define RTC_IER_TOPI 0
+
+#define RTC_IDR 0x14
+#define RTC_IDR_TOPI 0
+
+#define RTC_IMR 0x18
+#define RTC_IMR_TOPI 0
+
+#define RTC_ISR 0x1c
+#define RTC_ISR_TOPI 0
+
+#define RTC_ICR 0x20
+#define RTC_ICR_TOPI 0
+
+#define RTC_BIT(name) (1 << RTC_##name)
+#define RTC_BF(name, value) ((value) << RTC_##name)
+
+#define rtc_readl(dev, reg) \
+ __raw_readl((dev)->regs + RTC_##reg)
+#define rtc_writel(dev, reg, value) \
+ __raw_writel((value), (dev)->regs + RTC_##reg)
+
+struct rtc_at32ap700x {
+ struct rtc_device *rtc;
+ void __iomem *regs;
+ unsigned long alarm_time;
+ unsigned long irq;
+ /* Protect against concurrent register access. */
+ spinlock_t lock;
+};
+
+static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+
+ now = rtc_readl(rtc, VAL);
+ rtc_time_to_tm(now, tm);
+
+ return 0;
+}
+
+static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+ int ret;
+
+ ret = rtc_tm_to_time(tm, &now);
+ if (ret == 0)
+ rtc_writel(rtc, VAL, now);
+
+ return ret;
+}
+
+static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc->alarm_time, &alrm->time);
+ alrm->pending = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
+
+ return 0;
+}
+
+static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long rtc_unix_time;
+ unsigned long alarm_unix_time;
+ int ret;
+
+ rtc_unix_time = rtc_readl(rtc, VAL);
+
+ ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
+ if (ret)
+ return ret;
+
+ if (alarm_unix_time < rtc_unix_time)
+ return -EINVAL;
+
+ spin_lock_irq(&rtc->lock);
+ rtc->alarm_time = alarm_unix_time;
+ rtc_writel(rtc, TOP, rtc->alarm_time);
+ if (alrm->pending)
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ | RTC_BIT(CTRL_TOPEN));
+ else
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ spin_unlock_irq(&rtc->lock);
+
+ return ret;
+}
+
+static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ spin_lock_irq(&rtc->lock);
+
+ switch (cmd) {
+ case RTC_AIE_ON:
+ if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
+ ret = -EINVAL;
+ break;
+ }
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ | RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
+ break;
+ case RTC_AIE_OFF:
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ spin_unlock_irq(&rtc->lock);
+
+ return ret;
+}
+
+static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
+{
+ struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
+ unsigned long isr = rtc_readl(rtc, ISR);
+ unsigned long events = 0;
+ int ret = IRQ_NONE;
+
+ spin_lock(&rtc->lock);
+
+ if (isr & RTC_BIT(ISR_TOPI)) {
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, VAL, rtc->alarm_time);
+ events = RTC_AF | RTC_IRQF;
+ rtc_update_irq(rtc->rtc, 1, events);
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&rtc->lock);
+
+ return ret;
+}
+
+static struct rtc_class_ops at32_rtc_ops = {
+ .ioctl = at32_rtc_ioctl,
+ .read_time = at32_rtc_readtime,
+ .set_time = at32_rtc_settime,
+ .read_alarm = at32_rtc_readalarm,
+ .set_alarm = at32_rtc_setalarm,
+};
+
+static int __init at32_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct rtc_at32ap700x *rtc;
+ int irq = -1;
+ int ret;
+
+ rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
+ if (!rtc) {
+ dev_dbg(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_dbg(&pdev->dev, "no mmio resource defined\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "could not get irq\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
+ goto out;
+ }
+
+ rtc->irq = irq;
+ rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ if (!rtc->regs) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "could not map I/O memory\n");
+ goto out_free_irq;
+ }
+ spin_lock_init(&rtc->lock);
+
+ /*
+ * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
+ *
+ * Do not reset VAL register, as it can hold an old time
+ * from last JTAG reset.
+ */
+ if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
+ rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
+ | RTC_BIT(CTRL_EN));
+ }
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &at32_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ dev_dbg(&pdev->dev, "could not register rtc device\n");
+ ret = PTR_ERR(rtc->rtc);
+ goto out_iounmap;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
+ (unsigned long)rtc->regs, rtc->irq);
+
+ return 0;
+
+out_iounmap:
+ iounmap(rtc->regs);
+out_free_irq:
+ free_irq(irq, rtc);
+out:
+ kfree(rtc);
+ return ret;
+}
+
+static int __exit at32_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
+
+ free_irq(rtc->irq, rtc);
+ iounmap(rtc->regs);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_ALIAS("at32ap700x_rtc");
+
+static struct platform_driver at32_rtc_driver = {
+ .remove = __exit_p(at32_rtc_remove),
+ .driver = {
+ .name = "at32ap700x_rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at32_rtc_init(void)
+{
+ return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
+}
+module_init(at32_rtc_init);
+
+static void __exit at32_rtc_exit(void)
+{
+ platform_driver_unregister(&at32_rtc_driver);
+}
+module_exit(at32_rtc_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
+MODULE_LICENSE("GPL");
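One pattern worth noting in the new driver above: at32_rtc_probe() is marked __init and is deliberately absent from the platform_driver structure; it is handed to platform_driver_probe() instead, so the probe code can be discarded after boot, the on-chip RTC not being hot-pluggable. A small sketch of that registration idiom, with hypothetical names:

#include <linux/init.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* runs once at boot; may live in the __init section */
	return 0;
}

static struct platform_driver foo_driver = {
	/* note: no .probe member - it would point into discarded __init code */
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	return platform_driver_probe(&foo_driver, foo_probe);
}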
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f4e5f0040ff..304535942de 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,6 +341,8 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
case RTC_IRQP_READ:
if (ops->irq_set_freq)
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
+ else
+ err = -ENOTTY;
break;
case RTC_IRQP_SET:
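The hunk above makes RTC_IRQP_READ report -ENOTTY when the underlying driver does not implement irq_set_freq, so callers get an explicit error when the operation is unsupported. From userspace that looks roughly like the following sketch (device path and messages are illustrative only):

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long freq;
	int fd = open("/dev/rtc0", O_RDONLY);	/* or /dev/rtc on older setups */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_IRQP_READ, &freq) < 0) {
		if (errno == ENOTTY)
			fprintf(stderr, "periodic interrupts not supported\n");
	} else {
		printf("periodic irq frequency: %lu Hz\n", freq);
	}
	close(fd);
	return 0;
}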
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
new file mode 100644
index 00000000000..83efb88f8f2
--- /dev/null
+++ b/drivers/rtc/rtc-ds1216.c
@@ -0,0 +1,226 @@
+/*
+ * Dallas DS1216 RTC driver
+ *
+ * Copyright (c) 2007 Thomas Bogendoerfer
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/bcd.h>
+
+#define DRV_VERSION "0.1"
+
+struct ds1216_regs {
+ u8 tsec;
+ u8 sec;
+ u8 min;
+ u8 hour;
+ u8 wday;
+ u8 mday;
+ u8 month;
+ u8 year;
+};
+
+#define DS1216_HOUR_1224 (1 << 7)
+#define DS1216_HOUR_AMPM (1 << 5)
+
+struct ds1216_priv {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr;
+ size_t size;
+ unsigned long baseaddr;
+};
+
+static const u8 magic[] = {
+ 0xc5, 0x3a, 0xa3, 0x5c, 0xc5, 0x3a, 0xa3, 0x5c
+};
+
+/*
+ * Read the 64 bits we'd like to have - it's a series
+ * of 64 bits showing up in the LSB of the base register.
+ *
+ */
+static void ds1216_read(u8 __iomem *ioaddr, u8 *buf)
+{
+ unsigned char c;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ c = 0;
+ for (j = 0; j < 8; j++)
+ c |= (readb(ioaddr) & 0x1) << j;
+ buf[i] = c;
+ }
+}
+
+static void ds1216_write(u8 __iomem *ioaddr, const u8 *buf)
+{
+ unsigned char c;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ c = buf[i];
+ for (j = 0; j < 8; j++) {
+ writeb(c, ioaddr);
+ c = c >> 1;
+ }
+ }
+}
+
+static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
+{
+ /* Reset magic pointer */
+ readb(ioaddr);
+ /* Write 64 bit magic to DS1216 */
+ ds1216_write(ioaddr, magic);
+}
+
+static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_regs regs;
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+ tm->tm_sec = BCD2BIN(regs.sec);
+ tm->tm_min = BCD2BIN(regs.min);
+ if (regs.hour & DS1216_HOUR_1224) {
+ /* AM/PM mode */
+ tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
+ if (regs.hour & DS1216_HOUR_AMPM)
+ tm->tm_hour += 12;
+ } else
+ tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
+ tm->tm_wday = (regs.wday & 7) - 1;
+ tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
+ tm->tm_mon = BCD2BIN(regs.month & 0x1f);
+ tm->tm_year = BCD2BIN(regs.year);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100;
+ return 0;
+}
+
+static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_regs regs;
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+ regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
+ regs.sec = BIN2BCD(tm->tm_sec);
+ regs.min = BIN2BCD(tm->tm_min);
+ regs.hour &= DS1216_HOUR_1224;
+ if (regs.hour && tm->tm_hour > 12) {
+ regs.hour |= DS1216_HOUR_AMPM;
+ tm->tm_hour -= 12;
+ }
+ regs.hour |= BIN2BCD(tm->tm_hour);
+ regs.wday &= ~7;
+ regs.wday |= tm->tm_wday;
+ regs.mday = BIN2BCD(tm->tm_mday);
+ regs.month = BIN2BCD(tm->tm_mon);
+ regs.year = BIN2BCD(tm->tm_year % 100);
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_write(priv->ioaddr, (u8 *)&regs);
+ return 0;
+}
+
+static const struct rtc_class_ops ds1216_rtc_ops = {
+ .read_time = ds1216_rtc_read_time,
+ .set_time = ds1216_rtc_set_time,
+};
+
+static int __devinit ds1216_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct ds1216_priv *priv;
+ int ret = 0;
+ u8 dummy[8];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->size = res->end - res->start + 1;
+ if (!request_mem_region(res->start, priv->size, pdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->baseaddr = res->start;
+ priv->ioaddr = ioremap(priv->baseaddr, priv->size);
+ if (!priv->ioaddr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rtc = rtc_device_register("ds1216", &pdev->dev,
+ &ds1216_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto out;
+ }
+ priv->rtc = rtc;
+ platform_set_drvdata(pdev, priv);
+
+ /* dummy read to get clock into a known state */
+ ds1216_read(priv->ioaddr, dummy);
+ return 0;
+
+out:
+ if (priv->rtc)
+ rtc_device_unregister(priv->rtc);
+ if (priv->ioaddr)
+ iounmap(priv->ioaddr);
+ if (priv->baseaddr)
+ release_mem_region(priv->baseaddr, priv->size);
+ kfree(priv);
+ return ret;
+}
+
+static int __devexit ds1216_rtc_remove(struct platform_device *pdev)
+{
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(priv->rtc);
+ iounmap(priv->ioaddr);
+ release_mem_region(priv->baseaddr, priv->size);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver ds1216_rtc_platform_driver = {
+ .driver = {
+ .name = "rtc-ds1216",
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1216_rtc_probe,
+ .remove = __devexit_p(ds1216_rtc_remove),
+};
+
+static int __init ds1216_rtc_init(void)
+{
+ return platform_driver_register(&ds1216_rtc_platform_driver);
+}
+
+static void __exit ds1216_rtc_exit(void)
+{
+ platform_driver_unregister(&ds1216_rtc_platform_driver);
+}
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
+MODULE_DESCRIPTION("DS1216 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ds1216_rtc_init);
+module_exit(ds1216_rtc_exit);
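Like the ds1307 changes that follow, this driver talks to the chip in BCD: each register nibble holds one decimal digit, so 0x47 means decimal 47. The BCD2BIN()/BIN2BCD() helpers used throughout come from <linux/bcd.h> and boil down to the following (reproduced here for illustration):

#define BCD2BIN(val)	(((val) & 0x0f) + ((val) >> 4) * 10)	/* 0x47 -> 47 */
#define BIN2BCD(val)	((((val) / 10) << 4) + (val) % 10)	/* 47 -> 0x47 */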
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 3f0f7b8fa81..5158a625671 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,29 +24,29 @@
* setting the date and time), Linux can ignore the non-clock features.
* That's a natural job for a factory or repair bench.
*
- * If the I2C "force" mechanism is used, we assume the chip is a ds1337.
- * (Much better would be board-specific tables of I2C devices, along with
- * the platform_data drivers would use to sort such issues out.)
+ * This is currently a simple no-alarms driver. If your board has the
+ * alarm irq wired up on a ds1337 or ds1339, and you want to use that,
+ * then look at the rtc-rs5c372 driver for code to steal...
*/
enum ds_type {
- unknown = 0,
- ds_1307, /* or ds1338, ... */
- ds_1337, /* or ds1339, ... */
- ds_1340, /* or st m41t00, ... */
+ ds_1307,
+ ds_1337,
+ ds_1338,
+ ds_1339,
+ ds_1340,
+ m41t00,
// rs5c372 too? different address...
};
-static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
-
/* RTC registers don't differ much, except for the century flag */
#define DS1307_REG_SECS 0x00 /* 00-59 */
# define DS1307_BIT_CH 0x80
+# define DS1340_BIT_nEOSC 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
+# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
+# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
@@ -56,11 +56,12 @@ I2C_CLIENT_INSMOD;
#define DS1307_REG_YEAR 0x06 /* 00-99 */
/* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
- * start at 7, and they differ a lot. Only control and status matter for RTC;
- * be careful using them.
+ * start at 7, and they differ a LOT. Only control and status matter for
+ * basic RTC date and time functionality; be careful using them.
*/
-#define DS1307_REG_CONTROL 0x07
+#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
# define DS1307_BIT_OUT 0x80
+# define DS1338_BIT_OSF 0x20
# define DS1307_BIT_SQWE 0x10
# define DS1307_BIT_RS1 0x02
# define DS1307_BIT_RS0 0x01
@@ -71,6 +72,13 @@ I2C_CLIENT_INSMOD;
# define DS1337_BIT_INTCN 0x04
# define DS1337_BIT_A2IE 0x02
# define DS1337_BIT_A1IE 0x01
+#define DS1340_REG_CONTROL 0x07
+# define DS1340_BIT_OUT 0x80
+# define DS1340_BIT_FT 0x40
+# define DS1340_BIT_CALIB_SIGN 0x20
+# define DS1340_M_CALIBRATION 0x1f
+#define DS1340_REG_FLAG 0x09
+# define DS1340_BIT_OSF 0x80
#define DS1337_REG_STATUS 0x0f
# define DS1337_BIT_OSF 0x80
# define DS1337_BIT_A2I 0x02
@@ -84,21 +92,63 @@ struct ds1307 {
u8 regs[8];
enum ds_type type;
struct i2c_msg msg[2];
- struct i2c_client client;
+ struct i2c_client *client;
+ struct i2c_client dev;
struct rtc_device *rtc;
};
+struct chip_desc {
+ char name[9];
+ unsigned nvram56:1;
+ unsigned alarm:1;
+ enum ds_type type;
+};
+
+static const struct chip_desc chips[] = { {
+ .name = "ds1307",
+ .type = ds_1307,
+ .nvram56 = 1,
+}, {
+ .name = "ds1337",
+ .type = ds_1337,
+ .alarm = 1,
+}, {
+ .name = "ds1338",
+ .type = ds_1338,
+ .nvram56 = 1,
+}, {
+ .name = "ds1339",
+ .type = ds_1339,
+ .alarm = 1,
+}, {
+ .name = "ds1340",
+ .type = ds_1340,
+}, {
+ .name = "m41t00",
+ .type = m41t00,
+}, };
+
+static inline const struct chip_desc *find_chip(const char *s)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(chips); i++)
+ if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
+ return &chips[i];
+ return NULL;
+}
static int ds1307_get_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int tmp;
- /* read the RTC registers all at once */
+ /* read the RTC date and time registers all at once */
ds1307->msg[1].flags = I2C_M_RD;
ds1307->msg[1].len = 7;
- tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2);
+ tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+ ds1307->msg, 2);
if (tmp != 2) {
dev_err(dev, "%s error %d\n", "read", tmp);
return -EIO;
@@ -129,7 +179,8 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
- return 0;
+ /* initial clock setting can be undefined */
+ return rtc_valid_tm(t);
}
static int ds1307_set_time(struct device *dev, struct rtc_time *t)
@@ -157,11 +208,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
tmp = t->tm_year - 100;
buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
- if (ds1307->type == ds_1337)
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
- else if (ds1307->type == ds_1340)
+ break;
+ case ds_1340:
buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
| DS1340_BIT_CENTURY;
+ break;
+ default:
+ break;
+ }
ds1307->msg[1].flags = 0;
ds1307->msg[1].len = 8;
@@ -170,7 +228,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
"write", buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6]);
- result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1);
+ result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+ &ds1307->msg[1], 1);
if (result != 1) {
dev_err(dev, "%s error %d\n", "write", tmp);
return -EIO;
@@ -185,25 +244,29 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
static struct i2c_driver ds1307_driver;
-static int __devinit
-ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
+static int __devinit ds1307_probe(struct i2c_client *client)
{
struct ds1307 *ds1307;
int err = -ENODEV;
- struct i2c_client *client;
int tmp;
-
- if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) {
- err = -ENOMEM;
- goto exit;
+ const struct chip_desc *chip;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+
+ chip = find_chip(client->name);
+ if (!chip) {
+ dev_err(&client->dev, "unknown chip type '%s'\n",
+ client->name);
+ return -ENODEV;
}
- client = &ds1307->client;
- client->addr = address;
- client->adapter = adapter;
- client->driver = &ds1307_driver;
- client->flags = 0;
+ if (!i2c_check_functionality(adapter,
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EIO;
+
+ if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
+ return -ENOMEM;
+ ds1307->client = client;
i2c_set_clientdata(client, ds1307);
ds1307->msg[0].addr = client->addr;
@@ -216,14 +279,16 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
ds1307->msg[1].len = sizeof(ds1307->regs);
ds1307->msg[1].buf = ds1307->regs;
- /* HACK: "force" implies "needs ds1337-style-oscillator setup" */
- if (kind >= 0) {
- ds1307->type = ds_1337;
+ ds1307->type = chip->type;
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
ds1307->reg_addr = DS1337_REG_CONTROL;
ds1307->msg[1].len = 2;
- tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+ /* get registers that the "rtc" read below won't read... */
+ tmp = i2c_transfer(adapter, ds1307->msg, 2);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@@ -233,19 +298,26 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
ds1307->reg_addr = 0;
ds1307->msg[1].len = sizeof(ds1307->regs);
- /* oscillator is off; need to turn it on */
- if ((ds1307->regs[0] & DS1337_BIT_nEOSC)
- || (ds1307->regs[1] & DS1337_BIT_OSF)) {
- printk(KERN_ERR "no ds1337 oscillator code\n");
- goto exit_free;
+ /* oscillator off? turn it on, so clock can tick. */
+ if (ds1307->regs[0] & DS1337_BIT_nEOSC)
+ i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
+ ds1307->regs[0] & ~DS1337_BIT_nEOSC);
+
+ /* oscillator fault? clear flag, and warn */
+ if (ds1307->regs[1] & DS1337_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
+ ds1307->regs[1] & ~DS1337_BIT_OSF);
+ dev_warn(&client->dev, "SET TIME!\n");
}
- } else
- ds1307->type = ds_1307;
+ break;
+ default:
+ break;
+ }
read_rtc:
/* read RTC registers */
- tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+ tmp = i2c_transfer(adapter, ds1307->msg, 2);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@@ -257,72 +329,80 @@ read_rtc:
* still a few values that are clearly out-of-range.
*/
tmp = ds1307->regs[DS1307_REG_SECS];
- if (tmp & DS1307_BIT_CH) {
- if (ds1307->type && ds1307->type != ds_1307) {
- pr_debug("not a ds1307?\n");
- goto exit_free;
- }
- ds1307->type = ds_1307;
-
- /* this partial initialization should work for ds1307,
- * ds1338, ds1340, st m41t00, and more.
+ switch (ds1307->type) {
+ case ds_1340:
+ /* FIXME read register with DS1340_BIT_OSF, use that to
+ * trigger the "set time" warning (*after* restarting the
+ * oscillator!) instead of this weaker ds1307/m41t00 test.
*/
- dev_warn(&client->dev, "oscillator started; SET TIME!\n");
- i2c_smbus_write_byte_data(client, 0, 0);
- goto read_rtc;
+ case ds_1307:
+ case m41t00:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1307_BIT_CH) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+ dev_warn(&client->dev, "SET TIME!\n");
+ goto read_rtc;
+ }
+ break;
+ case ds_1338:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1307_BIT_CH)
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+
+ /* oscillator fault? clear flag, and warn */
+ if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
+ ds1307->regs[DS1337_REG_CONTROL]
+ & ~DS1338_BIT_OSF);
+ dev_warn(&client->dev, "SET TIME!\n");
+ goto read_rtc;
+ }
+ break;
+ case ds_1337:
+ case ds_1339:
+ break;
}
+
+ tmp = ds1307->regs[DS1307_REG_SECS];
tmp = BCD2BIN(tmp & 0x7f);
if (tmp > 60)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
if (tmp > 60)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
if (tmp == 0 || tmp > 31)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
if (tmp == 0 || tmp > 12)
- goto exit_free;
+ goto exit_bad;
- /* force into in 24 hour mode (most chips) or
- * disable century bit (ds1340)
- */
tmp = ds1307->regs[DS1307_REG_HOUR];
- if (tmp & (1 << 6)) {
- if (tmp & (1 << 5))
- tmp = BCD2BIN(tmp & 0x1f) + 12;
- else
- tmp = BCD2BIN(tmp);
- i2c_smbus_write_byte_data(client,
- DS1307_REG_HOUR,
- BIN2BCD(tmp));
- }
-
- /* FIXME chips like 1337 can generate alarm irqs too; those are
- * worth exposing through the API (especially when the irq is
- * wakeup-capable).
- */
-
switch (ds1307->type) {
- case unknown:
- strlcpy(client->name, "unknown", I2C_NAME_SIZE);
- break;
- case ds_1307:
- strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
- break;
- case ds_1337:
- strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
- break;
case ds_1340:
- strlcpy(client->name, "ds1340", I2C_NAME_SIZE);
+ case m41t00:
+ /* NOTE: ignores century bits; fix before deploying
+ * systems that will run through year 2100.
+ */
break;
- }
+ default:
+ if (!(tmp & DS1307_BIT_12HR))
+ break;
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(client)))
- goto exit_free;
+ /* Be sure we're in 24 hour mode. Multi-master systems
+ * take note...
+ */
+ tmp = BCD2BIN(tmp & 0x1f);
+ if (tmp == 12)
+ tmp = 0;
+ if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
+ tmp += 12;
+ i2c_smbus_write_byte_data(client,
+ DS1307_REG_HOUR,
+ BIN2BCD(tmp));
+ }
ds1307->rtc = rtc_device_register(client->name, &client->dev,
&ds13xx_rtc_ops, THIS_MODULE);
@@ -330,46 +410,40 @@ read_rtc:
err = PTR_ERR(ds1307->rtc);
dev_err(&client->dev,
"unable to register the class device\n");
- goto exit_detach;
+ goto exit_free;
}
return 0;
-exit_detach:
- i2c_detach_client(client);
+exit_bad:
+ dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
+ "bogus register",
+ ds1307->regs[0], ds1307->regs[1],
+ ds1307->regs[2], ds1307->regs[3],
+ ds1307->regs[4], ds1307->regs[5],
+ ds1307->regs[6]);
+
exit_free:
kfree(ds1307);
-exit:
return err;
}
-static int __devinit
-ds1307_attach_adapter(struct i2c_adapter *adapter)
-{
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return 0;
- return i2c_probe(adapter, &addr_data, ds1307_detect);
-}
-
-static int __devexit ds1307_detach_client(struct i2c_client *client)
+static int __devexit ds1307_remove(struct i2c_client *client)
{
- int err;
struct ds1307 *ds1307 = i2c_get_clientdata(client);
rtc_device_unregister(ds1307->rtc);
- if ((err = i2c_detach_client(client)))
- return err;
kfree(ds1307);
return 0;
}
static struct i2c_driver ds1307_driver = {
.driver = {
- .name = "ds1307",
+ .name = "rtc-ds1307",
.owner = THIS_MODULE,
},
- .attach_adapter = ds1307_attach_adapter,
- .detach_client = __devexit_p(ds1307_detach_client),
+ .probe = ds1307_probe,
+ .remove = __devexit_p(ds1307_remove),
};
static int __init ds1307_init(void)
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
new file mode 100644
index 00000000000..80c4a846306
--- /dev/null
+++ b/drivers/rtc/rtc-m41t80.c
@@ -0,0 +1,917 @@
+/*
+ * I2C client/driver for the ST M41T80 family of i2c rtc chips.
+ *
+ * Author: Alexander Bigga <ab@mycable.de>
+ *
+ * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) mycable GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#endif
+
+#define M41T80_REG_SSEC 0
+#define M41T80_REG_SEC 1
+#define M41T80_REG_MIN 2
+#define M41T80_REG_HOUR 3
+#define M41T80_REG_WDAY 4
+#define M41T80_REG_DAY 5
+#define M41T80_REG_MON 6
+#define M41T80_REG_YEAR 7
+#define M41T80_REG_ALARM_MON 0xa
+#define M41T80_REG_ALARM_DAY 0xb
+#define M41T80_REG_ALARM_HOUR 0xc
+#define M41T80_REG_ALARM_MIN 0xd
+#define M41T80_REG_ALARM_SEC 0xe
+#define M41T80_REG_FLAGS 0xf
+#define M41T80_REG_SQW 0x13
+
+#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
+#define M41T80_ALARM_REG_SIZE \
+ (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
+
+#define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */
+#define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */
+#define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */
+#define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */
+#define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */
+#define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */
+
+#define M41T80_FEATURE_HT (1 << 0)
+#define M41T80_FEATURE_BL (1 << 1)
+
+#define DRV_VERSION "0.05"
+
+struct m41t80_chip_info {
+ const char *name;
+ u8 features;
+};
+
+static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
+ {
+ .name = "m41t80",
+ .features = 0,
+ },
+ {
+ .name = "m41t81",
+ .features = M41T80_FEATURE_HT,
+ },
+ {
+ .name = "m41t81s",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41t82",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41t83",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st84",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st85",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st87",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+};
+
+struct m41t80_data {
+ const struct m41t80_chip_info *chip;
+ struct rtc_device *rtc;
+};
+
+static int m41t80_get_datetime(struct i2c_client *client,
+ struct rtc_time *tm)
+{
+ u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+ .buf = buf + M41T80_REG_SEC,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+
+ tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
+ tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
+ tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
+ tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
+ tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
+ tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
+
+ /* assume 20YY not 19YY, and ignore the Century Bit */
+ tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
+ return 0;
+}
+
+/* Sets the given date and time to the real time clock. */
+static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
+ u8 *buf = &wbuf[1];
+ u8 dt_addr[1] = { M41T80_REG_SEC };
+ struct i2c_msg msgs_in[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+ .buf = buf + M41T80_REG_SEC,
+ },
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1 + M41T80_DATETIME_REG_SIZE,
+ .buf = wbuf,
+ },
+ };
+
+ /* Read current reg values into buf[1..7] */
+ if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+
+ wbuf[0] = 0; /* offset into rtc's regs */
+ /* Merge time-data and register flags into buf[0..7] */
+ buf[M41T80_REG_SSEC] = 0;
+ buf[M41T80_REG_SEC] =
+ BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
+ buf[M41T80_REG_MIN] =
+ BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
+ buf[M41T80_REG_HOUR] =
+ BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
+ buf[M41T80_REG_WDAY] =
+ (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
+ buf[M41T80_REG_DAY] =
+ BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
+ buf[M41T80_REG_MON] =
+ BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
+ /* assume 20YY not 19YY */
+ buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+
+ if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
+ u8 reg;
+
+ if (clientdata->chip->features & M41T80_FEATURE_BL) {
+ reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ seq_printf(seq, "battery\t\t: %s\n",
+ (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
+ }
+ return 0;
+}
+#else
+#define m41t80_rtc_proc NULL
+#endif
+
+static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ return m41t80_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ return m41t80_set_datetime(to_i2c_client(dev), tm);
+}
+
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+static int
+m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int rc;
+
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ case RTC_AIE_ON:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (rc < 0)
+ goto err;
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ rc &= ~M41T80_ALMON_AFE;
+ break;
+ case RTC_AIE_ON:
+ rc |= M41T80_ALMON_AFE;
+ break;
+ }
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
+ goto err;
+ return 0;
+err:
+ return -EIO;
+}
+#else
+#define m41t80_rtc_ioctl NULL
+#endif
+
+static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 wbuf[1 + M41T80_ALARM_REG_SIZE];
+ u8 *buf = &wbuf[1];
+ u8 *reg = buf - M41T80_REG_ALARM_MON;
+ u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+ struct i2c_msg msgs_in[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_ALARM_REG_SIZE,
+ .buf = buf,
+ },
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1 + M41T80_ALARM_REG_SIZE,
+ .buf = wbuf,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+ reg[M41T80_REG_ALARM_MON] &= ~(0x1f | M41T80_ALMON_AFE);
+ reg[M41T80_REG_ALARM_DAY] = 0;
+ reg[M41T80_REG_ALARM_HOUR] &= ~(0x3f | 0x80);
+ reg[M41T80_REG_ALARM_MIN] = 0;
+ reg[M41T80_REG_ALARM_SEC] = 0;
+
+ wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
+ reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
+ BIN2BCD(t->time.tm_sec) : 0x80;
+ reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
+ BIN2BCD(t->time.tm_min) : 0x80;
+ reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
+ BIN2BCD(t->time.tm_hour) : 0x80;
+ reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
+ BIN2BCD(t->time.tm_mday) : 0x80;
+ if (t->time.tm_mon >= 0)
+ reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
+ else
+ reg[M41T80_REG_ALARM_DAY] |= 0x40;
+
+ if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+
+ if (t->enabled) {
+ reg[M41T80_REG_ALARM_MON] |= M41T80_ALMON_AFE;
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ reg[M41T80_REG_ALARM_MON]) < 0) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 buf[M41T80_ALARM_REG_SIZE + 1]; /* all alarm regs and flags */
+ u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+ u8 *reg = buf - M41T80_REG_ALARM_MON;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_ALARM_REG_SIZE + 1,
+ .buf = buf,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+ t->time.tm_sec = -1;
+ t->time.tm_min = -1;
+ t->time.tm_hour = -1;
+ t->time.tm_mday = -1;
+ t->time.tm_mon = -1;
+ if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
+ t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
+ if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
+ t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
+ if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
+ t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
+ if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
+ t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
+ if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
+ t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
+ t->time.tm_year = -1;
+ t->time.tm_wday = -1;
+ t->time.tm_yday = -1;
+ t->time.tm_isdst = -1;
+ t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
+ t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
+ return 0;
+}
+
+static struct rtc_class_ops m41t80_rtc_ops = {
+ .read_time = m41t80_rtc_read_time,
+ .set_time = m41t80_rtc_set_time,
+ .read_alarm = m41t80_rtc_read_alarm,
+ .set_alarm = m41t80_rtc_set_alarm,
+ .proc = m41t80_rtc_proc,
+ .ioctl = m41t80_rtc_ioctl,
+};
+
+#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+static ssize_t m41t80_sysfs_show_flags(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (val < 0)
+ return -EIO;
+ return sprintf(buf, "%#x\n", val);
+}
+static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
+
+static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+ if (val < 0)
+ return -EIO;
+ val = (val >> 4) & 0xf;
+ switch (val) {
+ case 0:
+ break;
+ case 1:
+ val = 32768;
+ break;
+ default:
+ val = 32768 >> val;
+ }
+ return sprintf(buf, "%d\n", val);
+}
+static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int almon, sqw;
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (val) {
+ if (!is_power_of_2(val))
+ return -EINVAL;
+ val = ilog2(val);
+ if (val == 15)
+ val = 1;
+ else if (val < 14)
+ val = 15 - val;
+ else
+ return -EINVAL;
+ }
+ /* disable SQW, set SQW frequency & re-enable */
+ almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (almon < 0)
+ return -EIO;
+ sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+ if (sqw < 0)
+ return -EIO;
+ sqw = (sqw & 0x0f) | (val << 4);
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ almon & ~M41T80_ALMON_SQWE) < 0 ||
+ i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0)
+ return -EIO;
+ if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ almon | M41T80_ALMON_SQWE) < 0)
+ return -EIO;
+ return count;
+}
+static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
+ m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
+
+static struct attribute *attrs[] = {
+ &dev_attr_flags.attr,
+ &dev_attr_sqwfreq.attr,
+ NULL,
+};
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static int m41t80_sysfs_register(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &attr_group);
+}
+#else
+static int m41t80_sysfs_register(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+/*
+ *****************************************************************************
+ *
+ * Watchdog Driver
+ *
+ *****************************************************************************
+ */
+static struct i2c_client *save_client;
+
+/* Default margin */
+#define WD_TIMO 60 /* 1..31 seconds */
+
+static int wdt_margin = WD_TIMO;
+module_param(wdt_margin, int, 0);
+MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");
+
+static unsigned long wdt_is_open;
+static int boot_flag;
+
+/**
+ * wdt_ping:
+ *
+ * Reload counter one with the watchdog timeout. We don't bother reloading
+ * the cascade counter.
+ */
+static void wdt_ping(void)
+{
+ unsigned char i2c_data[2];
+ struct i2c_msg msgs1[1] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = i2c_data,
+ },
+ };
+ i2c_data[0] = 0x09; /* watchdog register */
+
+ if (wdt_margin > 31)
+ i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
+ else
+ /*
+		 * WDS = 1 (0x80), multiplier = WD_TIMO, resolution = 1s (0x02)
+ */
+ i2c_data[1] = wdt_margin<<2 | 0x82;
+
+ i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ * wdt_disable:
+ *
+ * disables watchdog.
+ */
+static void wdt_disable(void)
+{
+ unsigned char i2c_data[2], i2c_buf[0x10];
+ struct i2c_msg msgs0[2] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = i2c_data,
+ },
+ {
+ .addr = save_client->addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = i2c_buf,
+ },
+ };
+ struct i2c_msg msgs1[1] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = i2c_data,
+ },
+ };
+
+ i2c_data[0] = 0x09;
+ i2c_transfer(save_client->adapter, msgs0, 2);
+
+ i2c_data[0] = 0x09;
+ i2c_data[1] = 0x00;
+ i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ * wdt_write:
+ * @file: file handle to the watchdog
+ *	@buf: buffer to write (unused, as the data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ *	write of data will do, as we don't define content meaning.
+ */
+static ssize_t wdt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /* Can't seek (pwrite) on this device
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+ */
+ if (count) {
+ wdt_ping();
+ return 1;
+ }
+ return 0;
+}
+
+static ssize_t wdt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+/**
+ * wdt_ioctl:
+ * @inode: inode of the device
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features. We only actually usefully support
+ * querying capabilities and current status.
+ */
+static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_margin, rv;
+ static struct watchdog_info ident = {
+ .options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+		.identity = "M41T80 WDT"
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info __user *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(boot_flag, (int __user *)arg);
+ case WDIOC_KEEPALIVE:
+ wdt_ping();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, (int __user *)arg))
+ return -EFAULT;
+ /* Arbitrary, can't find the card's limits */
+ if (new_margin < 1 || new_margin > 124)
+ return -EINVAL;
+ wdt_margin = new_margin;
+ wdt_ping();
+		/* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt_margin, (int __user *)arg);
+
+ case WDIOC_SETOPTIONS:
+ if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
+ return -EFAULT;
+
+ if (rv & WDIOS_DISABLECARD) {
+ printk(KERN_INFO
+ "rtc-m41t80: disable watchdog\n");
+ wdt_disable();
+ }
+
+ if (rv & WDIOS_ENABLECARD) {
+ printk(KERN_INFO
+ "rtc-m41t80: enable watchdog\n");
+ wdt_ping();
+ }
+
+ return -EINVAL;
+ }
+ return -ENOTTY;
+}
+
+/**
+ * wdt_open:
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ */
+static int wdt_open(struct inode *inode, struct file *file)
+{
+ if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
+ if (test_and_set_bit(0, &wdt_is_open))
+ return -EBUSY;
+ /*
+ * Activate
+ */
+ wdt_is_open = 1;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * wdt_close:
+ *	@inode: inode of the device
+ *	@file: file handle to the device
+ *
+ */
+static int wdt_release(struct inode *inode, struct file *file)
+{
+ if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
+ clear_bit(0, &wdt_is_open);
+ return 0;
+}
+
+/**
+ * notify_sys:
+ * @this: our notifier block
+ * @code: the event being reported
+ * @unused: unused
+ *
+ * Our notifier is called on system shutdowns. We want to turn the card
+ * off at reboot otherwise the machine will reboot again during memory
+ * test or worse yet during the following fsck. This would suck, in fact
+ * trust me - if it happens it does suck.
+ */
+static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Disable Watchdog */
+ wdt_disable();
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .read = wdt_read,
+ .ioctl = wdt_ioctl,
+ .write = wdt_write,
+ .open = wdt_open,
+ .release = wdt_release,
+};
+
+static struct miscdevice wdt_dev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &wdt_fops,
+};
+
+/*
+ * The WDT card needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+static struct notifier_block wdt_notifier = {
+ .notifier_call = wdt_notify_sys,
+};
+#endif /* CONFIG_RTC_DRV_M41T80_WDT */
+
+/*
+ *****************************************************************************
+ *
+ * Driver Interface
+ *
+ *****************************************************************************
+ */
+static int m41t80_probe(struct i2c_client *client)
+{
+ int i, rc = 0;
+ struct rtc_device *rtc = NULL;
+ struct rtc_time tm;
+ const struct m41t80_chip_info *chip;
+ struct m41t80_data *clientdata = NULL;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
+ | I2C_FUNC_SMBUS_BYTE_DATA)) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ dev_info(&client->dev,
+ "chip found, driver version " DRV_VERSION "\n");
+
+ chip = NULL;
+ for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
+ if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
+ chip = &m41t80_chip_info_tbl[i];
+ break;
+ }
+ }
+ if (!chip) {
+ dev_err(&client->dev, "%s is not supported\n", client->name);
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
+ if (!clientdata) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ rtc = rtc_device_register(client->name, &client->dev,
+ &m41t80_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ rc = PTR_ERR(rtc);
+ rtc = NULL;
+ goto exit;
+ }
+
+ clientdata->rtc = rtc;
+ clientdata->chip = chip;
+ i2c_set_clientdata(client, clientdata);
+
+ /* Make sure HT (Halt Update) bit is cleared */
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
+ if (rc < 0)
+ goto ht_err;
+
+ if (rc & M41T80_ALHOUR_HT) {
+ if (chip->features & M41T80_FEATURE_HT) {
+ m41t80_get_datetime(client, &tm);
+ dev_info(&client->dev, "HT bit was set!\n");
+ dev_info(&client->dev,
+ "Power Down at "
+ "%04i-%02i-%02i %02i:%02i:%02i\n",
+ tm.tm_year + 1900,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+ tm.tm_min, tm.tm_sec);
+ }
+ if (i2c_smbus_write_byte_data(client,
+ M41T80_REG_ALARM_HOUR,
+ rc & ~M41T80_ALHOUR_HT) < 0)
+ goto ht_err;
+ }
+
+ /* Make sure ST (stop) bit is cleared */
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
+ if (rc < 0)
+ goto st_err;
+
+ if (rc & M41T80_SEC_ST) {
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
+ rc & ~M41T80_SEC_ST) < 0)
+ goto st_err;
+ }
+
+ rc = m41t80_sysfs_register(&client->dev);
+ if (rc)
+ goto exit;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+ if (chip->features & M41T80_FEATURE_HT) {
+ rc = misc_register(&wdt_dev);
+ if (rc)
+ goto exit;
+ rc = register_reboot_notifier(&wdt_notifier);
+ if (rc) {
+ misc_deregister(&wdt_dev);
+ goto exit;
+ }
+ save_client = client;
+ }
+#endif
+ return 0;
+
+st_err:
+ rc = -EIO;
+ dev_err(&client->dev, "Can't clear ST bit\n");
+ goto exit;
+ht_err:
+ rc = -EIO;
+ dev_err(&client->dev, "Can't clear HT bit\n");
+ goto exit;
+
+exit:
+ if (rtc)
+ rtc_device_unregister(rtc);
+ kfree(clientdata);
+ return rc;
+}
+
+static int m41t80_remove(struct i2c_client *client)
+{
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
+ struct rtc_device *rtc = clientdata->rtc;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+ if (clientdata->chip->features & M41T80_FEATURE_HT) {
+ misc_deregister(&wdt_dev);
+ unregister_reboot_notifier(&wdt_notifier);
+ }
+#endif
+ if (rtc)
+ rtc_device_unregister(rtc);
+ kfree(clientdata);
+
+ return 0;
+}
+
+static struct i2c_driver m41t80_driver = {
+ .driver = {
+ .name = "m41t80",
+ },
+ .probe = m41t80_probe,
+ .remove = m41t80_remove,
+};
+
+static int __init m41t80_rtc_init(void)
+{
+ return i2c_add_driver(&m41t80_driver);
+}
+
+static void __exit m41t80_rtc_exit(void)
+{
+ i2c_del_driver(&m41t80_driver);
+}
+
+MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>");
+MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(m41t80_rtc_init);
+module_exit(m41t80_rtc_exit);
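
A minimal user-space sketch of exercising the watchdog interface added above. It assumes only the standard /dev/watchdog character device and the generic linux/watchdog.h ioctls; the 30-second timeout and the ping interval are illustrative values, not taken from the patch. Per the handlers above, any write is treated as a keepalive and WDIOC_SETTIMEOUT accepts 1..124 seconds.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30;		/* seconds; illustrative, driver allows 1..124 */

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("watchdog timeout set to %d s\n", timeout);

	for (;;) {
		write(fd, "\0", 1);	/* any write counts as a keepalive */
		sleep(timeout / 2);
	}
}
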
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
new file mode 100644
index 00000000000..33b752350ab
--- /dev/null
+++ b/drivers/rtc/rtc-m48t59.c
@@ -0,0 +1,491 @@
+/*
+ * ST M48T59 RTC driver
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Author: Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/rtc/m48t59.h>
+#include <linux/bcd.h>
+
+#ifndef NO_IRQ
+#define NO_IRQ (-1)
+#endif
+
+#define M48T59_READ(reg) pdata->read_byte(dev, reg)
+#define M48T59_WRITE(val, reg) pdata->write_byte(dev, reg, val)
+
+#define M48T59_SET_BITS(mask, reg) \
+ M48T59_WRITE((M48T59_READ(reg) | (mask)), (reg))
+#define M48T59_CLEAR_BITS(mask, reg) \
+ M48T59_WRITE((M48T59_READ(reg) & ~(mask)), (reg))
+
+struct m48t59_private {
+ void __iomem *ioaddr;
+ unsigned int size; /* iomem size */
+ unsigned int irq;
+ struct rtc_device *rtc;
+ spinlock_t lock; /* serialize the NVRAM and RTC access */
+};
+
+/*
+ * This is the generic access method when the chip is memory-mapped
+ */
+static void
+m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ writeb(val, m48t59->ioaddr+ofs);
+}
+
+static u8
+m48t59_mem_readb(struct device *dev, u32 ofs)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ return readb(m48t59->ioaddr+ofs);
+}
+
+/*
+ * NOTE: M48T59 only uses BCD mode
+ */
+static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the READ command */
+ M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+ tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+ tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_MDAY));
+
+ val = M48T59_READ(M48T59_WDAY);
+ if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB)) {
+ dev_dbg(dev, "Century bit is enabled\n");
+ tm->tm_year += 100; /* one century */
+ }
+
+ tm->tm_wday = BCD2BIN(val & 0x07);
+ tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
+ tm->tm_min = BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
+ tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);
+
+ /* Clear the READ bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val = 0;
+
+ dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the WRITE command */
+ M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+ M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
+ M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
+ M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
+ M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
+ /* tm_mon is 0-11 */
+ M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
+ M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);
+
+ if (tm->tm_year/100)
+ val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
+ val |= (BIN2BCD(tm->tm_wday) & 0x07);
+ M48T59_WRITE(val, M48T59_WDAY);
+
+ /* Clear the WRITE bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ return 0;
+}
+
+/*
+ * Read alarm time and date in RTC
+ */
+static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long flags;
+ u8 val;
+
+ /* If no irq, we don't support ALARM */
+ if (m48t59->irq == NO_IRQ)
+ return -EIO;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the READ command */
+ M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+ tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+
+ val = M48T59_READ(M48T59_WDAY);
+ if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
+ tm->tm_year += 100; /* one century */
+
+ tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
+ tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
+ tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
+ tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));
+
+ /* Clear the READ bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+/*
+ * Set alarm time and date in RTC
+ */
+static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ u8 mday, hour, min, sec;
+ unsigned long flags;
+
+ /* If no irq, we don't support ALARM */
+ if (m48t59->irq == NO_IRQ)
+ return -EIO;
+
+ /*
+ * 0xff means "always match"
+ */
+ mday = tm->tm_mday;
+ mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
+ if (mday == 0xff)
+ mday = M48T59_READ(M48T59_MDAY);
+
+ hour = tm->tm_hour;
+ hour = (hour < 24) ? BIN2BCD(hour) : 0x00;
+
+ min = tm->tm_min;
+ min = (min < 60) ? BIN2BCD(min) : 0x00;
+
+ sec = tm->tm_sec;
+ sec = (sec < 60) ? BIN2BCD(sec) : 0x00;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the WRITE command */
+ M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+ M48T59_WRITE(mday, M48T59_ALARM_DATE);
+ M48T59_WRITE(hour, M48T59_ALARM_HOUR);
+ M48T59_WRITE(min, M48T59_ALARM_MIN);
+ M48T59_WRITE(sec, M48T59_ALARM_SEC);
+
+ /* Clear the WRITE bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+/*
+ * Handle commands from user-space
+ */
+static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ switch (cmd) {
+ case RTC_AIE_OFF: /* alarm interrupt off */
+ M48T59_WRITE(0x00, M48T59_INTR);
+ break;
+ case RTC_AIE_ON: /* alarm interrupt on */
+ M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ return ret;
+}
+
+static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ val = M48T59_READ(M48T59_FLAGS);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ seq_printf(seq, "battery\t\t: %s\n",
+ (val & M48T59_FLAGS_BF) ? "low" : "normal");
+ return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ u8 event;
+
+ spin_lock(&m48t59->lock);
+ event = M48T59_READ(M48T59_FLAGS);
+ spin_unlock(&m48t59->lock);
+
+ if (event & M48T59_FLAGS_AF) {
+ rtc_update_irq(m48t59->rtc, 1, (RTC_AF | RTC_IRQF));
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static const struct rtc_class_ops m48t59_rtc_ops = {
+ .ioctl = m48t59_rtc_ioctl,
+ .read_time = m48t59_rtc_read_time,
+ .set_time = m48t59_rtc_set_time,
+ .read_alarm = m48t59_rtc_readalarm,
+ .set_alarm = m48t59_rtc_setalarm,
+ .proc = m48t59_rtc_proc,
+};
+
+static ssize_t m48t59_nvram_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ ssize_t cnt = 0;
+ unsigned long flags;
+
+ for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
+ spin_lock_irqsave(&m48t59->lock, flags);
+ *buf++ = M48T59_READ(cnt);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ }
+
+ return cnt;
+}
+
+static ssize_t m48t59_nvram_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ ssize_t cnt = 0;
+ unsigned long flags;
+
+ for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
+ spin_lock_irqsave(&m48t59->lock, flags);
+ M48T59_WRITE(*buf++, cnt);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ }
+
+ return cnt;
+}
+
+static struct bin_attribute m48t59_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUGO,
+ .owner = THIS_MODULE,
+ },
+ .read = m48t59_nvram_read,
+ .write = m48t59_nvram_write,
+};
+
+static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
+{
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = NULL;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ /* This chip could be memory-mapped or I/O-mapped */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -EINVAL;
+ }
+
+ if (res->flags & IORESOURCE_IO) {
+ /* If we are I/O-mapped, the platform should provide
+ * the operations accessing chip registers.
+ */
+ if (!pdata || !pdata->write_byte || !pdata->read_byte)
+ return -EINVAL;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* we are memory-mapped */
+ if (!pdata) {
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ /* Ensure we only kmalloc platform data once */
+ pdev->dev.platform_data = pdata;
+ }
+
+ /* Try to use the generic memory read/write ops */
+ if (!pdata->write_byte)
+ pdata->write_byte = m48t59_mem_writeb;
+ if (!pdata->read_byte)
+ pdata->read_byte = m48t59_mem_readb;
+ }
+
+ m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL);
+ if (!m48t59)
+ return -ENOMEM;
+
+ m48t59->size = res->end - res->start + 1;
+ m48t59->ioaddr = ioremap(res->start, m48t59->size);
+ if (!m48t59->ioaddr)
+ goto out;
+
+	/* Try to get the IRQ number. We can also work
+	 * in a mode without an IRQ.
+ */
+ m48t59->irq = platform_get_irq(pdev, 0);
+ if (m48t59->irq < 0)
+ m48t59->irq = NO_IRQ;
+
+ if (m48t59->irq != NO_IRQ) {
+ ret = request_irq(m48t59->irq, m48t59_rtc_interrupt,
+ IRQF_SHARED, "rtc-m48t59", &pdev->dev);
+ if (ret)
+ goto out;
+ }
+
+ m48t59->rtc = rtc_device_register("m48t59", &pdev->dev,
+ &m48t59_rtc_ops, THIS_MODULE);
+ if (IS_ERR(m48t59->rtc)) {
+ ret = PTR_ERR(m48t59->rtc);
+ goto out;
+ }
+
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+ if (ret)
+ goto out;
+
+ spin_lock_init(&m48t59->lock);
+ platform_set_drvdata(pdev, m48t59);
+ return 0;
+
+out:
+ if (!IS_ERR(m48t59->rtc))
+ rtc_device_unregister(m48t59->rtc);
+ if (m48t59->irq != NO_IRQ)
+ free_irq(m48t59->irq, &pdev->dev);
+ if (m48t59->ioaddr)
+ iounmap(m48t59->ioaddr);
+ if (m48t59)
+ kfree(m48t59);
+ return ret;
+}
+
+static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
+{
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+ if (!IS_ERR(m48t59->rtc))
+ rtc_device_unregister(m48t59->rtc);
+ if (m48t59->ioaddr)
+ iounmap(m48t59->ioaddr);
+ if (m48t59->irq != NO_IRQ)
+ free_irq(m48t59->irq, &pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(m48t59);
+ return 0;
+}
+
+static struct platform_driver m48t59_rtc_platdrv = {
+ .driver = {
+ .name = "rtc-m48t59",
+ .owner = THIS_MODULE,
+ },
+ .probe = m48t59_rtc_probe,
+ .remove = __devexit_p(m48t59_rtc_remove),
+};
+
+static int __init m48t59_rtc_init(void)
+{
+ return platform_driver_register(&m48t59_rtc_platdrv);
+}
+
+static void __exit m48t59_rtc_exit(void)
+{
+ platform_driver_unregister(&m48t59_rtc_platdrv);
+}
+
+module_init(m48t59_rtc_init);
+module_exit(m48t59_rtc_exit);
+
+MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
+MODULE_DESCRIPTION("M48T59 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 09bbe575647..6b67b509792 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,13 +13,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
-#define DRV_VERSION "0.4"
-
-/* Addresses to scan */
-static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.5"
/*
@@ -88,9 +82,6 @@ struct rs5c372 {
unsigned has_irq:1;
char buf[17];
char *regs;
-
- /* on conversion to a "new style" i2c driver, this vanishes */
- struct i2c_client dev;
};
static int rs5c_get_regs(struct rs5c372 *rs5c)
@@ -483,25 +474,35 @@ static int rs5c_sysfs_register(struct device *dev)
return err;
}
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_trim);
+ device_remove_file(dev, &dev_attr_osc);
+}
+
#else
static int rs5c_sysfs_register(struct device *dev)
{
return 0;
}
+
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+ /* nothing */
+}
#endif /* SYSFS */
static struct i2c_driver rs5c372_driver;
-static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
+static int rs5c372_probe(struct i2c_client *client)
{
int err = 0;
- struct i2c_client *client;
struct rs5c372 *rs5c372;
struct rtc_time tm;
- dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+ dev_dbg(&client->dev, "%s\n", __FUNCTION__);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
goto exit;
}
@@ -514,35 +515,22 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
/* we read registers 0x0f then 0x00-0x0f; skip the first one */
rs5c372->regs=&rs5c372->buf[1];
- /* On conversion to a "new style" i2c driver, we'll be handed
- * the i2c_client (we won't create it)
- */
- client = &rs5c372->dev;
rs5c372->client = client;
-
- /* I2C client */
- client->addr = address;
- client->driver = &rs5c372_driver;
- client->adapter = adapter;
-
- strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
-
i2c_set_clientdata(client, rs5c372);
- /* Inform the i2c layer */
- if ((err = i2c_attach_client(client)))
- goto exit_kfree;
-
err = rs5c_get_regs(rs5c372);
if (err < 0)
- goto exit_detach;
+ goto exit_kfree;
- /* For "new style" drivers, irq is in i2c_client and chip type
- * info comes from i2c_client.dev.platform_data. Meanwhile:
- *
- * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
- */
- if (rs5c372->type == rtc_undef) {
+ if (strcmp(client->name, "rs5c372a") == 0)
+ rs5c372->type = rtc_rs5c372a;
+ else if (strcmp(client->name, "rs5c372b") == 0)
+ rs5c372->type = rtc_rs5c372b;
+ else if (strcmp(client->name, "rv5c386") == 0)
+ rs5c372->type = rtc_rv5c386;
+ else if (strcmp(client->name, "rv5c387a") == 0)
+ rs5c372->type = rtc_rv5c387a;
+ else {
rs5c372->type = rtc_rs5c372b;
dev_warn(&client->dev, "assuming rs5c372b\n");
}
@@ -567,7 +555,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
break;
default:
dev_err(&client->dev, "unknown RTC type\n");
- goto exit_detach;
+ goto exit_kfree;
}
/* if the oscillator lost power and no other software (like
@@ -601,7 +589,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
if ((i2c_master_send(client, buf, 3)) != 3) {
dev_err(&client->dev, "setup error\n");
- goto exit_detach;
+ goto exit_kfree;
}
rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
@@ -621,14 +609,14 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
rs5c372->time24 ? "24hr" : "am/pm"
);
- /* FIXME when client->irq exists, use it to register alarm irq */
+ /* REVISIT use client->irq to register alarm irq ... */
rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
if (IS_ERR(rs5c372->rtc)) {
err = PTR_ERR(rs5c372->rtc);
- goto exit_detach;
+ goto exit_kfree;
}
err = rs5c_sysfs_register(&client->dev);
@@ -640,9 +628,6 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
exit_devreg:
rtc_device_unregister(rs5c372->rtc);
-exit_detach:
- i2c_detach_client(client);
-
exit_kfree:
kfree(rs5c372);
@@ -650,24 +635,12 @@ exit:
return err;
}
-static int rs5c372_attach(struct i2c_adapter *adapter)
+static int rs5c372_remove(struct i2c_client *client)
{
- return i2c_probe(adapter, &addr_data, rs5c372_probe);
-}
-
-static int rs5c372_detach(struct i2c_client *client)
-{
- int err;
struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
- if (rs5c372->rtc)
- rtc_device_unregister(rs5c372->rtc);
-
- /* REVISIT properly destroy the sysfs files ... */
-
- if ((err = i2c_detach_client(client)))
- return err;
-
+ rtc_device_unregister(rs5c372->rtc);
+ rs5c_sysfs_unregister(&client->dev);
kfree(rs5c372);
return 0;
}
@@ -676,8 +649,8 @@ static struct i2c_driver rs5c372_driver = {
.driver = {
.name = "rtc-rs5c372",
},
- .attach_adapter = &rs5c372_attach,
- .detach_client = &rs5c372_detach,
+ .probe = rs5c372_probe,
+ .remove = rs5c372_remove,
};
static __init int rs5c372_init(void)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1340451ea40..35765f6a86e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -747,14 +747,9 @@ dcssblk_check_params(void)
static void __exit
dcssblk_exit(void)
{
- int rc;
-
PRINT_DEBUG("DCSSBLOCK EXIT...\n");
s390_root_dev_unregister(dcssblk_root_dev);
- rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
- if (rc) {
- PRINT_ERR("unregister_blkdev() failed!\n");
- }
+ unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
PRINT_DEBUG("...finished!\n");
}
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a2316..5157a2abc58 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -619,8 +619,7 @@ static void __exit jsflash_cleanup_module(void)
jsf0.busy = 0;
misc_deregister(&jsf_dev);
- if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0)
- printk("jsfd: cleanup_module failed\n");
+ unregister_blkdev(JSFD_MAJOR, "jsfd");
blk_cleanup_queue(jsf_queue);
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d70ddfda93f..9c5342e7a69 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -40,6 +40,7 @@
#include <linux/err.h>
#include <linux/blkdev.h>
+#include <linux/freezer.h>
#include <linux/scatterlist.h>
/* ---------- SCSI Host glue ---------- */
@@ -868,8 +869,6 @@ static int sas_queue_thread(void *_sas_ha)
{
struct sas_ha_struct *sas_ha = _sas_ha;
- current->flags |= PF_NOFREEZE;
-
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9adb64ac054..8a525abda30 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -19,6 +19,7 @@
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
@@ -1516,8 +1517,6 @@ int scsi_error_handler(void *data)
{
struct Scsi_Host *shost = data;
- current->flags |= PF_NOFREEZE;
-
/*
* We use TASK_INTERRUPTIBLE so that the thread is not
* counted against the load average as a running process.
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index cab42cbd920..7fa413ddccf 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -338,6 +338,34 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_SB1250_DUART
+ tristate "BCM1xxx on-chip DUART serial support"
+ depends on SIBYTE_SB1xxx_SOC=y
+ select SERIAL_CORE
+ default y
+ ---help---
+ Support for the asynchronous serial interface (DUART) included in
+ the BCM1250 and derived System-On-a-Chip (SOC) devices. Note that
+ the letter D in DUART stands for "dual", which is how the device
+ is implemented. Depending on the SOC configuration there may be
+	  one or more DUARTs available, all of which are handled.
+
+ If unsure, say Y. To compile this driver as a module, choose M here:
+ the module will be called sb1250-duart.
+
+config SERIAL_SB1250_DUART_CONSOLE
+ bool "Support for console on a BCM1xxx DUART serial port"
+ depends on SERIAL_SB1250_DUART=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ If unsure, say Y.
+
config SERIAL_ATMEL
bool "AT91 / AT32 on-chip serial port support"
depends on (ARM && ARCH_AT91) || AVR32
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 08ad0d97818..c48cdd61b73 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
obj-$(CONFIG_SERIAL_ICOM) += icom.o
obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
new file mode 100644
index 00000000000..1d9d7285172
--- /dev/null
+++ b/drivers/serial/sb1250-duart.c
@@ -0,0 +1,972 @@
+/*
+ * drivers/serial/sb1250-duart.c
+ *
+ * Support for the asynchronous serial interface (DUART) included
+ * in the BCM1250 and derived System-On-a-Chip (SOC) devices.
+ *
+ * Copyright (c) 2007 Maciej W. Rozycki
+ *
+ * Derived from drivers/char/sb1250_duart.c for which the following
+ * copyright applies:
+ *
+ * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
+ */
+
+#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/war.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/swarm.h>
+
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
+#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
+
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
+#define SBD_INT(line) (K_INT_UART_0 + (line))
+
+#else
+#error invalid SB1250 UART configuration
+
+#endif
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
+MODULE_LICENSE("GPL");
+
+
+#define DUART_MAX_CHIP 2
+#define DUART_MAX_SIDE 2
+
+/*
+ * Per-port state.
+ */
+struct sbd_port {
+ struct sbd_duart *duart;
+ struct uart_port port;
+ unsigned char __iomem *memctrl;
+ int tx_stopped;
+ int initialised;
+};
+
+/*
+ * Per-DUART state for the shared register space.
+ */
+struct sbd_duart {
+ struct sbd_port sport[2];
+ unsigned long mapctrl;
+ atomic_t map_guard;
+};
+
+#define to_sport(uport) container_of(uport, struct sbd_port, port)
+
+static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
+
+#define __unused __attribute__((__unused__))
+
+
+/*
+ * Reading and writing SB1250 DUART registers.
+ *
+ * There are three register spaces: two per-channel ones and
+ * a shared one. We have to define accessors appropriately.
+ * All registers are 64-bit and all but the Baud Rate Clock
+ * registers only define 8 least significant bits. There is
+ * also a workaround to take into account. Raw accessors use
+ * the full register width, but cooked ones truncate it
+ * intentionally so that the rest of the driver does not care.
+ */
+static u64 __read_sbdchn(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ return __raw_readq(csr);
+}
+
+static u64 __read_sbdshr(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ return __raw_readq(csr);
+}
+
+static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ __raw_writeq(value, csr);
+}
+
+static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ __raw_writeq(value, csr);
+}
+
+/*
+ * In bug 1956, we get glitches that can mess up uart registers. This
+ * "read-mode-reg after any register access" is an accepted workaround.
+ */
+static void __war_sbd1956(struct sbd_port *sport)
+{
+ __read_sbdchn(sport, R_DUART_MODE_REG_1);
+ __read_sbdchn(sport, R_DUART_MODE_REG_2);
+}
+
+static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdchn(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdshr(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdchn(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdshr(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+
+static int sbd_receive_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
+}
+
+static int sbd_receive_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (sbd_receive_ready(sport) && loops--)
+ read_sbdchn(sport, R_DUART_RX_HOLD);
+ return loops;
+}
+
+static int __unused sbd_transmit_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
+}
+
+static int __unused sbd_transmit_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_ready(sport) && loops--)
+ udelay(2);
+ return loops;
+}
+
+static int sbd_transmit_empty(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
+}
+
+static int sbd_line_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_empty(sport) && loops--)
+ udelay(2);
+ return loops;
+}
+
+
+static unsigned int sbd_tx_empty(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int sbd_get_mctrl(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mctrl, status;
+
+ status = read_sbdshr(sport, R_DUART_IN_PORT);
+ status >>= (uport->line) % 2;
+ mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
+ (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
+ (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
+ (!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
+ return mctrl;
+}
+
+static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int clr = 0, set = 0, mode2;
+
+ if (mctrl & TIOCM_DTR)
+ set |= M_DUART_SET_OPR2;
+ else
+ clr |= M_DUART_CLR_OPR2;
+ if (mctrl & TIOCM_RTS)
+ set |= M_DUART_SET_OPR0;
+ else
+ clr |= M_DUART_CLR_OPR0;
+ clr <<= (uport->line) % 2;
+ set <<= (uport->line) % 2;
+
+ mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
+ mode2 &= ~M_DUART_CHAN_MODE;
+ if (mctrl & TIOCM_LOOP)
+ mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
+ else
+ mode2 |= V_DUART_CHAN_MODE_NORMAL;
+
+ write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
+ write_sbdshr(sport, R_DUART_SET_OPR, set);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
+}
+
+static void sbd_stop_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ sport->tx_stopped = 1;
+};
+
+static void sbd_start_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mask;
+
+ /* Enable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask |= M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+
+ /* Go!, go!, go!... */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ sport->tx_stopped = 0;
+};
+
+static void sbd_stop_rx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+};
+
+static void sbd_enable_ms(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_AUXCTL_X,
+ M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
+}
+
+static void sbd_break_ctl(struct uart_port *uport, int break_state)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (break_state == -1)
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
+ else
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
+}
+
+
+static void sbd_receive_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct uart_icount *icount;
+ unsigned int status, ch, flag;
+ int count;
+
+ for (count = 16; count; count--) {
+ status = read_sbdchn(sport, R_DUART_STATUS);
+ if (!(status & M_DUART_RX_RDY))
+ break;
+
+ ch = read_sbdchn(sport, R_DUART_RX_HOLD);
+
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ if (unlikely(status &
+ (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
+ if (status & M_DUART_RCVD_BRK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & M_DUART_FRM_ERR)
+ icount->frame++;
+ else if (status & M_DUART_PARITY_ERR)
+ icount->parity++;
+ if (status & M_DUART_OVRUN_ERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & M_DUART_RCVD_BRK)
+ flag = TTY_BREAK;
+ else if (status & M_DUART_FRM_ERR)
+ flag = TTY_FRAME;
+ else if (status & M_DUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
+ }
+
+ tty_flip_buffer_push(uport->info->tty);
+}
+
+static void sbd_transmit_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct circ_buf *xmit = &sport->port.info->xmit;
+ unsigned int mask;
+ int stop_tx;
+
+ /* XON/XOFF chars. */
+ if (sport->port.x_char) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ /* If nothing to do or stopped or hardware stopped. */
+ stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
+
+ /* Send char. */
+ if (!stop_tx) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+ }
+
+	/* Are we done? */
+ if (stop_tx || uart_circ_empty(xmit)) {
+ /* Disable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask &= ~M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ }
+}
+
+static void sbd_status_handle(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ unsigned int delta;
+
+ delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+ delta >>= (uport->line) % 2;
+
+ if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
+ uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
+
+ if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
+ uport->icount.dsr++;
+
+ if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
+ S_DUART_IN_PIN_CHNG))
+ wake_up_interruptible(&uport->info->delta_msr_wait);
+}
+
+static irqreturn_t sbd_interrupt(int irq, void *dev_id)
+{
+ struct sbd_port *sport = dev_id;
+ struct uart_port *uport = &sport->port;
+ irqreturn_t status = IRQ_NONE;
+ unsigned int intstat;
+ int count;
+
+ for (count = 16; count; count--) {
+ intstat = read_sbdshr(sport,
+ R_DUART_ISRREG((uport->line) % 2));
+ intstat &= read_sbdshr(sport,
+ R_DUART_IMRREG((uport->line) % 2));
+ intstat &= M_DUART_ISR_ALL;
+ if (!intstat)
+ break;
+
+ if (intstat & M_DUART_ISR_RX)
+ sbd_receive_chars(sport);
+ if (intstat & M_DUART_ISR_IN)
+ sbd_status_handle(sport);
+ if (intstat & M_DUART_ISR_TX)
+ sbd_transmit_chars(sport);
+
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+
+static int sbd_startup(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1;
+ int ret;
+
+ ret = request_irq(sport->port.irq, sbd_interrupt,
+ IRQF_SHARED, "sb1250-duart", sport);
+ if (ret)
+ return ret;
+
+ /* Clear the receive FIFO. */
+ sbd_receive_drain(sport);
+
+ /* Clear the interrupt registers. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
+ read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+
+ /* Set rx/tx interrupt to FIFO available. */
+ mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
+ mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
+
+ /* Disable tx, enable rx. */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
+ sport->tx_stopped = 1;
+
+ /* Enable interrupts. */
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ M_DUART_IMR_IN | M_DUART_IMR_RX);
+
+ return 0;
+}
+
+static void sbd_shutdown(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+ sport->tx_stopped = 1;
+ free_irq(sport->port.irq, sport);
+}
+
+
+static void sbd_init_port(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+
+ if (sport->initialised)
+ return;
+
+ /* There is no DUART reset feature, so just set some sane defaults. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
+ write_sbdchn(sport, R_DUART_FULL_CTL,
+ V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
+ write_sbdchn(sport, R_DUART_OPCR_X, 0);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+
+ sport->initialised = 1;
+}
+
+static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1 = 0, mode2 = 0, aux = 0;
+ unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
+ unsigned int oldmode1, oldmode2, oldaux;
+ unsigned int baud, brg;
+ unsigned int command;
+
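+ /*
+ * The *mask values keep the bits of the current register contents that
+ * are not configured below; the new settings are ORed in over them.
+ */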
+ mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
+ M_DUART_BITS_PER_CHAR);
+ mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
+ auxmask |= ~M_DUART_CTS_CHNG_ENA;
+
+ /* Byte size. */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ case CS6:
+ /* Unsupported, leave unchanged. */
+ mode1mask |= M_DUART_PARITY_MODE;
+ break;
+ case CS7:
+ mode1 |= V_DUART_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mode1 |= V_DUART_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* Parity and stop bits. */
+ if (termios->c_cflag & CSTOPB)
+ mode2 |= M_DUART_STOP_BIT_LEN_2;
+ else
+ mode2 |= M_DUART_STOP_BIT_LEN_1;
+ if (termios->c_cflag & PARENB)
+ mode1 |= V_DUART_PARITY_MODE_ADD;
+ else
+ mode1 |= V_DUART_PARITY_MODE_NONE;
+ if (termios->c_cflag & PARODD)
+ mode1 |= M_DUART_PARITY_TYPE_ODD;
+ else
+ mode1 |= M_DUART_PARITY_TYPE_EVEN;
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
+ brg = V_DUART_BAUD_RATE(baud);
+ /* The actual lower bound is 1221bps, so compensate. */
+ if (brg > M_DUART_CLK_COUNTER)
+ brg = M_DUART_CLK_COUNTER;
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ uport->read_status_mask = M_DUART_OVRUN_ERR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uport->read_status_mask |= M_DUART_RCVD_BRK;
+
+ uport->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & IGNBRK) {
+ uport->ignore_status_mask |= M_DUART_RCVD_BRK;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
+ }
+
+ if (termios->c_cflag & CREAD)
+ command = M_DUART_RX_EN;
+ else
+ command = M_DUART_RX_DIS;
+
+ if (termios->c_cflag & CRTSCTS)
+ aux |= M_DUART_CTS_CHNG_ENA;
+ else
+ aux &= ~M_DUART_CTS_CHNG_ENA;
+
+ spin_lock(&uport->lock);
+
+ if (sport->tx_stopped)
+ command |= M_DUART_TX_DIS;
+ else
+ command |= M_DUART_TX_EN;
+
+ oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
+ oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
+ oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
+
+ if (!sport->tx_stopped)
+ sbd_line_drain(sport);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
+ write_sbdchn(sport, R_DUART_CLK_SEL, brg);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
+
+ write_sbdchn(sport, R_DUART_CMD, command);
+
+ spin_unlock(&uport->lock);
+}
+
+
+static const char *sbd_type(struct uart_port *uport)
+{
+ return "SB1250 DUART";
+}
+
+static void sbd_release_port(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+ int map_guard;
+
+ iounmap(sport->memctrl);
+ sport->memctrl = NULL;
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
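+ /*
+ * The control register block is shared by both channels of a DUART;
+ * only the last port to be released drops its mapping.
+ */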
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+}
+
+static int sbd_map_port(struct uart_port *uport)
+{
+ static const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ DUART_CHANREG_SPACING);
+ if (!uport->membase) {
+ printk(err);
+ return -ENOMEM;
+ }
+
+ if (!sport->memctrl)
+ sport->memctrl = ioremap_nocache(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ if (!sport->memctrl) {
+ printk(err);
+ iounmap(uport->membase);
+ uport->membase = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int sbd_request_port(struct uart_port *uport)
+{
+ static const char *err = KERN_ERR
+ "sbd: Unable to reserve MMIO resource\n";
+ struct sbd_duart *duart = to_sport(uport)->duart;
+ int map_guard;
+ int ret = 0;
+
+ if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ printk(err);
+ return -EBUSY;
+ }
+ map_guard = atomic_add_return(1, &duart->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ atomic_add(-1, &duart->map_guard);
+ printk(err);
+ ret = -EBUSY;
+ }
+ }
+ if (!ret) {
+ ret = sbd_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ }
+ }
+ if (ret) {
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+ return ret;
+ }
+ return 0;
+}
+
+static void sbd_config_port(struct uart_port *uport, int flags)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (sbd_request_port(uport))
+ return;
+
+ uport->type = PORT_SB1250_DUART;
+
+ sbd_init_port(sport);
+ }
+}
+
+static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ if (ser->baud_base != uport->uartclk / 16)
+ ret = -EINVAL;
+ return ret;
+}
+
+
+static struct uart_ops sbd_ops = {
+ .tx_empty = sbd_tx_empty,
+ .set_mctrl = sbd_set_mctrl,
+ .get_mctrl = sbd_get_mctrl,
+ .stop_tx = sbd_stop_tx,
+ .start_tx = sbd_start_tx,
+ .stop_rx = sbd_stop_rx,
+ .enable_ms = sbd_enable_ms,
+ .break_ctl = sbd_break_ctl,
+ .startup = sbd_startup,
+ .shutdown = sbd_shutdown,
+ .set_termios = sbd_set_termios,
+ .type = sbd_type,
+ .release_port = sbd_release_port,
+ .request_port = sbd_request_port,
+ .config_port = sbd_config_port,
+ .verify_port = sbd_verify_port,
+};
+
+/* Initialize SB1250 DUART port structures. */
+static void __init sbd_probe_duarts(void)
+{
+ static int probed;
+ int chip, side;
+ int max_lines, line;
+
+ if (probed)
+ return;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ max_lines = 4;
+ break;
+ default:
+ /* Assume at least two serial ports at the normal address. */
+ max_lines = 2;
+ break;
+ }
+
+ probed = 1;
+
+ for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
+ chip++) {
+ sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
+
+ for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
+ side++, line++) {
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+
+ sport->duart = &sbd_duarts[chip];
+
+ uport->irq = SBD_INT(line);
+ uport->uartclk = 100000000 / 20 * 16;
+ uport->fifosize = 16;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &sbd_ops;
+ uport->line = line;
+ uport->mapbase = SBD_CHANREGS(line);
+ }
+ }
+}
+
+
+#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
+/*
+ * Serial console stuff. Very basic, polling driver for doing serial
+ * console output. The console_sem is held by the caller, so we
+ * shouldn't be interrupted for more console activity.
+ */
+static void sbd_console_putchar(struct uart_port *uport, int ch)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ sbd_transmit_drain(sport);
+ write_sbdchn(sport, R_DUART_TX_HOLD, ch);
+}
+
+static void sbd_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ unsigned long flags;
+ unsigned int mask;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+ spin_lock_irqsave(&uport->lock, flags);
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ mask & ~M_DUART_IMR_TX);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ uart_console_write(&sport->port, s, count, sbd_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+ spin_lock_irqsave(&uport->lock, flags);
+ sbd_line_drain(sport);
+ if (sport->tx_stopped)
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int __init sbd_console_setup(struct console *co, char *options)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ if (!sport->duart)
+ return -ENXIO;
+
+ ret = sbd_map_port(uport);
+ if (ret)
+ return ret;
+
+ sbd_init_port(sport);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sbd_reg;
+static struct console sbd_console = {
+ .name = "duart",
+ .write = sbd_console_write,
+ .device = uart_console_device,
+ .setup = sbd_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sbd_reg
+};
+
+static int __init sbd_serial_console_init(void)
+{
+ sbd_probe_duarts();
+ register_console(&sbd_console);
+
+ return 0;
+}
+
+console_initcall(sbd_serial_console_init);
+
+#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
+#else
+#define SERIAL_SB1250_DUART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
+
+
+static struct uart_driver sbd_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "duart",
+ .major = TTY_MAJOR,
+ .minor = SB1250_DUART_MINOR_BASE,
+ .nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
+ .cons = SERIAL_SB1250_DUART_CONSOLE,
+};
+
+/* Set up the driver and register it. */
+static int __init sbd_init(void)
+{
+ int i, ret;
+
+ sbd_probe_duarts();
+
+ ret = uart_register_driver(&sbd_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
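+ /* Only ports set up by sbd_probe_duarts() have a duart back-pointer. */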
+ if (sport->duart)
+ uart_add_one_port(&sbd_reg, uport);
+ }
+
+ return 0;
+}
+
+/* Unload the driver. Unregister stuff, get ready to go away. */
+static void __exit sbd_exit(void)
+{
+ int i;
+
+ for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
+ if (sport->duart)
+ uart_remove_one_port(&sbd_reg, uport);
+ }
+
+ uart_unregister_driver(&sbd_reg);
+}
+
+module_init(sbd_init);
+module_exit(sbd_exit);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5e3f748f269..b91571122da 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,15 @@ config SPI_IMX
This enables using the Freescale iMX SPI controller in master
mode.
+config SPI_LM70_LLP
+ tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
+ depends on SPI_MASTER && PARPORT && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This driver supports the NS LM70 LLP Evaluation Board,
+ which interfaces to an LM70 temperature sensor using
+ a parallel port.
+
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL
@@ -133,6 +142,12 @@ config SPI_OMAP_UWIRE
help
This hooks up to the MicroWire controller on OMAP1 chips.
+config SPI_OMAP24XX
+ tristate "McSPI driver for OMAP24xx"
+ depends on SPI_MASTER && ARCH_OMAP24XX
+ help
+ SPI master controller for OMAP24xx Multichannel SPI
+ (McSPI) modules.
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
@@ -145,17 +160,36 @@ config SPI_PXA2XX
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+ select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs
config SPI_S3C24XX_GPIO
tristate "Samsung S3C24XX series SPI by GPIO"
- depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL
+ depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+ select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs using
GPIO lines to provide the SPI bus. This can be used where
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+
+config SPI_TXX9
+ tristate "Toshiba TXx9 SPI controller"
+ depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
+ help
+ SPI driver for Toshiba TXx9 MIPS SoCs
+
+config SPI_XILINX
+ tristate "Xilinx SPI controller"
+ depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This exposes the SPI controller IP from the Xilinx EDK.
+
+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+ Product Specification document (DS464) for hardware details.
+
#
# Add new SPI master controllers in alphabetical order above this line
#
@@ -187,6 +221,15 @@ config SPI_SPIDEV
Note that this application programming interface is EXPERIMENTAL
and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+config SPI_TLE62X0
+ tristate "Infineon TLE62X0 (for power switching)"
+ depends on SPI_MASTER && SYSFS
+ help
+ SPI driver for Infineon TLE62X0 series line driver chips,
+ such as the TLE6220, TLE6230 and TLE6240. This provides a
+ sysfs interface, with each line presented as a kind of GPIO
+ exposing both switch control and diagnostic feedback.
+
#
# Add new SPI protocol masters in alphabetical order above this line
#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5788d867de8..41fbac45c32 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,17 +17,22 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
+obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
+obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
+obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
obj-$(CONFIG_SPI_AT25) += at25.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
+obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
# ... add above this line ...
# SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 8b2601de363..ad144054da3 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -46,6 +46,7 @@ struct atmel_spi {
struct clk *clk;
struct platform_device *pdev;
unsigned new_1:1;
+ struct spi_device *stay;
u8 stopping;
struct list_head queue;
@@ -62,29 +63,62 @@ struct atmel_spi {
/*
* Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
* they assume that spi slave device state will not change on deselect, so
- * that automagic deselection is OK. Not so! Workaround uses nCSx pins
- * as GPIOs; or newer controllers have CSAAT and friends.
+ * that automagic deselection is OK. ("NPCSx rises if no data is to be
+ * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
+ * controllers have CSAAT and friends.
*
- * Since the CSAAT functionality is a bit weird on newer controllers
- * as well, we use GPIO to control nCSx pins on all controllers.
+ * Since the CSAAT functionality is a bit weird on newer controllers as
+ * well, we use GPIO to control nCSx pins on all controllers, updating
+ * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
+ * support active-high chipselects despite the controller's belief that
+ * only active-low devices/systems exist.
+ *
+ * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
+ * right when driven with GPIO. ("Mode Fault does not allow more than one
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
*/
-static inline void cs_activate(struct spi_device *spi)
+static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
unsigned gpio = (unsigned) spi->controller_data;
unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
+
+ mr = spi_readl(as, MR);
+ mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
- dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : "");
- gpio_set_value(gpio, active);
+ dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
+ gpio, active ? " (high)" : "",
+ mr);
+
+ if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+ gpio_set_value(gpio, active);
+ spi_writel(as, MR, mr);
}
-static inline void cs_deactivate(struct spi_device *spi)
+static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
unsigned gpio = (unsigned) spi->controller_data;
unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
- dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : "");
- gpio_set_value(gpio, !active);
+ /* only deactivate *this* device; sometimes transfers to
+ * another device may be active when this routine is called.
+ */
+ mr = spi_readl(as, MR);
+ if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+ mr = SPI_BFINS(PCS, 0xf, mr);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
+ gpio, active ? " (low)" : "",
+ mr);
+
+ if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+ gpio_set_value(gpio, !active);
}
/*
@@ -140,6 +174,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
/* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
* mechanism might help avoid the IRQ latency between transfers
+ * (and improve the nCS0 errata handling on at91rm9200 chips)
*
* We're also waiting for ENDRX before we start the next
* transfer because we need to handle some difficult timing
@@ -169,33 +204,62 @@ static void atmel_spi_next_message(struct spi_master *master)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct spi_message *msg;
- u32 mr;
+ struct spi_device *spi;
BUG_ON(as->current_transfer);
msg = list_entry(as->queue.next, struct spi_message, queue);
+ spi = msg->spi;
- /* Select the chip */
- mr = spi_readl(as, MR);
- mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr);
- spi_writel(as, MR, mr);
- cs_activate(msg->spi);
+ dev_dbg(master->cdev.dev, "start message %p for %s\n",
+ msg, spi->dev.bus_id);
+
+ /* select chip if it's not still active */
+ if (as->stay) {
+ if (as->stay != spi) {
+ cs_deactivate(as, as->stay);
+ cs_activate(as, spi);
+ }
+ as->stay = NULL;
+ } else
+ cs_activate(as, spi);
atmel_spi_next_xfer(master, msg);
}
-static void
+/*
+ * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
+ * - The buffer is either valid for CPU access, else NULL
+ * - If the buffer is valid, so is its DMA address
+ *
+ * This driver manages the DMA address unless message->is_dma_mapped.
+ */
+static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
+ struct device *dev = &as->pdev->dev;
+
xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
- if (xfer->tx_buf)
- xfer->tx_dma = dma_map_single(&as->pdev->dev,
+ if (xfer->tx_buf) {
+ xfer->tx_dma = dma_map_single(dev,
(void *) xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
- if (xfer->rx_buf)
- xfer->rx_dma = dma_map_single(&as->pdev->dev,
+ if (dma_mapping_error(xfer->tx_dma))
+ return -ENOMEM;
+ }
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(xfer->rx_dma)) {
+ if (xfer->tx_buf)
+ dma_unmap_single(dev,
+ xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ return 0;
}
static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
@@ -211,9 +275,13 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int status)
+ struct spi_message *msg, int status, int stay)
{
- cs_deactivate(msg->spi);
+ if (!stay || status < 0)
+ cs_deactivate(as, msg->spi);
+ else
+ as->stay = msg->spi;
+
list_del(&msg->queue);
msg->status = status;
@@ -303,7 +371,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- atmel_spi_msg_done(master, as, msg, -EIO);
+ atmel_spi_msg_done(master, as, msg, -EIO, 0);
} else if (pending & SPI_BIT(ENDRX)) {
ret = IRQ_HANDLED;
@@ -321,12 +389,13 @@ atmel_spi_interrupt(int irq, void *dev_id)
if (msg->transfers.prev == &xfer->transfer_list) {
/* report completed message */
- atmel_spi_msg_done(master, as, msg, 0);
+ atmel_spi_msg_done(master, as, msg, 0,
+ xfer->cs_change);
} else {
if (xfer->cs_change) {
- cs_deactivate(msg->spi);
+ cs_deactivate(as, msg->spi);
udelay(1);
- cs_activate(msg->spi);
+ cs_activate(as, msg->spi);
}
/*
@@ -350,6 +419,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
return ret;
}
+/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
static int atmel_spi_setup(struct spi_device *spi)
@@ -388,6 +458,14 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
+ /* see notes above re chipselect */
+ if (cpu_is_at91rm9200()
+ && spi->chip_select == 0
+ && (spi->mode & SPI_CS_HIGH)) {
+ dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ return -EINVAL;
+ }
+
/* speed zero convention is used by some upper layers */
bus_hz = clk_get_rate(as->clk);
if (spi->max_speed_hz) {
@@ -397,8 +475,9 @@ static int atmel_spi_setup(struct spi_device *spi)
scbr = ((bus_hz + spi->max_speed_hz - 1)
/ spi->max_speed_hz);
if (scbr >= (1 << SPI_SCBR_SIZE)) {
- dev_dbg(&spi->dev, "setup: %d Hz too slow, scbr %u\n",
- spi->max_speed_hz, scbr);
+ dev_dbg(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ spi->max_speed_hz, scbr, bus_hz/255);
return -EINVAL;
}
} else
@@ -423,6 +502,14 @@ static int atmel_spi_setup(struct spi_device *spi)
return ret;
spi->controller_state = (void *)npcs_pin;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ if (as->stay == spi)
+ as->stay = NULL;
+ cs_deactivate(as, spi);
+ spin_unlock_irqrestore(&as->lock, flags);
}
dev_dbg(&spi->dev,
@@ -464,14 +551,22 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
dev_dbg(&spi->dev, "no protocol options yet\n");
return -ENOPROTOOPT;
}
- }
- /* scrub dcache "early" */
- if (!msg->is_dma_mapped) {
- list_for_each_entry(xfer, &msg->transfers, transfer_list)
- atmel_spi_dma_map_xfer(as, xfer);
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting. This is a DMA-only driver.
+ *
+ * NOTE that if dma_unmap_single() ever starts to do work on
+ * platforms supported by this driver, we would need to clean
+ * up mappings for previously-mapped transfers.
+ */
+ if (!msg->is_dma_mapped) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
}
+#ifdef VERBOSE
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_dbg(controller,
" xfer %p: len %u tx %p/%08x rx %p/%08x\n",
@@ -479,6 +574,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
xfer->tx_buf, xfer->tx_dma,
xfer->rx_buf, xfer->rx_dma);
}
+#endif
msg->status = -EINPROGRESS;
msg->actual_length = 0;
@@ -494,8 +590,21 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
static void atmel_spi_cleanup(struct spi_device *spi)
{
- if (spi->controller_state)
- gpio_free((unsigned int)spi->controller_data);
+ struct atmel_spi *as = spi_master_get_devdata(spi->master);
+ unsigned gpio = (unsigned) spi->controller_data;
+ unsigned long flags;
+
+ if (!spi->controller_state)
+ return;
+
+ spin_lock_irqsave(&as->lock, flags);
+ if (as->stay == spi) {
+ as->stay = NULL;
+ cs_deactivate(as, spi);
+ }
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ gpio_free(gpio);
}
/*-------------------------------------------------------------------------*/
@@ -536,6 +645,10 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
as = spi_master_get_devdata(master);
+ /*
+ * Scratch buffer is used for throwaway rx and tx data.
+ * It's coherent to minimize dcache pollution.
+ */
as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
&as->buffer_dma, GFP_KERNEL);
if (!as->buffer)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index ae2b1af0dba..c47a650183a 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -280,6 +280,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
static int au1550_spi_setup(struct spi_device *spi)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
@@ -292,6 +295,12 @@ static int au1550_spi_setup(struct spi_device *spi)
return -EINVAL;
}
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (spi->max_speed_hz == 0)
spi->max_speed_hz = hw->freq_max;
if (spi->max_speed_hz > hw->freq_max
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 11f36bef305..d2a4b2bdb07 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -270,6 +270,9 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
spin_unlock_irq(&mps->lock);
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
static int mpc52xx_psc_spi_setup(struct spi_device *spi)
{
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -279,6 +282,12 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
if (spi->bits_per_word%8)
return -EINVAL;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
new file mode 100644
index 00000000000..6b357cdb9ea
--- /dev/null
+++ b/drivers/spi/omap2_mcspi.c
@@ -0,0 +1,1081 @@
+/*
+ * OMAP2 McSPI controller driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/clock.h>
+
+
+#define OMAP2_MCSPI_MAX_FREQ 48000000
+
+#define OMAP2_MCSPI_REVISION 0x00
+#define OMAP2_MCSPI_SYSCONFIG 0x10
+#define OMAP2_MCSPI_SYSSTATUS 0x14
+#define OMAP2_MCSPI_IRQSTATUS 0x18
+#define OMAP2_MCSPI_IRQENABLE 0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE 0x20
+#define OMAP2_MCSPI_SYST 0x24
+#define OMAP2_MCSPI_MODULCTRL 0x28
+
+/* per-channel banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHCONF0 0x2c
+#define OMAP2_MCSPI_CHSTAT0 0x30
+#define OMAP2_MCSPI_CHCTRL0 0x34
+#define OMAP2_MCSPI_TX0 0x38
+#define OMAP2_MCSPI_RX0 0x3c
+
+/* per-register bitmasks: */
+
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0)
+#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2)
+#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3)
+
+#define OMAP2_MCSPI_CHCONF_PHA (1 << 0)
+#define OMAP2_MCSPI_CHCONF_POL (1 << 1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14)
+#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15)
+#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16)
+#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17)
+#define OMAP2_MCSPI_CHCONF_IS (1 << 18)
+#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19)
+#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20)
+
+#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0)
+#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1)
+#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2)
+
+#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
+
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct omap2_mcspi_dma {
+ int dma_tx_channel;
+ int dma_rx_channel;
+
+ int dma_tx_sync_dev;
+ int dma_rx_sync_dev;
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
+};
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
+
+
+struct omap2_mcspi {
+ struct work_struct work;
+ /* lock protects queue and registers */
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct spi_master *master;
+ struct clk *ick;
+ struct clk *fck;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ /* SPI1 has 4 channels, while SPI2 has 2 */
+ struct omap2_mcspi_dma *dma_channels;
+};
+
+struct omap2_mcspi_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static struct workqueue_struct *omap2_mcspi_wq;
+
+#define MOD_REG_BIT(val, mask, set) do { \
+ if (set) \
+ val |= mask; \
+ else \
+ val &= ~mask; \
+} while (0)
+
+static inline void mcspi_write_reg(struct spi_master *master,
+ int idx, u32 val)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ __raw_writel(val, mcspi->base + idx);
+}
+
+static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ return __raw_readl(mcspi->base + idx);
+}
+
+static inline void mcspi_write_cs_reg(const struct spi_device *spi,
+ int idx, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ __raw_writel(val, cs->base + idx);
+}
+
+static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return __raw_readl(cs->base + idx);
+}
+
+static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
+ int is_read, int enable)
+{
+ u32 l, rw;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+ if (is_read) /* 1 is read, 0 write */
+ rw = OMAP2_MCSPI_CHCONF_DMAR;
+ else
+ rw = OMAP2_MCSPI_CHCONF_DMAW;
+
+ MOD_REG_BIT(l, rw, enable);
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+{
+ u32 l;
+
+ l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+}
+
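+/* Manually assert or deassert the chipselect via the CHCONF FORCE bit. */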
+static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
+{
+ u32 l;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_master_mode(struct spi_master *master)
+{
+ u32 l;
+
+ /* setup when switching from (reset default) slave mode
+ * to single-channel master mode
+ */
+ l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count, c;
+ unsigned long base, tx_reg, rx_reg;
+ int word_len, data_type, element_count;
+ u8 *rx;
+ const u8 *tx;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ base = (unsigned long) io_v2p(cs->base);
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ if (word_len <= 8) {
+ data_type = OMAP_DMA_DATA_TYPE_S8;
+ element_count = count;
+ } else if (word_len <= 16) {
+ data_type = OMAP_DMA_DATA_TYPE_S16;
+ element_count = count >> 1;
+ } else /* word_len <= 32 */ {
+ data_type = OMAP_DMA_DATA_TYPE_S32;
+ element_count = count >> 2;
+ }
+
+ if (tx != NULL) {
+ omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
+ data_type, element_count, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ mcspi_dma->dma_tx_sync_dev, 0);
+
+ omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ tx_reg, 0, 0);
+
+ omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ xfer->tx_dma, 0, 0);
+ }
+
+ if (rx != NULL) {
+ omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
+ data_type, element_count, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ mcspi_dma->dma_rx_sync_dev, 1);
+
+ omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ rx_reg, 0, 0);
+
+ omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ xfer->rx_dma, 0, 0);
+ }
+
+ if (tx != NULL) {
+ omap_start_dma(mcspi_dma->dma_tx_channel);
+ omap2_mcspi_set_dma_req(spi, 0, 1);
+ }
+
+ if (rx != NULL) {
+ omap_start_dma(mcspi_dma->dma_rx_channel);
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+ }
+
+ if (tx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
+ }
+
+ if (rx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_rx_completion);
+ dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
+ }
+ return count;
+}
+
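+/* Busy-wait, with cpu_relax(), for a status bit; give up after about a second. */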
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(__raw_readl(reg) & bit)) {
+ if (time_after(jiffies, timeout))
+ return -1;
+ cpu_relax();
+ }
+ return 0;
+}
+
+static unsigned
+omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ u32 l;
+ void __iomem *base = cs->base;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+ void __iomem *chstat_reg;
+ int word_len;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+
+ /* We store the pre-calculated register addresses on stack to speed
+ * up the transfer loop. */
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %02x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 1;
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 2;
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 4;
+ } while (c);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (xfer->rx_buf == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ } else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+out:
+ return count - c;
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ u32 l = 0, div = 0;
+ u8 word_len = spi->bits_per_word;
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+
+ cs->word_len = word_len;
+
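+ /* Pick the smallest power-of-two divisor (2^div) of the 48 MHz
+ * functional clock that does not exceed max_speed_hz; div caps at 15.
+ */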
+ if (spi->max_speed_hz) {
+ while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
+ > spi->max_speed_hz)
+ div++;
+ } else
+ div = 15;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+ /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+
+ /* wordlength */
+ l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+ l |= (word_len - 1) << 7;
+
+ /* set chipselect polarity; manage with FORCE */
+ if (!(spi->mode & SPI_CS_HIGH))
+ l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+ /* set clock divisor */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+ l |= div << 2;
+
+ /* set SPI mode 0..3 */
+ if (spi->mode & SPI_CPOL)
+ l |= OMAP2_MCSPI_CHCONF_POL;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_POL;
+ if (spi->mode & SPI_CPHA)
+ l |= OMAP2_MCSPI_CHCONF_PHA;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+
+ dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+ OMAP2_MCSPI_MAX_FREQ / (1 << div),
+ (spi->mode & SPI_CPHA) ? "trailing" : "leading",
+ (spi->mode & SPI_CPOL) ? "inverted" : "normal");
+
+ return 0;
+}
+
+static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
+static int omap2_mcspi_request_dma(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+
+ if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
+ omap2_mcspi_dma_rx_callback, spi,
+ &mcspi_dma->dma_rx_channel)) {
+ dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+ return -EAGAIN;
+ }
+
+ if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
+ omap2_mcspi_dma_tx_callback, spi,
+ &mcspi_dma->dma_tx_channel)) {
+ omap_free_dma(mcspi_dma->dma_rx_channel);
+ mcspi_dma->dma_rx_channel = -1;
+ dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ return -EAGAIN;
+ }
+
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap2_mcspi_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ if (spi->bits_per_word == 0)
+ spi->bits_per_word = 8;
+ else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+ dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = mcspi->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ if (mcspi_dma->dma_rx_channel == -1
+ || mcspi_dma->dma_tx_channel == -1) {
+ ret = omap2_mcspi_request_dma(spi);
+ if (ret < 0)
+ return ret;
+ }
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+ ret = omap2_mcspi_setup_transfer(spi, NULL);
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+
+ return ret;
+}
+
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ kfree(spi->controller_state);
+
+ if (mcspi_dma->dma_rx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_rx_channel);
+ mcspi_dma->dma_rx_channel = -1;
+ }
+ if (mcspi_dma->dma_tx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_tx_channel);
+ mcspi_dma->dma_tx_channel = -1;
+ }
+}
+
+static void omap2_mcspi_work(struct work_struct *work)
+{
+ struct omap2_mcspi *mcspi;
+
+ mcspi = container_of(work, struct omap2_mcspi, work);
+ spin_lock_irq(&mcspi->lock);
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+ while (!list_empty(&mcspi->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap2_mcspi_device_config *conf;
+ struct omap2_mcspi_cs *cs;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
+
+ m = container_of(mcspi->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mcspi->lock);
+
+ spi = m->spi;
+ conf = spi->controller_data;
+ cs = spi->controller_state;
+
+ omap2_mcspi_set_enable(spi, 1);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
+
+ if (!cs_active) {
+ omap2_mcspi_force_cs(spi, 1);
+ cs_active = 1;
+ }
+
+ chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
+
+ if (t->len) {
+ unsigned count;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ __raw_writel(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+ if (t->cs_change) {
+ omap2_mcspi_force_cs(spi, 0);
+ cs_active = 0;
+ }
+ }
+
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap2_mcspi_force_cs(spi, 0);
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock_irq(&mcspi->lock);
+ }
+
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+
+ spin_unlock_irq(&mcspi->lock);
+}
+
+static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct omap2_mcspi *mcspi;
+ unsigned long flags;
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = 0;
+
+ /* reject invalid messages and transfers */
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
+
+ if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
+ || (len && !(rx_buf || tx_buf))
+ || (t->bits_per_word &&
+ ( t->bits_per_word < 4
+ || t->bits_per_word > 32))) {
+ dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+ if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
+ dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
+ t->speed_hz,
+ OMAP2_MCSPI_MAX_FREQ/(1<<16));
+ return -EINVAL;
+ }
+
+ if (m->is_dma_mapped || len < DMA_MIN_BYTES)
+ continue;
+
+ /* Do DMA mapping "early" for better error reporting and
+ * dcache use. Note that if dma_unmap_single() ever starts
+ * to do real work on ARM, we'd need to clean up mappings
+ * for previous transfers on *ALL* exits of this loop...
+ */
+ if (tx_buf != NULL) {
+ t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(t->tx_dma)) {
+ dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ 'T', len);
+ return -EINVAL;
+ }
+ }
+ if (rx_buf != NULL) {
+ t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(t->rx_dma)) {
+ dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ 'R', len);
+ if (tx_buf != NULL)
+ dma_unmap_single(NULL, t->tx_dma,
+ len, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+ }
+ }
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ spin_lock_irqsave(&mcspi->lock, flags);
+ list_add_tail(&m->queue, &mcspi->msg_queue);
+ queue_work(omap2_mcspi_wq, &mcspi->work);
+ spin_unlock_irqrestore(&mcspi->lock, flags);
+
+ return 0;
+}
+
+static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *master = mcspi->master;
+ u32 tmp;
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+
+ mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+ OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
+ do {
+ tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
+ } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+
+ mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+ /* (3 << 8) | (2 << 3) | */
+ OMAP2_MCSPI_SYSCONFIG_AUTOIDLE);
+
+ omap2_mcspi_set_master_mode(master);
+
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+ return 0;
+}
+
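+/* Per-chipselect DMA request lines: McSPI1 has four chipselects, McSPI2 has two. */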
+static u8 __initdata spi1_rxdma_id[] = {
+ OMAP24XX_DMA_SPI1_RX0,
+ OMAP24XX_DMA_SPI1_RX1,
+ OMAP24XX_DMA_SPI1_RX2,
+ OMAP24XX_DMA_SPI1_RX3,
+};
+
+static u8 __initdata spi1_txdma_id[] = {
+ OMAP24XX_DMA_SPI1_TX0,
+ OMAP24XX_DMA_SPI1_TX1,
+ OMAP24XX_DMA_SPI1_TX2,
+ OMAP24XX_DMA_SPI1_TX3,
+};
+
+static u8 __initdata spi2_rxdma_id[] = {
+ OMAP24XX_DMA_SPI2_RX0,
+ OMAP24XX_DMA_SPI2_RX1,
+};
+
+static u8 __initdata spi2_txdma_id[] = {
+ OMAP24XX_DMA_SPI2_TX0,
+ OMAP24XX_DMA_SPI2_TX1,
+};
+
+static int __init omap2_mcspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+ int status = 0, i;
+ const u8 *rxdma_id, *txdma_id;
+ unsigned num_chipselect;
+
+ switch (pdev->id) {
+ case 1:
+ rxdma_id = spi1_rxdma_id;
+ txdma_id = spi1_txdma_id;
+ num_chipselect = 4;
+ break;
+ case 2:
+ rxdma_id = spi2_rxdma_id;
+ txdma_id = spi2_txdma_id;
+ num_chipselect = 2;
+ break;
+ /* REVISIT omap2430 has a third McSPI ... */
+ default:
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap2_mcspi_setup;
+ master->transfer = omap2_mcspi_transfer;
+ master->cleanup = omap2_mcspi_cleanup;
+ master->num_chipselect = num_chipselect;
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ status = -ENODEV;
+ goto err1;
+ }
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ pdev->dev.bus_id)) {
+ status = -EBUSY;
+ goto err1;
+ }
+
+ mcspi->base = (void __iomem *) io_p2v(r->start);
+
+ INIT_WORK(&mcspi->work, omap2_mcspi_work);
+
+ spin_lock_init(&mcspi->lock);
+ INIT_LIST_HEAD(&mcspi->msg_queue);
+
+ mcspi->ick = clk_get(&pdev->dev, "mcspi_ick");
+ if (IS_ERR(mcspi->ick)) {
+ dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
+ status = PTR_ERR(mcspi->ick);
+ goto err1a;
+ }
+ mcspi->fck = clk_get(&pdev->dev, "mcspi_fck");
+ if (IS_ERR(mcspi->fck)) {
+ dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
+ status = PTR_ERR(mcspi->fck);
+ goto err2;
+ }
+
+ mcspi->dma_channels = kcalloc(master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+ goto err3;
+ }
+
+ for (i = 0; i < num_chipselect; i++) {
+ mcspi->dma_channels[i].dma_rx_channel = -1;
+ mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+ mcspi->dma_channels[i].dma_tx_channel = -1;
+ mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+ }
+
+ if (omap2_mcspi_reset(mcspi) < 0)
+ goto err4;
+
+ status = spi_register_master(master);
+ if (status < 0)
+ goto err4;
+
+ return status;
+
+err4:
+ kfree(mcspi->dma_channels);
+err3:
+ clk_put(mcspi->fck);
+err2:
+ clk_put(mcspi->ick);
+err1a:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+err1:
+ spi_master_put(master);
+ return status;
+}
+
+static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *dma_channels;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ mcspi = spi_master_get_devdata(master);
+ dma_channels = mcspi->dma_channels;
+
+ clk_put(mcspi->fck);
+ clk_put(mcspi->ick);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ spi_unregister_master(master);
+ kfree(dma_channels);
+
+ return 0;
+}
+
+static struct platform_driver omap2_mcspi_driver = {
+ .driver = {
+ .name = "omap2_mcspi",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(omap2_mcspi_remove),
+};
+
+
+static int __init omap2_mcspi_init(void)
+{
+ omap2_mcspi_wq = create_singlethread_workqueue(
+ omap2_mcspi_driver.driver.name);
+ if (omap2_mcspi_wq == NULL)
+ return -ENOMEM;
+ return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
+}
+subsys_initcall(omap2_mcspi_init);
+
+static void __exit omap2_mcspi_exit(void)
+{
+ platform_driver_unregister(&omap2_mcspi_driver);
+
+ destroy_workqueue(omap2_mcspi_wq);
+}
+module_exit(omap2_mcspi_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 95183e1df52..d275c615a73 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -445,10 +445,19 @@ done:
return status;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9f2c887ffa0..e51311b2da0 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1067,6 +1067,9 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info = NULL;
@@ -1093,6 +1096,12 @@ static int setup(struct spi_device *spi)
return -EINVAL;
}
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4831edbae2d..018884d7a5f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/mutex.h>
#include <linux/spi/spi.h>
@@ -185,7 +186,7 @@ struct boardinfo {
};
static LIST_HEAD(board_list);
-static DECLARE_MUTEX(board_lock);
+static DEFINE_MUTEX(board_lock);
/**
@@ -292,9 +293,9 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
bi->n_board_info = n;
memcpy(bi->board_info, info, n * sizeof *info);
- down(&board_lock);
+ mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
- up(&board_lock);
+ mutex_unlock(&board_lock);
return 0;
}
@@ -308,7 +309,7 @@ scan_boardinfo(struct spi_master *master)
struct boardinfo *bi;
struct device *dev = master->cdev.dev;
- down(&board_lock);
+ mutex_lock(&board_lock);
list_for_each_entry(bi, &board_list, list) {
struct spi_board_info *chip = bi->board_info;
unsigned n;
@@ -330,7 +331,7 @@ scan_boardinfo(struct spi_master *master)
(void) spi_new_device(master, chip);
}
}
- up(&board_lock);
+ mutex_unlock(&board_lock);
}
/*-------------------------------------------------------------------------*/
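The board_lock conversion above follows the standard semaphore-to-mutex recipe; as a standalone sketch (list and lock names are made up):

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_lock);		/* was: DECLARE_MUTEX(example_lock) */

static void example_add(struct list_head *entry)
{
	mutex_lock(&example_lock);		/* was: down(&example_lock) */
	list_add_tail(entry, &example_list);
	mutex_unlock(&example_lock);		/* was: up(&example_lock) */
}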
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 88425e1af4d..0c85c984ccb 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -187,12 +187,10 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
- /* REVISIT: some systems will want to support devices using lsb-first
- * bit encodings on the wire. In pure software that would be trivial,
- * just bitbang_txrx_le_cphaX() routines shifting the other way, and
- * some hardware controllers also have this support.
+ /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
+ * add those to bitbang->flags, and provide the supporting code.
*/
- if ((spi->mode & SPI_LSB_FIRST) != 0)
+ if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
return -EINVAL;
if (!cs) {
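A hedged sketch of how a bitbang master driver opts in to the extra mode bits under the stricter check above (hypothetical probe; the real hookup of chipselect and txrx_word is omitted):

static int example_bitbang_probe(struct spi_master *master)
{
	struct spi_bitbang *bitbang = spi_master_get_devdata(master);

	bitbang->master = spi_master_get(master);
	/* advertise support beyond plain CPOL/CPHA */
	bitbang->flags = SPI_3WIRE | SPI_CS_HIGH;
	/* bitbang->chipselect and bitbang->txrx_word[] setup omitted */
	return spi_bitbang_start(bitbang);
}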
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 656be4a5094..aee9ad6f633 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1163,6 +1163,9 @@ msg_rejected:
return -EINVAL;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 /* On the first setup, bad values must free the chip_data memory, since they
 will make spi_new_device() fail. On later setups, bad values from the protocol
 driver are simply not applied, and the calling driver is notified. */
@@ -1174,6 +1177,12 @@ static int setup(struct spi_device *spi)
u32 tmp;
int status = 0;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
/* Get controller data */
chip_info = spi->controller_data;
@@ -1245,21 +1254,6 @@ static int setup(struct spi_device *spi)
/* SPI mode */
tmp = spi->mode;
- if (tmp & SPI_LSB_FIRST) {
- status = -EINVAL;
- if (first_setup) {
- dev_err(&spi->dev,
- "setup - "
- "HW doesn't support LSB first transfer\n");
- goto err_first_setup;
- } else {
- dev_err(&spi->dev,
- "setup - "
- "HW doesn't support LSB first transfer, "
- "default to MSB first\n");
- spi->mode &= ~SPI_LSB_FIRST;
- }
- }
if (tmp & SPI_CS_HIGH) {
u32_EDIT(chip->control,
SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
new file mode 100644
index 00000000000..4ea68ac1611
--- /dev/null
+++ b/drivers/spi/spi_lm70llp.c
@@ -0,0 +1,361 @@
+/*
+ * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
+ *
+ * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*
+ * The LM70 communicates with a host processor using a 3-wire variant of
+ * the SPI/Microwire bus interface. This driver specifically supports an
+ * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
+ * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
+ * master controller driver. The hwmon/lm70 driver is a "SPI protocol
+ * driver", layered on top of this one and usable without the lm70llp.
+ *
+ * The LM70 is a temperature sensor chip from National Semiconductor; its
+ * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ *
+ * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
+ * (heavily) based on spi-butterfly by David Brownell.
+ *
+ * The LM70 LLP connects to the PC parallel port in the following manner:
+ *
+ * Parallel LM70 LLP
+ * Port Direction JP2 Header
+ * ----------- --------- ------------
+ * D0 2 - -
+ * D1 3 --> V+ 5
+ * D2 4 --> V+ 5
+ * D3 5 --> V+ 5
+ * D4 6 --> V+ 5
+ * D5 7 --> nCS 8
+ * D6 8 --> SCLK 3
+ * D7 9 --> SI/O 5
+ * GND 25 - GND 7
+ * Select 13 <-- SI/O 1
+ *
+ * Note that parport pin 13 actually gets inverted by the transistor
+ * arrangement which lets either the parport or the LM70 drive the
+ * SI/SO signal.
+ */
+
+#define DRVNAME "spi-lm70llp"
+
+#define lm70_INIT 0xBE
+#define SIO 0x10
+#define nCS 0x20
+#define SCLK 0x40
+
+/*-------------------------------------------------------------------------*/
+
+struct spi_lm70llp {
+ struct spi_bitbang bitbang;
+ struct parport *port;
+ struct pardevice *pd;
+ struct spi_device *spidev_lm70;
+ struct spi_board_info info;
+ struct class_device *cdev;
+};
+
+/* REVISIT: ugly global; provides "exclusive open" facility */
+static struct spi_lm70llp *lm70llp;
+
+
+/*-------------------------------------------------------------------*/
+
+static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+/*---------------------- LM70 LLP eval board-specific inlines follow */
+
+/* NOTE: we don't actually need to reread the output values, since they'll
+ * still be what we wrote before. Plus, going through parport builds in
+ * a ~1ms/operation delay; these SPI transfers could easily be faster.
+ */
+
+static inline void deassertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data | nCS);
+}
+
+static inline void assertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data & ~nCS);
+}
+
+static inline void clkHigh(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data | SCLK);
+}
+
+static inline void clkLow(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data & ~SCLK);
+}
+
+/*------------------------- SPI-LM70-specific inlines ----------------------*/
+
+static inline void spidelay(unsigned d)
+{
+ udelay(d);
+}
+
+static inline void setsck(struct spi_device *s, int is_on)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ if (is_on)
+ clkHigh(pp);
+ else
+ clkLow(pp);
+}
+
+static inline void setmosi(struct spi_device *s, int is_on)
+{
+ /* FIXME update D7 ... this way we can put the chip
+ * into shutdown mode and read the manufacturer ID,
+ * but we can't put it back into operational mode.
+ */
+}
+
+/*
+ * getmiso:
+ * Why do we return 0 when the SIO line is high, and vice versa?
+ * The LM70 eval board from NS (which this driver drives) is wired so that
+ * when the LM70's SIO goes high, a transistor pulls parport pin 13 low,
+ * and vice versa.
+ */
+static inline int getmiso(struct spi_device *s)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+ return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 );
+}
+/*--------------------------------------------------------------------*/
+
+#define EXPAND_BITBANG_TXRX 1
+#include <linux/spi/spi_bitbang.h>
+
+static void lm70_chipselect(struct spi_device *spi, int value)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(spi);
+
+ if (value)
+ assertCS(pp);
+ else
+ deassertCS(pp);
+}
+
+/*
+ * Our actual bitbanger routine.
+ */
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+{
+ static u32 sio = 0;
+ static int first_time = 1;
+
+ /* First time: perform SPI bitbang and return the LSB of
+ * the result of the SPI call.
+ */
+ if (first_time) {
+ sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ first_time=0;
+ return (sio & 0x00ff);
+ }
+ /* Return the MSB of the result of the SPI call */
+ else {
+ first_time=1;
+ return (sio >> 8);
+ }
+}
+
+static void spi_lm70llp_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ struct spi_lm70llp *pp;
+ struct spi_master *master;
+ int status;
+
+ if (lm70llp) {
+ printk(KERN_WARNING
+ "%s: spi_lm70llp instance already loaded. Aborting.\n",
+ DRVNAME);
+ return;
+ }
+
+ /* TODO: this just _assumes_ a lm70 is there ... no probe;
+ * the lm70 driver could verify it, reading the manf ID.
+ */
+
+ master = spi_alloc_master(p->physport->dev, sizeof *pp);
+ if (!master) {
+ status = -ENOMEM;
+ goto out_fail;
+ }
+ pp = spi_master_get_devdata(master);
+
+ master->bus_num = -1; /* dynamic alloc of a bus number */
+ master->num_chipselect = 1;
+
+ /*
+ * SPI and bitbang hookup.
+ */
+ pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.chipselect = lm70_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
+ pp->bitbang.flags = SPI_3WIRE;
+
+ /*
+ * Parport hookup
+ */
+ pp->port = p;
+ pd = parport_register_device(p, DRVNAME,
+ NULL, NULL, NULL,
+ PARPORT_FLAG_EXCL, pp);
+ if (!pd) {
+ status = -ENOMEM;
+ goto out_free_master;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto out_parport_unreg;
+
+ /*
+ * Start SPI ...
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0) {
+ printk(KERN_WARNING
+ "%s: spi_bitbang_start failed with status %d\n",
+ DRVNAME, status);
+ goto out_off_and_release;
+ }
+
+ /*
+ * The modalias name MUST match the device_driver name
+ * for the bus glue code to match and subsequently bind them.
+ * We are binding to the generic drivers/hwmon/lm70.c device
+ * driver.
+ */
+ strcpy(pp->info.modalias, "lm70");
+ pp->info.max_speed_hz = 6 * 1000 * 1000;
+ pp->info.chip_select = 0;
+ pp->info.mode = SPI_3WIRE | SPI_MODE_0;
+
+ /* power up the chip, and let the LM70 control SI/SO */
+ parport_write_data(pp->port, lm70_INIT);
+
+ /* Enable access to our primary data structure via
+ * the board info's (void *)controller_data.
+ */
+ pp->info.controller_data = pp;
+ pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+ if (pp->spidev_lm70)
+ dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
+ pp->spidev_lm70->dev.bus_id);
+ else {
+ printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
+ status = -ENODEV;
+ goto out_bitbang_stop;
+ }
+ pp->spidev_lm70->bits_per_word = 16;
+
+ lm70llp = pp;
+
+ return;
+
+out_bitbang_stop:
+ spi_bitbang_stop(&pp->bitbang);
+out_off_and_release:
+ /* power down */
+ parport_write_data(pp->port, 0);
+ mdelay(10);
+ parport_release(pp->pd);
+out_parport_unreg:
+ parport_unregister_device(pd);
+out_free_master:
+ (void) spi_master_put(master);
+out_fail:
+ pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
+}
+
+static void spi_lm70llp_detach(struct parport *p)
+{
+ struct spi_lm70llp *pp;
+
+ if (!lm70llp || lm70llp->port != p)
+ return;
+
+ pp = lm70llp;
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* power down */
+ parport_write_data(pp->port, 0);
+ msleep(10);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ (void) spi_master_put(pp->bitbang.master);
+
+ lm70llp = NULL;
+}
+
+
+static struct parport_driver spi_lm70llp_drv = {
+ .name = DRVNAME,
+ .attach = spi_lm70llp_attach,
+ .detach = spi_lm70llp_detach,
+};
+
+static int __init init_spi_lm70llp(void)
+{
+ return parport_register_driver(&spi_lm70llp_drv);
+}
+module_init(init_spi_lm70llp);
+
+static void __exit cleanup_spi_lm70llp(void)
+{
+ parport_unregister_driver(&spi_lm70llp_drv);
+}
+module_exit(cleanup_spi_lm70llp);
+
+MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
+MODULE_DESCRIPTION(
+ "Parport adapter for the National Semiconductor LM70 LLP eval board");
+MODULE_LICENSE("GPL");
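The modalias comment in the attach routine above refers to the protocol driver this board binds to; a skeleton of such a driver might look as follows (probe/remove bodies are placeholders, not the real hwmon/lm70 code):

static int __devinit example_lm70_probe(struct spi_device *spi)
{
	/* the real protocol driver registers an hwmon sensor here */
	return 0;
}

static int __devexit example_lm70_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_lm70_driver = {
	.driver = {
		.name	= "lm70",	/* must match spi_board_info.modalias */
		.owner	= THIS_MODULE,
	},
	.probe	= example_lm70_probe,
	.remove	= __devexit_p(example_lm70_remove),
};
/* registered with spi_register_driver(&example_lm70_driver) at module init */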
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index e9798bf7b8c..3295cfcc9f2 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -47,6 +47,7 @@ struct mpc83xx_spi_reg {
#define SPMODE_ENABLE (1 << 24)
#define SPMODE_LEN(x) ((x) << 20)
#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
/*
* Default for SPI Mode:
@@ -85,6 +86,11 @@ struct mpc83xx_spi {
unsigned nsecs; /* (clock cycle time)/2 */
u32 sysclk;
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+
+ bool qe_mode;
+
void (*activate_cs) (u8 cs, u8 polarity);
void (*deactivate_cs) (u8 cs, u8 polarity);
};
@@ -103,7 +109,7 @@ static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg)
void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
{ \
type * rx = mpc83xx_spi->rx; \
- *rx++ = (type)data; \
+ *rx++ = (type)(data >> mpc83xx_spi->rx_shift); \
mpc83xx_spi->rx = rx; \
}
@@ -114,7 +120,7 @@ u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \
const type * tx = mpc83xx_spi->tx; \
if (!tx) \
return 0; \
- data = *tx++; \
+ data = *tx++ << mpc83xx_spi->tx_shift; \
mpc83xx_spi->tx = tx; \
return data; \
}
@@ -158,6 +164,12 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
+ if (pm > 0x0f) {
+ printk(KERN_WARNING "MPC83xx SPI: SPICLK can't be less then a SYSCLK/1024!\n"
+ "Requested SPICLK is %d Hz. Will use %d Hz instead.\n",
+ spi->max_speed_hz, mpc83xx_spi->sysclk / 1024);
+ pm = 0x0f;
+ }
regval |= SPMODE_PM(pm) | SPMODE_DIV16;
} else {
u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
@@ -197,12 +209,22 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
|| ((bits_per_word > 16) && (bits_per_word != 32)))
return -EINVAL;
+ mpc83xx_spi->rx_shift = 0;
+ mpc83xx_spi->tx_shift = 0;
if (bits_per_word <= 8) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 24;
+ }
} else if (bits_per_word <= 16) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 16;
+ }
} else if (bits_per_word <= 32) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
@@ -232,12 +254,21 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int mpc83xx_spi_setup(struct spi_device *spi)
{
struct spi_bitbang *bitbang;
struct mpc83xx_spi *mpc83xx_spi;
int retval;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (!spi->max_speed_hz)
return -EINVAL;
@@ -371,7 +402,6 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
ret = -ENODEV;
goto free_master;
}
-
mpc83xx_spi = spi_master_get_devdata(master);
mpc83xx_spi->bitbang.master = spi_master_get(master);
mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
@@ -380,9 +410,17 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
mpc83xx_spi->sysclk = pdata->sysclk;
mpc83xx_spi->activate_cs = pdata->activate_cs;
mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
+ mpc83xx_spi->qe_mode = pdata->qe_mode;
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ mpc83xx_spi->rx_shift = 0;
+ mpc83xx_spi->tx_shift = 0;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 24;
+ }
+
mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
init_completion(&mpc83xx_spi->done);
@@ -417,6 +455,9 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (pdata->qe_mode)
+ regval |= SPMODE_OP;
+
mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
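As a worked illustration of the qe_mode shifts introduced above (the register layout is inferred from the shift values used in this patch, not quoted from a datasheet): an 8-bit word is left-justified on transmit, and the received byte is read back from the middle of the 32-bit data register.

static u32 example_qe_tx_word(u8 byte)
{
	return (u32)byte << 24;		/* tx_shift == 24: byte sits in bits 31..24 */
}

static u8 example_qe_rx_word(u32 reg)
{
	return (u8)(reg >> 16);		/* rx_shift == 16: received byte in bits 23..16 */
}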
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index d5a710f6e44..7071ff8da63 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,6 +146,9 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int s3c24xx_spi_setup(struct spi_device *spi)
{
int ret;
@@ -153,8 +156,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- if ((spi->mode & SPI_LSB_FIRST) != 0)
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
return -EINVAL;
+ }
ret = s3c24xx_spi_setupxfer(spi, NULL);
if (ret < 0) {
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
new file mode 100644
index 00000000000..08e981c4064
--- /dev/null
+++ b/drivers/spi/spi_txx9.c
@@ -0,0 +1,474 @@
+/*
+ * spi_txx9.c - TXx9 SPI controller driver.
+ *
+ * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ *
+ * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <asm/gpio.h>
+
+
+#define SPI_FIFO_SIZE 4
+
+#define TXx9_SPMCR 0x00
+#define TXx9_SPCR0 0x04
+#define TXx9_SPCR1 0x08
+#define TXx9_SPFS 0x0c
+#define TXx9_SPSR 0x14
+#define TXx9_SPDR 0x18
+
+/* SPMCR : SPI Master Control */
+#define TXx9_SPMCR_OPMODE 0xc0
+#define TXx9_SPMCR_CONFIG 0x40
+#define TXx9_SPMCR_ACTIVE 0x80
+#define TXx9_SPMCR_SPSTP 0x02
+#define TXx9_SPMCR_BCLR 0x01
+
+/* SPCR0 : SPI Control 0 */
+#define TXx9_SPCR0_TXIFL_MASK 0xc000
+#define TXx9_SPCR0_RXIFL_MASK 0x3000
+#define TXx9_SPCR0_SIDIE 0x0800
+#define TXx9_SPCR0_SOEIE 0x0400
+#define TXx9_SPCR0_RBSIE 0x0200
+#define TXx9_SPCR0_TBSIE 0x0100
+#define TXx9_SPCR0_IFSPSE 0x0010
+#define TXx9_SPCR0_SBOS 0x0004
+#define TXx9_SPCR0_SPHA 0x0002
+#define TXx9_SPCR0_SPOL 0x0001
+
+/* SPSR : SPI Status */
+#define TXx9_SPSR_TBSI 0x8000
+#define TXx9_SPSR_RBSI 0x4000
+#define TXx9_SPSR_TBS_MASK 0x3800
+#define TXx9_SPSR_RBS_MASK 0x0700
+#define TXx9_SPSR_SPOE 0x0080
+#define TXx9_SPSR_IFSD 0x0008
+#define TXx9_SPSR_SIDLE 0x0004
+#define TXx9_SPSR_STRDY 0x0002
+#define TXx9_SPSR_SRRDY 0x0001
+
+
+struct txx9spi {
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ spinlock_t lock; /* protect 'queue' */
+ struct list_head queue;
+ wait_queue_head_t waitq;
+ void __iomem *membase;
+ int irq;
+ int baseclk;
+ struct clk *clk;
+ u32 max_speed_hz, min_speed_hz;
+ int last_chipselect;
+ int last_chipselect_val;
+};
+
+static u32 txx9spi_rd(struct txx9spi *c, int reg)
+{
+ return __raw_readl(c->membase + reg);
+}
+static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
+{
+ __raw_writel(val, c->membase + reg);
+}
+
+static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
+ int on, unsigned int cs_delay)
+{
+ int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
+ if (on) {
+ /* deselect the chip with cs_change hint in last transfer */
+ if (c->last_chipselect >= 0)
+ gpio_set_value(c->last_chipselect,
+ !c->last_chipselect_val);
+ c->last_chipselect = spi->chip_select;
+ c->last_chipselect_val = val;
+ } else {
+ c->last_chipselect = -1;
+ ndelay(cs_delay); /* CS Hold Time */
+ }
+ gpio_set_value(spi->chip_select, val);
+ ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
+
+static int txx9spi_setup(struct spi_device *spi)
+{
+ struct txx9spi *c = spi_master_get_devdata(spi->master);
+ u8 bits_per_word;
+
+ if (spi->mode & ~MODEBITS)
+ return -EINVAL;
+
+ if (!spi->max_speed_hz
+ || spi->max_speed_hz > c->max_speed_hz
+ || spi->max_speed_hz < c->min_speed_hz)
+ return -EINVAL;
+
+ bits_per_word = spi->bits_per_word ? : 8;
+ if (bits_per_word != 8 && bits_per_word != 16)
+ return -EINVAL;
+
+ if (gpio_direction_output(spi->chip_select,
+ !(spi->mode & SPI_CS_HIGH))) {
+ dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
+ return -EINVAL;
+ }
+
+ /* deselect chip */
+ spin_lock(&c->lock);
+ txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
+ spin_unlock(&c->lock);
+
+ return 0;
+}
+
+static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
+{
+ struct txx9spi *c = dev_id;
+
+ /* disable rx intr */
+ txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
+ TXx9_SPCR0);
+ wake_up(&c->waitq);
+ return IRQ_HANDLED;
+}
+
+static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
+{
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ unsigned int cs_delay;
+ unsigned int cs_change = 1;
+ int status = 0;
+ u32 mcr;
+ u32 prev_speed_hz = 0;
+ u8 prev_bits_per_word = 0;
+
+ /* CS setup/hold/recovery time in nsec */
+ cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
+
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
+ dev_err(&spi->dev, "Bad mode.\n");
+ status = -EIO;
+ goto exit;
+ }
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+ txx9spi_wr(c, TXx9_SPCR0_SBOS
+ | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
+ | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
+ | 0x08,
+ TXx9_SPCR0);
+
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ const void *txbuf = t->tx_buf;
+ void *rxbuf = t->rx_buf;
+ u32 data;
+ unsigned int len = t->len;
+ unsigned int wsize;
+ u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+ bits_per_word = bits_per_word ? : 8;
+ wsize = bits_per_word >> 3; /* in bytes */
+
+ if (prev_speed_hz != speed_hz
+ || prev_bits_per_word != bits_per_word) {
+ u32 n = (c->baseclk + speed_hz - 1) / speed_hz;
+ if (n < 1)
+ n = 1;
+ else if (n > 0xff)
+ n = 0xff;
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
+ TXx9_SPMCR);
+ txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
+ /* enter active mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
+
+ prev_speed_hz = speed_hz;
+ prev_bits_per_word = bits_per_word;
+ }
+
+ if (cs_change)
+ txx9spi_cs_func(spi, c, 1, cs_delay);
+ cs_change = t->cs_change;
+ while (len) {
+ unsigned int count = SPI_FIFO_SIZE;
+ int i;
+ u32 cr0;
+
+ if (len < count * wsize)
+ count = len / wsize;
+ /* now tx must be idle... */
+ while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
+ cpu_relax();
+ cr0 = txx9spi_rd(c, TXx9_SPCR0);
+ cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
+ cr0 |= (count - 1) << 12;
+ /* enable rx intr */
+ cr0 |= TXx9_SPCR0_RBSIE;
+ txx9spi_wr(c, cr0, TXx9_SPCR0);
+ /* send */
+ for (i = 0; i < count; i++) {
+ if (txbuf) {
+ data = (wsize == 1)
+ ? *(const u8 *)txbuf
+ : *(const u16 *)txbuf;
+ txx9spi_wr(c, data, TXx9_SPDR);
+ txbuf += wsize;
+ } else
+ txx9spi_wr(c, 0, TXx9_SPDR);
+ }
+ /* wait all rx data */
+ wait_event(c->waitq,
+ txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
+ /* receive */
+ for (i = 0; i < count; i++) {
+ data = txx9spi_rd(c, TXx9_SPDR);
+ if (rxbuf) {
+ if (wsize == 1)
+ *(u8 *)rxbuf = data;
+ else
+ *(u16 *)rxbuf = data;
+ rxbuf += wsize;
+ }
+ }
+ len -= count * wsize;
+ }
+ m->actual_length += t->len;
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (!cs_change)
+ continue;
+ if (t->transfer_list.next == &m->transfers)
+ break;
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+ }
+
+exit:
+ m->status = status;
+ m->complete(m->context);
+
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change))
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+}
+
+static void txx9spi_work(struct work_struct *work)
+{
+ struct txx9spi *c = container_of(work, struct txx9spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ while (!list_empty(&c->queue)) {
+ struct spi_message *m;
+
+ m = container_of(c->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ txx9spi_work_one(c, m);
+
+ spin_lock_irqsave(&c->lock, flags);
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+}
+
+static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_master *master = spi->master;
+ struct txx9spi *c = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long flags;
+
+ m->actual_length = 0;
+
+ /* check each transfer's parameters */
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+ bits_per_word = bits_per_word ? : 8;
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+ if (bits_per_word != 8 && bits_per_word != 16)
+ return -EINVAL;
+ if (t->len & ((bits_per_word >> 3) - 1))
+ return -EINVAL;
+ if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&c->lock, flags);
+ list_add_tail(&m->queue, &c->queue);
+ queue_work(c->workqueue, &c->work);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ return 0;
+}
+
+static int __init txx9spi_probe(struct platform_device *dev)
+{
+ struct spi_master *master;
+ struct txx9spi *c;
+ struct resource *res;
+ int ret = -ENODEV;
+ u32 mcr;
+
+ master = spi_alloc_master(&dev->dev, sizeof(*c));
+ if (!master)
+ return ret;
+ c = spi_master_get_devdata(master);
+ c->irq = -1;
+ platform_set_drvdata(dev, master);
+
+ INIT_WORK(&c->work, txx9spi_work);
+ spin_lock_init(&c->lock);
+ INIT_LIST_HEAD(&c->queue);
+ init_waitqueue_head(&c->waitq);
+
+ c->clk = clk_get(&dev->dev, "spi-baseclk");
+ if (IS_ERR(c->clk)) {
+ ret = PTR_ERR(c->clk);
+ c->clk = NULL;
+ goto exit;
+ }
+ ret = clk_enable(c->clk);
+ if (ret) {
+ clk_put(c->clk);
+ c->clk = NULL;
+ goto exit;
+ }
+ c->baseclk = clk_get_rate(c->clk);
+ c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff;
+ c->max_speed_hz = c->baseclk;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit;
+ c->membase = ioremap(res->start, res->end - res->start + 1);
+ if (!c->membase)
+ goto exit;
+
+ /* enter config mode */
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+
+ c->irq = platform_get_irq(dev, 0);
+ if (c->irq < 0)
+ goto exit;
+ ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
+ if (ret) {
+ c->irq = -1;
+ goto exit;
+ }
+
+ c->workqueue = create_singlethread_workqueue(master->cdev.dev->bus_id);
+ if (!c->workqueue)
+ goto exit;
+ c->last_chipselect = -1;
+
+ dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
+ (unsigned long long)res->start, c->irq,
+ (c->baseclk + 500000) / 1000000);
+
+ master->bus_num = dev->id;
+ master->setup = txx9spi_setup;
+ master->transfer = txx9spi_transfer;
+ master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto exit;
+ return 0;
+exit:
+ if (c->workqueue)
+ destroy_workqueue(c->workqueue);
+ if (c->irq >= 0)
+ free_irq(c->irq, c);
+ if (c->membase)
+ iounmap(c->membase);
+ if (c->clk) {
+ clk_disable(c->clk);
+ clk_put(c->clk);
+ }
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(master);
+ return ret;
+}
+
+static int __exit txx9spi_remove(struct platform_device *dev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
+ struct txx9spi *c = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+ platform_set_drvdata(dev, NULL);
+ destroy_workqueue(c->workqueue);
+ free_irq(c->irq, c);
+ iounmap(c->membase);
+ clk_disable(c->clk);
+ clk_put(c->clk);
+ spi_master_put(master);
+ return 0;
+}
+
+static struct platform_driver txx9spi_driver = {
+ .remove = __exit_p(txx9spi_remove),
+ .driver = {
+ .name = "txx9spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init txx9spi_init(void)
+{
+ return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+}
+subsys_initcall(txx9spi_init);
+
+static void __exit txx9spi_exit(void)
+{
+ platform_driver_unregister(&txx9spi_driver);
+}
+module_exit(txx9spi_exit);
+
+MODULE_DESCRIPTION("TXx9 SPI Driver");
+MODULE_LICENSE("GPL");
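A sketch of a message that exercises the per-transfer overrides txx9spi_transfer() validates above (purely illustrative; buffer layout and clock rate are made up):

static int example_txx9_xfer(struct spi_device *spi, const u16 *cmd, u8 *resp)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = cmd,  .len = 2, .bits_per_word = 16, },
		{ .rx_buf = resp, .len = 4, .speed_hz = 1000000, },
	};
	struct spi_message m;
	int i;

	spi_message_init(&m);
	for (i = 0; i < 2; i++)
		spi_message_add_tail(&t[i], &m);
	return spi_sync(spi, &m);	/* queued and run by txx9spi_work() */
}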
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d04242aee40..38b60ad0eda 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -181,7 +181,8 @@ static int spidev_message(struct spidev_data *spidev,
}
if (u_tmp->tx_buf) {
k_tmp->tx_buf = buf;
- if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf,
+ if (copy_from_user(buf, (const u8 __user *)
+ (ptrdiff_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
}
@@ -213,7 +214,8 @@ static int spidev_message(struct spidev_data *spidev,
buf = spidev->buffer;
for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
if (u_tmp->rx_buf) {
- if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf,
+ if (__copy_to_user((u8 __user *)
+ (ptrdiff_t) u_tmp->rx_buf, buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
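The ptrdiff_t casts above exist because spi_ioc_transfer carries user buffer addresses as plain integers; a small sketch of the round trip (the helper name is hypothetical, struct spi_ioc_transfer comes from <linux/spi/spidev.h>):

static const u8 __user *example_tx_ptr(const struct spi_ioc_transfer *u)
{
	/* casting through ptrdiff_t keeps the integer-to-pointer conversion
	 * pointer-sized on both 32-bit and 64-bit kernels */
	return (const u8 __user *)(ptrdiff_t)u->tx_buf;
}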
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
new file mode 100644
index 00000000000..6da58ca48b3
--- /dev/null
+++ b/drivers/spi/tle62x0.c
@@ -0,0 +1,328 @@
+/*
+ * tle62x0.c -- support Infineon TLE62x0 driver chips
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ * Ben Dooks, <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/tle62x0.h>
+
+
+#define CMD_READ 0x00
+#define CMD_SET 0xff
+
+#define DIAG_NORMAL 0x03
+#define DIAG_OVERLOAD 0x02
+#define DIAG_OPEN 0x01
+#define DIAG_SHORTGND 0x00
+
+struct tle62x0_state {
+ struct spi_device *us;
+ struct mutex lock;
+ unsigned int nr_gpio;
+ unsigned int gpio_state;
+
+ unsigned char tx_buff[4];
+ unsigned char rx_buff[4];
+};
+
+static int to_gpio_num(struct device_attribute *attr);
+
+static inline int tle62x0_write(struct tle62x0_state *st)
+{
+ unsigned char *buff = st->tx_buff;
+ unsigned int gpio_state = st->gpio_state;
+
+ buff[0] = CMD_SET;
+
+ if (st->nr_gpio == 16) {
+ buff[1] = gpio_state >> 8;
+ buff[2] = gpio_state;
+ } else {
+ buff[1] = gpio_state;
+ }
+
+ dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
+ buff[0], buff[1], buff[2]);
+
+ return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
+}
+
+static inline int tle62x0_read(struct tle62x0_state *st)
+{
+ unsigned char *txbuff = st->tx_buff;
+ struct spi_transfer xfer = {
+ .tx_buf = txbuff,
+ .rx_buf = st->rx_buff,
+ .len = (st->nr_gpio * 2) / 8,
+ };
+ struct spi_message msg;
+
+ txbuff[0] = CMD_READ;
+ txbuff[1] = 0x00;
+ txbuff[2] = 0x00;
+ txbuff[3] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(st->us, &msg);
+}
+
+static unsigned char *decode_fault(unsigned int fault_code)
+{
+ fault_code &= 3;
+
+ switch (fault_code) {
+ case DIAG_NORMAL:
+ return "N";
+ case DIAG_OVERLOAD:
+ return "V";
+ case DIAG_OPEN:
+ return "O";
+ case DIAG_SHORTGND:
+ return "G";
+ }
+
+ return "?";
+}
+
+static ssize_t tle62x0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ char *bp = buf;
+ unsigned char *buff = st->rx_buff;
+ unsigned long fault = 0;
+ int ptr;
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = tle62x0_read(st);
+
+ dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+
+ for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
+ fault <<= 8;
+ fault |= ((unsigned long)buff[ptr]);
+
+ dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
+ }
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++) {
+ bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
+ }
+
+ *bp++ = '\n';
+
+ mutex_unlock(&st->lock);
+ return bp - buf;
+}
+
+static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
+
+static ssize_t tle62x0_gpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ int value;
+
+ mutex_lock(&st->lock);
+ value = (st->gpio_state >> gpio_num) & 1;
+ mutex_unlock(&st->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d", value);
+}
+
+static ssize_t tle62x0_gpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ unsigned long val;
+ char *endp;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (buf == endp)
+ return -EINVAL;
+
+ dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
+
+ mutex_lock(&st->lock);
+
+ if (val)
+ st->gpio_state |= 1 << gpio_num;
+ else
+ st->gpio_state &= ~(1 << gpio_num);
+
+ tle62x0_write(st);
+ mutex_unlock(&st->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+
+static struct device_attribute *gpio_attrs[] = {
+ [0] = &dev_attr_gpio1,
+ [1] = &dev_attr_gpio2,
+ [2] = &dev_attr_gpio3,
+ [3] = &dev_attr_gpio4,
+ [4] = &dev_attr_gpio5,
+ [5] = &dev_attr_gpio6,
+ [6] = &dev_attr_gpio7,
+ [7] = &dev_attr_gpio8,
+ [8] = &dev_attr_gpio9,
+ [9] = &dev_attr_gpio10,
+ [10] = &dev_attr_gpio11,
+ [11] = &dev_attr_gpio12,
+ [12] = &dev_attr_gpio13,
+ [13] = &dev_attr_gpio14,
+ [14] = &dev_attr_gpio15,
+ [15] = &dev_attr_gpio16
+};
+
+static int to_gpio_num(struct device_attribute *attr)
+{
+ int ptr;
+
+ for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
+ if (gpio_attrs[ptr] == attr)
+ return ptr;
+ }
+
+ return -1;
+}
+
+static int __devinit tle62x0_probe(struct spi_device *spi)
+{
+ struct tle62x0_state *st;
+ struct tle62x0_pdata *pdata;
+ int ptr;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&spi->dev, "no device data specified\n");
+ return -EINVAL;
+ }
+
+ st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
+ if (st == NULL) {
+ dev_err(&spi->dev, "no memory for device state\n");
+ return -ENOMEM;
+ }
+
+ st->us = spi;
+ st->nr_gpio = pdata->gpio_count;
+ st->gpio_state = pdata->init_state;
+
+ mutex_init(&st->lock);
+
+ ret = device_create_file(&spi->dev, &dev_attr_status_show);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create status attribute\n");
+ goto err_status;
+ }
+
+ for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
+ ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create gpio attribute\n");
+ goto err_gpios;
+ }
+ }
+
+ /* tle62x0_write(st); */
+ spi_set_drvdata(spi, st);
+ return 0;
+
+ err_gpios:
+ for (; ptr > 0; ptr--)
+ device_remove_file(&spi->dev, gpio_attrs[ptr - 1]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ err_status:
+ kfree(st);
+ return ret;
+}
+
+static int __devexit tle62x0_remove(struct spi_device *spi)
+{
+ struct tle62x0_state *st = spi_get_drvdata(spi);
+ int ptr;
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ kfree(st);
+ return 0;
+}
+
+static struct spi_driver tle62x0_driver = {
+ .driver = {
+ .name = "tle62x0",
+ .owner = THIS_MODULE,
+ },
+ .probe = tle62x0_probe,
+ .remove = __devexit_p(tle62x0_remove),
+};
+
+static __init int tle62x0_init(void)
+{
+ return spi_register_driver(&tle62x0_driver);
+}
+
+static __exit void tle62x0_exit(void)
+{
+ spi_unregister_driver(&tle62x0_driver);
+}
+
+module_init(tle62x0_init);
+module_exit(tle62x0_exit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("TLE62x0 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
new file mode 100644
index 00000000000..f0bf9a68e96
--- /dev/null
+++ b/drivers/spi/xilinx_spi.c
@@ -0,0 +1,434 @@
+/*
+ * xilinx_spi.c
+ *
+ * Xilinx SPI controller driver (master mode only)
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+
+#include <syslib/virtex_devices.h>
+
+#define XILINX_SPI_NAME "xspi"
+
+/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
+
+#define XSPI_CR_ENABLE 0x02
+#define XSPI_CR_MASTER_MODE 0x04
+#define XSPI_CR_CPOL 0x08
+#define XSPI_CR_CPHA 0x10
+#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_TXFIFO_RESET 0x20
+#define XSPI_CR_RXFIFO_RESET 0x40
+#define XSPI_CR_MANUAL_SSELECT 0x80
+#define XSPI_CR_TRANS_INHIBIT 0x100
+
+#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
+
+#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
+#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
+#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
+#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
+#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
+
+#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
+
+#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
+
+/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+ */
+#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XIPIF_V123B_GINTR_ENABLE 0x80000000
+
+#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
+#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
+
+#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
+ * disabled */
+#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
+#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
+#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
+#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+
+#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
+#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+
+struct xilinx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *regs; /* virt. address of the control registers */
+
+ int irq; /* signed: platform_get_irq() may return a negative errno */
+
+ u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
+
+ u8 *rx_ptr; /* pointer into the Rx buffer */
+ const u8 *tx_ptr; /* pointer into the Tx buffer */
+ int remaining_bytes; /* the number of bytes left to transfer */
+};
+
+static void xspi_init_hw(void __iomem *regs_base)
+{
+ /* Reset the SPI device */
+ out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
+ XIPIF_V123B_RESET_MASK);
+ /* Disable all the interrupts just in case */
+ out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
+ /* Enable the global IPIF interrupt */
+ out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
+ XIPIF_V123B_GINTR_ENABLE);
+ /* Deselect the slave on the SPI bus */
+ out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
+ /* Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it */
+ out_be16(regs_base + XSPI_CR_OFFSET,
+ XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
+ | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ /* Deselect the slave on the SPI bus */
+ out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
+ } else if (is_on == BITBANG_CS_ACTIVE) {
+ /* Set the SPI clock phase and polarity */
+ u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
+ & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+ /* We do not check spi->max_speed_hz here as the SPI clock
+ * frequency is not software programmable (the IP block design
+ * parameter)
+ */
+
+ /* Activate the chip select */
+ out_be32(xspi->regs + XSPI_SSR_OFFSET,
+ ~(0x0001 << spi->chip_select));
+ }
+}
+
+/* spi_bitbang requires custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
+ * supports just 8 bits per word, and SPI clock can't be changed in software.
+ * Check for 8 bits per word. Chip select delay calculations could be
+ * added here as soon as bitbang_work() can be made aware of the delay value.
+ */
+static int xilinx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u8 bits_per_word;
+ u32 hz;
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
+ hz = (t) ? t->speed_hz : spi->max_speed_hz;
+ if (bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __FUNCTION__, bits_per_word);
+ return -EINVAL;
+ }
+
+ if (hz && xspi->speed_hz > hz) {
+ dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
+ __FUNCTION__, hz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int xilinx_spi_setup(struct spi_device *spi)
+{
+ struct spi_bitbang *bitbang;
+ struct xilinx_spi *xspi;
+ int retval;
+
+ xspi = spi_master_get_devdata(spi->master);
+ bitbang = &xspi->bitbang;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (spi->mode & ~MODEBITS) {
+ dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
+ __FUNCTION__, spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ retval = xilinx_spi_setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
+ __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+
+ return 0;
+}
+
+static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
+{
+ u8 sr;
+
+ /* Fill the Tx FIFO with as many bytes as possible */
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
+ if (xspi->tx_ptr) {
+ out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
+ } else {
+ out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
+ }
+ xspi->remaining_bytes--;
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ }
+}
+
+static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ipif_ier;
+ u16 cr;
+
+ /* We get here with transmitter inhibited */
+
+ xspi->tx_ptr = t->tx_buf;
+ xspi->rx_ptr = t->rx_buf;
+ xspi->remaining_bytes = t->len;
+ INIT_COMPLETION(xspi->done);
+
+ xilinx_spi_fill_tx_fifo(xspi);
+
+ /* Enable the transmit empty interrupt, which we use to determine
+ * progress on the transmission.
+ */
+ ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
+ ipif_ier | XSPI_INTR_TX_EMPTY);
+
+ /* Start the transfer by not inhibiting the transmitter any longer */
+ cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+ wait_for_completion(&xspi->done);
+
+ /* Disable the transmit empty interrupt */
+ out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
+
+ return t->len - xspi->remaining_bytes;
+}
+
+
+/* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault are not expected to occur.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct xilinx_spi *xspi = dev_id;
+ u32 ipif_isr;
+
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
+
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
+ u16 cr;
+ u8 sr;
+
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
+ out_be16(xspi->regs + XSPI_CR_OFFSET,
+ cr | XSPI_CR_TRANS_INHIBIT);
+
+ /* Read out all the data from the Rx FIFO */
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ u8 data;
+
+ data = in_8(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *xspi->rx_ptr++ = data;
+ }
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ }
+
+ /* See if there is more data to send */
+ if (xspi->remaining_bytes > 0) {
+ xilinx_spi_fill_tx_fifo(xspi);
+ /* Start the transfer by not inhibiting the
+ * transmitter any longer
+ */
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ } else {
+ /* No more data to send.
+ * Indicate the transfer is completed.
+ */
+ complete(&xspi->done);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init xilinx_spi_probe(struct platform_device *dev)
+{
+ int ret = 0;
+ struct spi_master *master;
+ struct xilinx_spi *xspi;
+ struct xspi_platform_data *pdata;
+ struct resource *r;
+
+ /* Get resources(memory, IRQ) associated with the device */
+ master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
+
+ if (master == NULL) {
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(dev, master);
+ pdata = dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ ret = -ENODEV;
+ goto put_master;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto put_master;
+ }
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = spi_master_get(master);
+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+ xspi->bitbang.master->setup = xilinx_spi_setup;
+ init_completion(&xspi->done);
+
+ if (!request_mem_region(r->start,
+ r->end - r->start + 1, XILINX_SPI_NAME)) {
+ ret = -ENXIO;
+ goto put_master;
+ }
+
+ xspi->regs = ioremap(r->start, r->end - r->start + 1);
+ if (xspi->regs == NULL) {
+ ret = -ENOMEM;
+ goto put_master;
+ }
+
+ xspi->irq = platform_get_irq(dev, 0);
+ if (xspi->irq < 0) {
+ ret = -ENXIO;
+ goto unmap_io;
+ }
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ xspi->speed_hz = pdata->speed_hz;
+
+ /* SPI controller initializations */
+ xspi_init_hw(xspi->regs);
+
+ /* Register for SPI Interrupt */
+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ if (ret != 0)
+ goto unmap_io;
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret != 0) {
+ dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
+ goto free_irq;
+ }
+
+ dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
+ r->start, (u32)xspi->regs, xspi->irq);
+
+ return ret;
+
+free_irq:
+ free_irq(xspi->irq, xspi);
+unmap_io:
+ iounmap(xspi->regs);
+put_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+ struct xilinx_spi *xspi;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(dev);
+ xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ free_irq(xspi->irq, xspi);
+ iounmap(xspi->regs);
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(xspi->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = __devexit_p(xilinx_spi_remove),
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_init(void)
+{
+ return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_init);
+
+static void __exit xilinx_spi_exit(void)
+{
+ platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_exit);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx SPI driver");
+MODULE_LICENSE("GPL");
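The probe above pulls bus_num, num_chipselect and speed_hz from platform data; a hedged sketch of the matching board hookup (values and device name below are illustrative, register/IRQ resources omitted):

static struct xspi_platform_data example_xspi_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 2,
	.speed_hz	= 50000000,	/* fixed IP core clock, board specific */
};

static struct platform_device example_xspi_device = {
	.name	= "xspi",		/* XILINX_SPI_NAME */
	.id	= 0,
	.dev	= {
		.platform_data	= &example_xspi_pdata,
	},
	/* .resource / .num_resources (registers and IRQ) omitted */
};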
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 8f530e68263..5f98f673f1b 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -19,6 +19,7 @@ if PHONE
config PHONE_IXJ
tristate "QuickNet Internet LineJack/PhoneJack support"
+ depends on ISA || PCI
---help---
Say M if you have a telephony card manufactured by Quicknet
Technologies, Inc. These include the Internet PhoneJACK and
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index c7b0a357b04..49cd9793404 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3453,7 +3453,6 @@ static void ixj_write_frame(IXJ *j)
{
int cnt, frame_count, dly;
IXJ_WORD dat;
- BYTES blankword;
frame_count = 0;
if(j->flags.cidplay) {
@@ -3501,6 +3500,8 @@ static void ixj_write_frame(IXJ *j)
}
if (frame_count >= 1) {
if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
+ BYTES blankword;
+
switch (j->play_mode) {
case PLAYBACK_MODE_ULAW:
case PLAYBACK_MODE_ALAW:
@@ -3508,6 +3509,7 @@ static void ixj_write_frame(IXJ *j)
break;
case PLAYBACK_MODE_8LINEAR:
case PLAYBACK_MODE_16LINEAR:
+ default:
blankword.low = blankword.high = 0x00;
break;
case PLAYBACK_MODE_8LINEAR_WSS:
@@ -3531,6 +3533,8 @@ static void ixj_write_frame(IXJ *j)
j->flags.play_first_frame = 0;
} else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
for (cnt = 0; cnt < 24; cnt++) {
+ BYTES blankword;
+
if(cnt == 12) {
blankword.low = 0x02;
blankword.high = 0x00;
@@ -4868,6 +4872,7 @@ static char daa_CR_read(IXJ *j, int cr)
bytes.high = 0xB0 + cr;
break;
case SOP_PU_PULSEDIALING:
+ default:
bytes.high = 0xF0 + cr;
break;
}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 071b9675a78..7dd73546bf4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -16,7 +16,7 @@ config USB_ARCH_HAS_HCD
boolean
default y if USB_ARCH_HAS_OHCI
default y if USB_ARCH_HAS_EHCI
- default y if PCMCIA # sl811_cs
+ default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default PCI
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4973e147bc7..8f046659b4e 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1168,6 +1168,7 @@ static int uea_kthread(void *data)
struct uea_softc *sc = data;
int ret = -EAGAIN;
+ set_freezable();
uea_enters(INS_TO_USBDEV(sc));
while (!kthread_should_stop()) {
if (ret < 0 || sc->reset)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50e79010401..fd74c50b180 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2728,6 +2728,7 @@ loop:
static int hub_thread(void *__unused)
{
+ set_freezable();
do {
hub_events();
wait_event_interruptible(khubd_wait,
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8712ef98717..be7a1bd2823 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3434,6 +3434,9 @@ static int fsg_main_thread(void *fsg_)
allow_signal(SIGKILL);
allow_signal(SIGUSR1);
+ /* Allow the thread to be frozen */
+ set_freezable();
+
/* Arrange for userspace references to be interpreted as kernel
* pointers. That way we can pass a kernel pointer to a routine
* that expects a __user pointer and it will work okay. */
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index 1fd5fc220cd..42d4e6454a7 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -630,7 +630,7 @@ static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int time
} else
status = urb->status;
- if (actual_length)
+ if (status >= 0)
*actual_length = urb->actual_length;
return status;
@@ -664,7 +664,7 @@ static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsi
int ret;
struct usb_ctrlrequest *dr;
struct urb *urb;
- int length;
+ int uninitialized_var(length);
dbg ("auerchain_control_msg");
dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bef8bcd9bd9..28842d208bb 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -311,8 +311,6 @@ static int usb_stor_control_thread(void * __us)
struct Scsi_Host *host = us_to_host(us);
int autopm_rc;
- current->flags |= PF_NOFREEZE;
-
for(;;) {
US_DEBUGP("*** thread sleeping.\n");
if(down_interruptible(&us->sema))
@@ -920,6 +918,7 @@ static int usb_stor_scan_thread(void * __us)
printk(KERN_DEBUG
"usb-storage: device found at %d\n", us->pusb_dev->devnum);
+ set_freezable();
/* Wait for the timeout to expire or for a disconnect */
if (delay_use > 0) {
printk(KERN_DEBUG "usb-storage: waiting for device "
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 0dda73da862..7f907fb23b8 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -60,7 +60,7 @@ static u_long videomemory;
static u_long videomemorysize;
static struct fb_info fb_info;
-static u32 mc68x328fb_pseudo_palette[17];
+static u32 mc68x328fb_pseudo_palette[16];
static struct fb_var_screeninfo mc68x328fb_default __initdata = {
.red = { 0, 8, 0 },
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9b7a76be36a..0c5644bb59a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -812,7 +812,7 @@ config FB_PVR2
config FB_EPSON1355
bool "Epson 1355 framebuffer support"
- depends on (FB = y) && (SUPERH || ARCH_CEIVA)
+ depends on (FB = y) && ARCH_CEIVA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1820,6 +1820,10 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
+if ARCH_OMAP
+ source "drivers/video/omap/Kconfig"
+endif
+
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index bd8b0522950..a562f9d69d2 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
obj-$(CONFIG_FB_PS3) += ps3fb.o
obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
+obj-$(CONFIG_FB_OMAP) += omap/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/aty/ati_ids.h
index 90e7df22f50..685a754991c 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/aty/ati_ids.h
@@ -204,6 +204,7 @@
#define PCI_CHIP_RV280_5961 0x5961
#define PCI_CHIP_RV280_5962 0x5962
#define PCI_CHIP_RV280_5964 0x5964
+#define PCI_CHIP_RS485_5975 0x5975
#define PCI_CHIP_RV280_5C61 0x5C61
#define PCI_CHIP_RV280_5C63 0x5C63
#define PCI_CHIP_R423_5D57 0x5D57
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2fbff631743..ef330e34d03 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -541,7 +541,7 @@ static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
#ifdef CONFIG_FB_ATY_GX
static char *aty_gx_ram[8] __devinitdata = {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 2349e71b008..47ca62fe7c3 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -153,6 +153,8 @@ static struct pci_device_id radeonfb_pci_table[] = {
/* Mobility 9200 (M9+) */
CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
+ /* Mobility Xpress 200 */
+ CHIP_DEF(PCI_CHIP_RS485_5975, R300, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* 9200 */
CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2),
CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2),
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7ebffcdfd1e..7c922c7b460 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -301,7 +301,7 @@ struct radeonfb_info {
void __iomem *bios_seg;
int fp_bios_start;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct { u8 red, green, blue, pad; }
palette[256];
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index d3b8a6be291..49643969f9f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -118,6 +118,22 @@ config FRAMEBUFFER_CONSOLE
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+ bool "Map the console to the primary display device"
+ depends on FRAMEBUFFER_CONSOLE
+ default n
+ ---help---
+ If this option is selected, the framebuffer console will
+ automatically select the primary display device (if the architecture
+ supports this feature). Otherwise, the framebuffer console will
+ always select the first framebuffer driver that is loaded. The latter
+ is the default behavior.
+
+ You can always override the automatic selection of the primary device
+ by using the fbcon=map: boot option.
+
+ If unsure, select n.
+
config FRAMEBUFFER_CONSOLE_ROTATION
bool "Framebuffer Console Rotation"
depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 73813c60d03..decfdc8eb9c 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h> /* For counting font checksums */
+#include <asm/fb.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -125,6 +126,8 @@ static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
static int fbcon_has_exited;
+static int primary_device = -1;
+static int map_override;
/* font data */
static char fontname[40];
@@ -152,6 +155,7 @@ static int fbcon_set_origin(struct vc_data *);
#define DEFAULT_CURSOR_BLINK_RATE (20)
static int vbl_cursor_cnt;
+static int fbcon_cursor_noblink;
#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
@@ -188,16 +192,14 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- struct vc_data *vc);
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- int unit);
+ int unit);
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
static void fbcon_exit(void);
-static struct class_device *fbcon_class_device;
+static struct device *fbcon_device;
#ifdef CONFIG_MAC
/*
@@ -441,7 +443,8 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
struct fbcon_ops *ops = info->fbcon_par;
if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
- !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
+ !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
+ !fbcon_cursor_noblink) {
if (!info->queue.func)
INIT_WORK(&info->queue, fb_flashcursor);
@@ -495,13 +498,17 @@ static int __init fb_console_setup(char *this_opt)
if (!strncmp(options, "map:", 4)) {
options += 4;
- if (*options)
+ if (*options) {
for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
if (!options[j])
j = 0;
con2fb_map_boot[i] =
(options[j++]-'0') % FB_MAX;
}
+
+ map_override = 1;
+ }
+
return 1;
}
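The map: parsing above now reuses the digit string when it is shorter than the number of consoles and records, via map_override, that the user mapped consoles explicitly, so the new primary-device detection further down will not override a command-line choice. A hedged illustration of the wrap-around assignment, on bare arrays rather than fbcon state (example digits only):

    /* e.g. "fbcon=map:01" would alternate consoles between fb0 and fb1 */
    static void example_fill_map(signed char *map, int nr_consoles,
                                 const char *digits)
    {
            int i, j = 0;

            for (i = 0; i < nr_consoles; i++) {
                    if (!digits[j])
                            j = 0;          /* wrap and reuse the string */
                    map[i] = digits[j++] - '0';     /* modulo FB_MAX in the real code */
            }
    }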
@@ -736,7 +743,9 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
if (!err) {
info->fbcon_par = ops;
- set_blitting_type(vc, info);
+
+ if (vc)
+ set_blitting_type(vc, info);
}
if (err) {
@@ -798,11 +807,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
ops->flags |= FBCON_FLAGS_INIT;
ops->graphics = 0;
-
- if (vc)
- fbcon_set_disp(info, &info->var, vc);
- else
- fbcon_preset_disp(info, &info->var, unit);
+ fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
struct vc_data *fg_vc = vc_cons[fg_console].d;
@@ -1107,6 +1112,9 @@ static void fbcon_init(struct vc_data *vc, int init)
if (var_to_display(p, &info->var, info))
return;
+ if (!info->fbcon_par)
+ con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
+
/* If we are not the first console on this
fb, copy the font from that console */
t = &fb_display[fg_console];
@@ -1349,6 +1357,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
+ if (vc->vc_cursor_type & 0x10)
+ fbcon_del_cursor_timer(info);
+ else
+ fbcon_add_cursor_timer(info);
+
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
if (mode & CM_SOFTBACK) {
mode &= ~CM_SOFTBACK;
@@ -1368,36 +1381,29 @@ static int scrollback_phys_max = 0;
static int scrollback_max = 0;
static int scrollback_current = 0;
-/*
- * If no vc is existent yet, just set struct display
- */
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- int unit)
-{
- struct display *p = &fb_display[unit];
- struct display *t = &fb_display[fg_console];
-
- if (var_to_display(p, var, info))
- return;
-
- p->fontdata = t->fontdata;
- p->userfont = t->userfont;
- if (p->userfont)
- REFCOUNT(p->fontdata)++;
-}
-
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- struct vc_data *vc)
+ int unit)
{
- struct display *p = &fb_display[vc->vc_num], *t;
- struct vc_data **default_mode = vc->vc_display_fg;
- struct vc_data *svc = *default_mode;
+ struct display *p, *t;
+ struct vc_data **default_mode, *vc;
+ struct vc_data *svc;
struct fbcon_ops *ops = info->fbcon_par;
int rows, cols, charcnt = 256;
+ p = &fb_display[unit];
+
if (var_to_display(p, var, info))
return;
+
+ vc = vc_cons[unit].d;
+
+ if (!vc)
+ return;
+
+ default_mode = vc->vc_display_fg;
+ svc = *default_mode;
t = &fb_display[svc->vc_num];
+
if (!vc->vc_font.data) {
vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
vc->vc_font.width = (*default_mode)->vc_font.width;
@@ -1704,6 +1710,56 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
}
}
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: We subtract two lines from these pointers */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
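With fbcon_redraw_blit() in place, the SCROLL_MOVE cases below stop doing a whole-region bmove() plus clear(): the helper walks the software screen buffer and only blits the runs of character cells that actually changed, after which the caller clears the vacated lines, patches the buffer with scr_memsetw(), and returns 1 so the vt layer skips its own copy. A plain-C sketch of the "blit only the dirty runs" idea, operating on separate arrays rather than fbcon state:

    #include <string.h>

    static void move_row_dirty_runs(unsigned short *dst, const unsigned short *src,
                                    int cols)
    {
            int x = 0;

            while (x < cols) {
                    int start = x;

                    while (x < cols && src[x] != dst[x])
                            x++;                    /* grow the run of differing cells */
                    if (x > start)
                            memcpy(&dst[start], &src[start],
                                   (x - start) * sizeof(*dst));
                    else
                            x++;                    /* cells already match, skip */
            }
    }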
static void fbcon_redraw(struct vc_data *vc, struct display *p,
int line, int count, int offset)
{
@@ -1789,7 +1845,6 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
@@ -1813,10 +1868,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
goto redraw_up;
switch (p->scrollmode) {
case SCROLL_MOVE:
- ops->bmove(vc, info, t + count, 0, t, 0,
- b - t - count, vc->vc_cols);
- ops->clear(vc, info, b - count, 0, count,
- vc->vc_cols);
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return 1;
break;
case SCROLL_WRAP_MOVE:
@@ -1899,9 +1959,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
goto redraw_down;
switch (p->scrollmode) {
case SCROLL_MOVE:
- ops->bmove(vc, info, t, 0, t + count, 0,
- b - t - count, vc->vc_cols);
- ops->clear(vc, info, t, 0, count, vc->vc_cols);
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return 1;
break;
case SCROLL_WRAP_MOVE:
@@ -2937,9 +3003,48 @@ static int fbcon_mode_deleted(struct fb_info *info,
return found;
}
-static int fbcon_fb_unregistered(int idx)
+#ifdef CONFIG_VT_HW_CONSOLE_BINDING
+static int fbcon_unbind(void)
{
- int i;
+ int ret;
+
+ ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+ return ret;
+}
+#else
+static inline int fbcon_unbind(void)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
+static int fbcon_fb_unbind(int idx)
+{
+ int i, new_idx = -1, ret = 0;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] != idx &&
+ con2fb_map[i] != -1) {
+ new_idx = i;
+ break;
+ }
+ }
+
+ if (new_idx != -1) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == idx)
+ set_con2fb_map(i, new_idx, 0);
+ }
+ } else
+ ret = fbcon_unbind();
+
+ return ret;
+}
+
+static int fbcon_fb_unregistered(struct fb_info *info)
+{
+ int i, idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
@@ -2967,12 +3072,48 @@ static int fbcon_fb_unregistered(int idx)
if (!num_registered_fb)
unregister_con_driver(&fb_con);
+
+ if (primary_device == idx)
+ primary_device = -1;
+
return 0;
}
-static int fbcon_fb_registered(int idx)
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+static void fbcon_select_primary(struct fb_info *info)
{
- int ret = 0, i;
+ if (!map_override && primary_device == -1 &&
+ fb_is_primary_device(info)) {
+ int i;
+
+ printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n",
+ info->fix.id, info->node);
+ primary_device = info->node;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map_boot[i] = primary_device;
+
+ if (con_is_bound(&fb_con)) {
+ printk(KERN_INFO "fbcon: Remapping primary device, "
+ "fb%i, to tty %i-%i\n", info->node,
+ first_fb_vc + 1, last_fb_vc + 1);
+ info_idx = primary_device;
+ }
+ }
+
+}
+#else
+static inline void fbcon_select_primary(struct fb_info *info)
+{
+ return;
+}
+#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
+
+static int fbcon_fb_registered(struct fb_info *info)
+{
+ int ret = 0, i, idx = info->node;
+
+ fbcon_select_primary(info);
if (info_idx == -1) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2986,8 +3127,7 @@ static int fbcon_fb_registered(int idx)
ret = fbcon_takeover(1);
} else {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map_boot[i] == idx &&
- con2fb_map[i] == -1)
+ if (con2fb_map_boot[i] == idx)
set_con2fb_map(i, idx, 0);
}
}
@@ -3034,12 +3174,7 @@ static void fbcon_new_modelist(struct fb_info *info)
mode = fb_find_nearest_mode(fb_display[i].mode,
&info->modelist);
fb_videomode_to_var(&var, mode);
-
- if (vc)
- fbcon_set_disp(info, &var, vc);
- else
- fbcon_preset_disp(info, &var, i);
-
+ fbcon_set_disp(info, &var, vc->vc_num);
}
}
@@ -3114,11 +3249,14 @@ static int fbcon_event_notify(struct notifier_block *self,
mode = event->data;
ret = fbcon_mode_deleted(info, mode);
break;
+ case FB_EVENT_FB_UNBIND:
+ ret = fbcon_fb_unbind(info->node);
+ break;
case FB_EVENT_FB_REGISTERED:
- ret = fbcon_fb_registered(info->node);
+ ret = fbcon_fb_registered(info);
break;
case FB_EVENT_FB_UNREGISTERED:
- ret = fbcon_fb_unregistered(info->node);
+ ret = fbcon_fb_unregistered(info);
break;
case FB_EVENT_SET_CONSOLE_MAP:
con2fb = event->data;
@@ -3179,8 +3317,9 @@ static struct notifier_block fbcon_event_notifier = {
.notifier_call = fbcon_event_notify,
};
-static ssize_t store_rotate(struct class_device *class_device,
- const char *buf, size_t count)
+static ssize_t store_rotate(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct fb_info *info;
int rotate, idx;
@@ -3203,8 +3342,9 @@ err:
return count;
}
-static ssize_t store_rotate_all(struct class_device *class_device,
- const char *buf, size_t count)
+static ssize_t store_rotate_all(struct device *device,
+ struct device_attribute *attr,const char *buf,
+ size_t count)
{
struct fb_info *info;
int rotate, idx;
@@ -3227,7 +3367,8 @@ err:
return count;
}
-static ssize_t show_rotate(struct class_device *class_device, char *buf)
+static ssize_t show_rotate(struct device *device,
+ struct device_attribute *attr,char *buf)
{
struct fb_info *info;
int rotate = 0, idx;
@@ -3248,20 +3389,86 @@ err:
return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
}
-static struct class_device_attribute class_device_attrs[] = {
+static ssize_t show_cursor_blink(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx, blink = -1;
+
+ if (fbcon_has_exited)
+ return 0;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+ ops = info->fbcon_par;
+
+ if (!ops)
+ goto err;
+
+ blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
+err:
+ release_console_sem();
+ return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+}
+
+static ssize_t store_cursor_blink(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info;
+ int blink, idx;
+ char **last = NULL;
+
+ if (fbcon_has_exited)
+ return count;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+
+ if (!info->fbcon_par)
+ goto err;
+
+ blink = simple_strtoul(buf, last, 0);
+
+ if (blink) {
+ fbcon_cursor_noblink = 0;
+ fbcon_add_cursor_timer(info);
+ } else {
+ fbcon_cursor_noblink = 1;
+ fbcon_del_cursor_timer(info);
+ }
+
+err:
+ release_console_sem();
+ return count;
+}
+
+static struct device_attribute device_attrs[] = {
__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
__ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
+ __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
+ store_cursor_blink),
};
-static int fbcon_init_class_device(void)
+static int fbcon_init_device(void)
{
int i, error = 0;
fbcon_has_sysfs = 1;
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
- error = class_device_create_file(fbcon_class_device,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ error = device_create_file(fbcon_device, &device_attrs[i]);
if (error)
break;
@@ -3269,8 +3476,7 @@ static int fbcon_init_class_device(void)
if (error) {
while (--i >= 0)
- class_device_remove_file(fbcon_class_device,
- &class_device_attrs[i]);
+ device_remove_file(fbcon_device, &device_attrs[i]);
fbcon_has_sysfs = 0;
}
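Besides the mechanical class_device-to-device conversion, the sysfs block above gains a cursor_blink attribute next to rotate and rotate_all, backed by the new fbcon_cursor_noblink flag and the cursor-timer add/del calls. Assuming the fbcon node keeps appearing under the graphics class, blinking can be toggled from user space roughly like this (a usage sketch, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            /* path assumes device_create(fb_class, ..., "fbcon") still lands
             * under /sys/class/graphics/fbcon */
            FILE *f = fopen("/sys/class/graphics/fbcon/cursor_blink", "w");

            if (!f)
                    return 1;
            fputs("0\n", f);        /* 0 stops the blink timer, 1 restarts it */
            return fclose(f) ? 1 : 0;
    }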
@@ -3356,16 +3562,15 @@ static int __init fb_console_init(void)
acquire_console_sem();
fb_register_client(&fbcon_event_notifier);
- fbcon_class_device =
- class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
+ fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), "fbcon");
- if (IS_ERR(fbcon_class_device)) {
- printk(KERN_WARNING "Unable to create class_device "
+ if (IS_ERR(fbcon_device)) {
+ printk(KERN_WARNING "Unable to create device "
"for fbcon; errno = %ld\n",
- PTR_ERR(fbcon_class_device));
- fbcon_class_device = NULL;
+ PTR_ERR(fbcon_device));
+ fbcon_device = NULL;
} else
- fbcon_init_class_device();
+ fbcon_init_device();
for (i = 0; i < MAX_NR_CONSOLES; i++)
con2fb_map[i] = -1;
@@ -3379,14 +3584,13 @@ module_init(fb_console_init);
#ifdef MODULE
-static void __exit fbcon_deinit_class_device(void)
+static void __exit fbcon_deinit_device(void)
{
int i;
if (fbcon_has_sysfs) {
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(fbcon_class_device,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(fbcon_device, &device_attrs[i]);
fbcon_has_sysfs = 0;
}
@@ -3396,8 +3600,8 @@ static void __exit fb_console_exit(void)
{
acquire_console_sem();
fb_unregister_client(&fbcon_event_notifier);
- fbcon_deinit_class_device();
- class_device_destroy(fb_class, MKDEV(0, 0));
+ fbcon_deinit_device();
+ device_destroy(fb_class, MKDEV(0, 0));
fbcon_exit();
release_console_sem();
unregister_con_driver(&fb_con);
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 8b762739b1e..b0be7eac32d 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -94,7 +94,7 @@ static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninf
struct fb_info_control {
struct fb_info info;
struct fb_par_control par;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 94a66c2d2cf..e23324d10be 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1068,15 +1068,18 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
out8(0x3C9, green >> 10);
out8(0x3C9, blue >> 10);
- } else if (bpp == 16) // RGB 565
- ((u32 *) info->pseudo_palette)[regno] =
- (red & 0xF800) |
- ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
- else if (bpp == 32) // ARGB 8888
- ((u32 *) info->pseudo_palette)[regno] =
- ((transp & 0xFF00) << 16) |
- ((red & 0xFF00) << 8) |
- ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+ } else if (regno < 16) {
+ if (bpp == 16) // RGB 565
+ ((u32 *) info->pseudo_palette)[regno] =
+ (red & 0xF800) |
+ ((green & 0xFC00) >> 5) |
+ ((blue & 0xF800) >> 11);
+ else if (bpp == 32) // ARGB 8888
+ ((u32 *) info->pseudo_palette)[regno] =
+ ((transp & 0xFF00) << 16) |
+ ((red & 0xFF00) << 8) |
+ ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+ }
return 0;
}
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index ca2c54ce508..33be46ccb54 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -63,23 +63,12 @@
struct epson1355_par {
unsigned long reg_addr;
+ u32 pseudo_palette[16];
};
/* ------------------------------------------------------------------------- */
-#ifdef CONFIG_SUPERH
-
-static inline u8 epson1355_read_reg(int index)
-{
- return ctrl_inb(par.reg_addr + index);
-}
-
-static inline void epson1355_write_reg(u8 data, int index)
-{
- ctrl_outb(data, par.reg_addr + index);
-}
-
-#elif defined(CONFIG_ARM)
+#if defined(CONFIG_ARM)
# ifdef CONFIG_ARCH_CEIVA
# include <asm/arch/hardware.h>
@@ -289,7 +278,7 @@ static int epson1355fb_blank(int blank_mode, struct fb_info *info)
struct epson1355_par *par = info->par;
switch (blank_mode) {
- case FB_BLANK_UNBLANKING:
+ case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
lcd_enable(par, 1);
backlight_enable(1);
@@ -635,7 +624,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
goto bail;
}
- info = framebuffer_alloc(sizeof(struct epson1355_par) + sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
if (!info) {
rc = -ENOMEM;
goto bail;
@@ -648,7 +637,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
rc = -ENOMEM;
goto bail;
}
- info->pseudo_palette = (void *)(default_par + 1);
+ info->pseudo_palette = default_par->pseudo_palette;
info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 38c2e2558f5..215ac579f90 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -33,17 +33,10 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/efi.h>
+#include <linux/fb.h>
-#if defined(__mc68000__) || defined(CONFIG_APUS)
-#include <asm/setup.h>
-#endif
+#include <asm/fb.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/fb.h>
/*
* Frame buffer device initialization and setup routines
@@ -411,10 +404,146 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
}
}
+static int fb_show_logo_line(struct fb_info *info, int rotate,
+ const struct linux_logo *logo, int y,
+ unsigned int n)
+{
+ u32 *palette = NULL, *saved_pseudo_palette = NULL;
+ unsigned char *logo_new = NULL, *logo_rotate = NULL;
+ struct fb_image image;
+
+ /* Return if the frame buffer is not mapped or suspended */
+ if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
+ info->flags & FBINFO_MODULE)
+ return 0;
+
+ image.depth = 8;
+ image.data = logo->data;
+
+ if (fb_logo.needs_cmapreset)
+ fb_set_logocmap(info, logo);
+
+ if (fb_logo.needs_truepalette ||
+ fb_logo.needs_directpalette) {
+ palette = kmalloc(256 * 4, GFP_KERNEL);
+ if (palette == NULL)
+ return 0;
+
+ if (fb_logo.needs_truepalette)
+ fb_set_logo_truepalette(info, logo, palette);
+ else
+ fb_set_logo_directpalette(info, logo, palette);
+
+ saved_pseudo_palette = info->pseudo_palette;
+ info->pseudo_palette = palette;
+ }
+
+ if (fb_logo.depth <= 4) {
+ logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
+ if (logo_new == NULL) {
+ kfree(palette);
+ if (saved_pseudo_palette)
+ info->pseudo_palette = saved_pseudo_palette;
+ return 0;
+ }
+ image.data = logo_new;
+ fb_set_logo(info, logo, logo_new, fb_logo.depth);
+ }
+
+ image.dx = 0;
+ image.dy = y;
+ image.width = logo->width;
+ image.height = logo->height;
+
+ if (rotate) {
+ logo_rotate = kmalloc(logo->width *
+ logo->height, GFP_KERNEL);
+ if (logo_rotate)
+ fb_rotate_logo(info, logo_rotate, &image, rotate);
+ }
+
+ fb_do_show_logo(info, &image, rotate, n);
+
+ kfree(palette);
+ if (saved_pseudo_palette != NULL)
+ info->pseudo_palette = saved_pseudo_palette;
+ kfree(logo_new);
+ kfree(logo_rotate);
+ return logo->height;
+}
+
+
+#ifdef CONFIG_FB_LOGO_EXTRA
+
+#define FB_LOGO_EX_NUM_MAX 10
+static struct logo_data_extra {
+ const struct linux_logo *logo;
+ unsigned int n;
+} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
+static unsigned int fb_logo_ex_num;
+
+void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
+{
+ if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
+ return;
+
+ fb_logo_ex[fb_logo_ex_num].logo = logo;
+ fb_logo_ex[fb_logo_ex_num].n = n;
+ fb_logo_ex_num++;
+}
+
+static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
+ unsigned int yres)
+{
+ unsigned int i;
+
+ /* FIXME: logo_ex supports only truecolor fb. */
+ if (info->fix.visual != FB_VISUAL_TRUECOLOR)
+ fb_logo_ex_num = 0;
+
+ for (i = 0; i < fb_logo_ex_num; i++) {
+ height += fb_logo_ex[i].logo->height;
+ if (height > yres) {
+ height -= fb_logo_ex[i].logo->height;
+ fb_logo_ex_num = i;
+ break;
+ }
+ }
+ return height;
+}
+
+static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+ unsigned int i;
+
+ for (i = 0; i < fb_logo_ex_num; i++)
+ y += fb_show_logo_line(info, rotate,
+ fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
+
+ return y;
+}
+
+#else /* !CONFIG_FB_LOGO_EXTRA */
+
+static inline int fb_prepare_extra_logos(struct fb_info *info,
+ unsigned int height,
+ unsigned int yres)
+{
+ return height;
+}
+
+static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+ return y;
+}
+
+#endif /* CONFIG_FB_LOGO_EXTRA */
+
+
int fb_prepare_logo(struct fb_info *info, int rotate)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
- int yres;
+ unsigned int yres;
memset(&fb_logo, 0, sizeof(struct logo_data));
@@ -456,7 +585,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
if (!fb_logo.logo) {
return 0;
}
-
+
if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
yres = info->var.yres;
else
@@ -473,75 +602,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
fb_logo.depth = 4;
else
- fb_logo.depth = 1;
- return fb_logo.logo->height;
+ fb_logo.depth = 1;
+
+ return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
}
int fb_show_logo(struct fb_info *info, int rotate)
{
- u32 *palette = NULL, *saved_pseudo_palette = NULL;
- unsigned char *logo_new = NULL, *logo_rotate = NULL;
- struct fb_image image;
-
- /* Return if the frame buffer is not mapped or suspended */
- if (fb_logo.logo == NULL || info->state != FBINFO_STATE_RUNNING ||
- info->flags & FBINFO_MODULE)
- return 0;
-
- image.depth = 8;
- image.data = fb_logo.logo->data;
-
- if (fb_logo.needs_cmapreset)
- fb_set_logocmap(info, fb_logo.logo);
-
- if (fb_logo.needs_truepalette ||
- fb_logo.needs_directpalette) {
- palette = kmalloc(256 * 4, GFP_KERNEL);
- if (palette == NULL)
- return 0;
-
- if (fb_logo.needs_truepalette)
- fb_set_logo_truepalette(info, fb_logo.logo, palette);
- else
- fb_set_logo_directpalette(info, fb_logo.logo, palette);
-
- saved_pseudo_palette = info->pseudo_palette;
- info->pseudo_palette = palette;
- }
-
- if (fb_logo.depth <= 4) {
- logo_new = kmalloc(fb_logo.logo->width * fb_logo.logo->height,
- GFP_KERNEL);
- if (logo_new == NULL) {
- kfree(palette);
- if (saved_pseudo_palette)
- info->pseudo_palette = saved_pseudo_palette;
- return 0;
- }
- image.data = logo_new;
- fb_set_logo(info, fb_logo.logo, logo_new, fb_logo.depth);
- }
+ int y;
- image.dx = 0;
- image.dy = 0;
- image.width = fb_logo.logo->width;
- image.height = fb_logo.logo->height;
+ y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
+ num_online_cpus());
+ y = fb_show_extra_logos(info, y, rotate);
- if (rotate) {
- logo_rotate = kmalloc(fb_logo.logo->width *
- fb_logo.logo->height, GFP_KERNEL);
- if (logo_rotate)
- fb_rotate_logo(info, logo_rotate, &image, rotate);
- }
-
- fb_do_show_logo(info, &image, rotate, num_online_cpus());
-
- kfree(palette);
- if (saved_pseudo_palette != NULL)
- info->pseudo_palette = saved_pseudo_palette;
- kfree(logo_new);
- kfree(logo_rotate);
- return fb_logo.logo->height;
+ return y;
}
#else
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
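The fb_prepare_logo()/fb_show_logo() rework factors the drawing into fb_show_logo_line() so that, with CONFIG_FB_LOGO_EXTRA (enabled by default when SPU_BASE is set), additional logos queued with fb_append_extra_logo() are drawn beneath the usual per-CPU logos, with their height reserved up front. A hedged sketch of how a caller might queue the new SPE logo; the symbol name follows the logo_spe_clut224.ppm object below, but the call site and includes are illustrative:

    #include <linux/init.h>
    #include <linux/fb.h>
    #include <linux/linux_logo.h>

    extern const struct linux_logo logo_spe_clut224;

    static void __init example_queue_spe_logos(unsigned int nr_spes)
    {
            /* one extra logo per usable SPE, drawn after the CPU logos */
            fb_append_extra_logo(&logo_spe_clut224, nr_spes);
    }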
@@ -1155,17 +1229,15 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#endif
-static int
+static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
struct fb_ops *fb = info->fbops;
unsigned long off;
-#if !defined(__sparc__) || defined(__sparc_v9__)
unsigned long start;
u32 len;
-#endif
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
@@ -1180,12 +1252,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-#if defined(__sparc__) && !defined(__sparc_v9__)
- /* Should never get here, all fb drivers should have their own
- mmap routines */
- return -EINVAL;
-#else
- /* !sparc32... */
lock_kernel();
/* frame buffer memory */
@@ -1209,50 +1275,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO | VM_RESERVED;
-#if defined(__mc68000__)
-#if defined(CONFIG_SUN3)
- pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
-#elif defined(CONFIG_MMU)
- if (CPU_IS_020_OR_030)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
- if (CPU_IS_040_OR_060) {
- pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
- /* Use no-cache mode, serialized */
- pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
- }
-#endif
-#elif defined(__powerpc__)
- vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-#elif defined(__alpha__)
- /* Caching is off in the I/O space quadrant by design. */
-#elif defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3)
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-#elif defined(__mips__) || defined(__sparc_v9__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#elif defined(__hppa__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-#elif defined(__avr32__)
- vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
- & ~_PAGE_CACHABLE)
- | (_PAGE_BUFFER | _PAGE_DIRTY));
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#else
-#warning What do we have to do here??
-#endif
+ fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
-#endif /* !sparc32 */
}
static int
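The removed #ifdef ladder in fb_mmap() moves behind a per-architecture <asm/fb.h> header: each port now supplies fb_pgprotect() to pick the caching attributes for a framebuffer mapping, alongside the fb_is_primary_device() hook fbcon uses earlier for primary-device detection. A deliberately generic sketch of the shape such a header takes; the trivial bodies are a fallback, not any particular architecture's real implementation:

    #ifndef _ASM_FB_H_
    #define _ASM_FB_H_

    #include <linux/fb.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
                                    unsigned long off)
    {
            /* uncached (or write-combined, where available) aperture mapping */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    }

    static inline int fb_is_primary_device(struct fb_info *info)
    {
            return 0;       /* this port cannot tell which device booted the console */
    }

    #endif /* _ASM_FB_H_ */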
@@ -1388,17 +1415,34 @@ register_framebuffer(struct fb_info *fb_info)
*
* Returns negative errno on error, or zero for success.
*
+ * This function will also notify the framebuffer console
+ * to release the driver.
+ *
+ * This is meant to be called within a driver's module_exit()
+ * function. If this is called outside module_exit(), ensure
+ * that the driver implements fb_open() and fb_release() to
+ * check that no processes are using the device.
*/
int
unregister_framebuffer(struct fb_info *fb_info)
{
struct fb_event event;
- int i;
+ int i, ret = 0;
i = fb_info->node;
- if (!registered_fb[i])
- return -EINVAL;
+ if (!registered_fb[i]) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+
+ if (ret) {
+ ret = -EINVAL;
+ goto done;
+ }
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
@@ -1410,7 +1454,8 @@ unregister_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
event.info = fb_info;
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
- return 0;
+done:
+ return ret;
}
/**
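unregister_framebuffer() now publishes FB_EVENT_FB_UNBIND before tearing anything down and returns -EINVAL if a client (typically fbcon, via fbcon_fb_unbind() above) cannot release the device, so drivers should no longer assume it succeeds. A hedged sketch of a remove path that honours the new return value; the device and function names are illustrative:

    #include <linux/pci.h>
    #include <linux/fb.h>

    static void __devexit examplefb_pci_remove(struct pci_dev *pdev)
    {
            struct fb_info *info = pci_get_drvdata(pdev);

            if (!info)
                    return;

            if (unregister_framebuffer(info)) {
                    /* still bound somewhere; keep fb_info alive rather than
                     * freeing memory the console may still be using */
                    dev_warn(&pdev->dev, "framebuffer busy, not released\n");
                    return;
            }

            framebuffer_release(info);
            pci_set_drvdata(pdev, NULL);
    }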
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 70ff55b1459..6c91c61cdb6 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -195,13 +195,15 @@ static int fm2fb_blank(int blank, struct fb_info *info)
static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
- if (regno > info->cmap.len)
- return 1;
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ if (regno < 16) {
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
+ ((u32*)(info->pseudo_palette))[regno] = (red << 16) |
+ (green << 8) | blue;
+ }
- ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
return 0;
}
@@ -237,7 +239,7 @@ static int __devinit fm2fb_probe(struct zorro_dev *z,
if (!zorro_request_device(z,"fm2fb"))
return -ENXIO;
- info = framebuffer_alloc(256 * sizeof(u32), &z->dev);
+ info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
if (!info) {
zorro_release_device(z);
return -ENOMEM;
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index bf0e60b5a3b..b9b572b293d 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -86,7 +86,7 @@ static int gbe_revision;
static int ypan, ywrap;
-static uint32_t pseudo_palette[256];
+static uint32_t pseudo_palette[16];
static char *mode_option __initdata = NULL;
@@ -854,8 +854,7 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
green >>= 8;
blue >>= 8;
- switch (info->var.bits_per_pixel) {
- case 8:
+ if (info->var.bits_per_pixel <= 8) {
/* wait for the color map FIFO to have a free entry */
for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
udelay(10);
@@ -864,23 +863,25 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
- break;
- case 15:
- case 16:
- red >>= 3;
- green >>= 3;
- blue >>= 3;
- pseudo_palette[regno] =
- (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- break;
- case 32:
- pseudo_palette[regno] =
- (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- break;
+ } else if (regno < 16) {
+ switch (info->var.bits_per_pixel) {
+ case 15:
+ case 16:
+ red >>= 3;
+ green >>= 3;
+ blue >>= 3;
+ pseudo_palette[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ break;
+ case 32:
+ pseudo_palette[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ break;
+ }
}
return 0;
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 889e4ea5edc..328ae6c673e 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -266,7 +266,7 @@ struct i810fb_par {
struct i810fb_i2c_chan chan[3];
struct mutex open_lock;
unsigned int use_count;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
unsigned long mmio_start_phys;
u8 __iomem *mmio_start_virtual;
u8 *edid;
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 80b94c19a9f..6148300fadd 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -302,7 +302,7 @@ struct intelfb_info {
u32 ring_lockup;
/* palette */
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
/* chip info */
int pci_chipset;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9397bcef301..da219c043c9 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,6 +10,11 @@ menuconfig LOGO
if LOGO
+config FB_LOGO_EXTRA
+ bool
+ depends on FB
+ default y if SPU_BASE
+
config LOGO_LINUX_MONO
bool "Standard black and white Linux logo"
default y
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b985dfad6c6..a5fc4edf84e 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_LOGO_SUPERH_VGA16) += logo_superh_vga16.o
obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o
obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
+obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
+
# How to generate logo's
# Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo_spe_clut224.ppm b/drivers/video/logo/logo_spe_clut224.ppm
new file mode 100644
index 00000000000..d36ad624a79
--- /dev/null
+++ b/drivers/video/logo/logo_spe_clut224.ppm
@@ -0,0 +1,283 @@
+P3
+40 40
+255
[remaining 280 lines of raw 8-bit RGB pixel triplets for the 40x40 SPE boot logo omitted]
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index f7d647dda97..aa8c714d624 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -170,7 +170,7 @@ static struct fb_fix_screeninfo macfb_fix = {
};
static struct fb_info fb_info;
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
static int inverse = 0;
static int vidtest = 0;
@@ -529,56 +529,63 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= fb_info->cmap.len)
return 1;
- switch (fb_info->var.bits_per_pixel) {
- case 1:
- /* We shouldn't get here */
- break;
- case 2:
- case 4:
- case 8:
- if (macfb_setpalette)
- macfb_setpalette(regno, red, green, blue, fb_info);
- else
- return 1;
- break;
- case 16:
- if (fb_info->var.red.offset == 10) {
- /* 1:5:5:5 */
- ((u32*) (fb_info->pseudo_palette))[regno] =
+ if (fb_info->var.bits_per_pixel <= 8) {
+ switch (fb_info->var.bits_per_pixel) {
+ case 1:
+ /* We shouldn't get here */
+ break;
+ case 2:
+ case 4:
+ case 8:
+ if (macfb_setpalette)
+ macfb_setpalette(regno, red, green, blue,
+ fb_info);
+ else
+ return 1;
+ break;
+ }
+ } else if (regno < 16) {
+ switch (fb_info->var.bits_per_pixel) {
+ case 16:
+ if (fb_info->var.red.offset == 10) {
+ /* 1:5:5:5 */
+ ((u32*) (fb_info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11) |
((transp != 0) << 15);
- } else {
- /* 0:5:6:5 */
- ((u32*) (fb_info->pseudo_palette))[regno] =
+ } else {
+ /* 0:5:6:5 */
+ ((u32*) (fb_info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
+ }
+ break;
+ /* I'm pretty sure that one or the other of these
+ doesn't exist on 68k Macs */
+ case 24:
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ ((u32 *)(fb_info->pseudo_palette))[regno] =
+ (red << fb_info->var.red.offset) |
+ (green << fb_info->var.green.offset) |
+ (blue << fb_info->var.blue.offset);
+ break;
+ case 32:
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ ((u32 *)(fb_info->pseudo_palette))[regno] =
+ (red << fb_info->var.red.offset) |
+ (green << fb_info->var.green.offset) |
+ (blue << fb_info->var.blue.offset);
+ break;
}
- break;
- /* I'm pretty sure that one or the other of these
- doesn't exist on 68k Macs */
- case 24:
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- ((u32 *)(fb_info->pseudo_palette))[regno] =
- (red << fb_info->var.red.offset) |
- (green << fb_info->var.green.offset) |
- (blue << fb_info->var.blue.offset);
- break;
- case 32:
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- ((u32 *)(fb_info->pseudo_palette))[regno] =
- (red << fb_info->var.red.offset) |
- (green << fb_info->var.green.offset) |
- (blue << fb_info->var.blue.offset);
- break;
- }
- return 0;
+ }
+
+ return 0;
}
static struct fb_ops macfb_ops = {
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index ab2149531a0..083f60321ed 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
*
*/
-int __devinit mac_find_mode(struct fb_var_screeninfo *var,
- struct fb_info *info, const char *mode_option,
- unsigned int default_bpp)
+int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
+ const char *mode_option, unsigned int default_bpp)
{
const struct fb_videomode *db = NULL;
unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index babeb81f467..b86ba08aac9 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
int *cmode);
extern int mac_map_monitor_sense(int sense);
-extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
- struct fb_info *info,
- const char *mode_option,
- unsigned int default_bpp);
+extern int mac_find_mode(struct fb_var_screeninfo *var,
+ struct fb_info *info,
+ const char *mode_option,
+ unsigned int default_bpp);
/*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index c57aaadf410..3660d2673bd 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -91,7 +91,6 @@ static inline void matrox_cfb4_pal(u_int32_t* pal) {
for (i = 0; i < 16; i++) {
pal[i] = i * 0x11111111U;
}
- pal[i] = 0xFFFFFFFF;
}
static inline void matrox_cfb8_pal(u_int32_t* pal) {
@@ -100,7 +99,6 @@ static inline void matrox_cfb8_pal(u_int32_t* pal) {
for (i = 0; i < 16; i++) {
pal[i] = i * 0x01010101U;
}
- pal[i] = 0x0F0F0F0F;
}
static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area);
@@ -145,13 +143,10 @@ void matrox_cfbX_init(WPMINFO2) {
ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
}
break;
- case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5) {
+ case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5)
maccess = 0xC0000001;
- ACCESS_FBINFO(cmap[16]) = 0x7FFF7FFF;
- } else {
+ else
maccess = 0x40000001;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
- }
mopmode = M_OPMODE_16BPP;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
@@ -161,7 +156,6 @@ void matrox_cfbX_init(WPMINFO2) {
break;
case 24: maccess = 0x00000003;
mopmode = M_OPMODE_24BPP;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
@@ -170,7 +164,6 @@ void matrox_cfbX_init(WPMINFO2) {
break;
case 32: maccess = 0x00000002;
mopmode = M_OPMODE_32BPP;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 886e475f22f..86ca7b17900 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -679,6 +679,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
mga_outb(M_DAC_VAL, blue);
break;
case 16:
+ if (regno >= 16)
+ break;
{
u_int16_t col =
(red << ACCESS_FBINFO(fbcon).var.red.offset) |
@@ -690,6 +692,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
case 24:
case 32:
+ if (regno >= 16)
+ break;
ACCESS_FBINFO(cmap[regno]) =
(red << ACCESS_FBINFO(fbcon).var.red.offset) |
(green << ACCESS_FBINFO(fbcon).var.green.offset) |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 9c25c2f7966..d59577c8de8 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -518,7 +518,7 @@ struct matrox_fb_info {
dll:1;
} memory;
} values;
- u_int32_t cmap[17];
+ u_int32_t cmap[16];
};
#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 03ae55b168f..4b3344e0369 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -163,11 +163,6 @@ static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
}
-static void matroxfb_dh_cfbX_init(struct matroxfb_dh_fb_info* m2info) {
- /* no acceleration for secondary head... */
- m2info->cmap[16] = 0xFFFFFFFF;
-}
-
static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
struct fb_var_screeninfo* var) {
unsigned int pos;
@@ -385,7 +380,6 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
}
}
up_read(&ACCESS_FBINFO(altout).lock);
- matroxfb_dh_cfbX_init(m2info);
}
m2info->initialized = 1;
return 0;
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/matrox/matroxfb_crtc2.h
index 177177609be..1005582e843 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/matrox/matroxfb_crtc2.h
@@ -28,7 +28,7 @@ struct matroxfb_dh_fb_info {
unsigned int interlaced:1;
- u_int32_t cmap[17];
+ u_int32_t cmap[16];
};
#endif /* __MATROXFB_CRTC2_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 5d29a26b8cd..de0d755f901 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -273,8 +273,11 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
}
}
}
+
+ /* if h2/post/in/feed have not been assigned, return zero (error) */
if (besth2 < 2)
return 0;
+
dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant);
return fxtal * (*feed) / (*in) * ctl->den;
}
@@ -284,7 +287,7 @@ static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
unsigned int* in, unsigned int* feed, unsigned int* post,
unsigned int* htotal2) {
unsigned int fvco;
- unsigned int p;
+ unsigned int uninitialized_var(p);
fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
if (!fvco)
@@ -715,7 +718,9 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
m->regs[0x82] = 0x81;
for (x = 0; x < 8; x++) {
- unsigned int a, b, c, h2;
+ unsigned int c;
+ unsigned int uninitialized_var(a), uninitialized_var(b),
+ uninitialized_var(h2);
unsigned int h = ht + 2 + x;
if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
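The maven hunks above switch a few locals to uninitialized_var(). That macro exists purely to silence a false "may be used uninitialized" warning from gcc when the compiler cannot see that the variable is only read on paths where a callee has already written it; it must not be used to paper over a genuinely missing initialization. Roughly how it is defined and used (the exact definition lives in the compiler headers):

	/* approximate definition, see include/linux/compiler-gcc*.h */
	#define uninitialized_var(x) x = x

	unsigned int uninitialized_var(p);	/* filled in by the callee on success */

Here p, a, b and h2 are presumably only consumed on the success paths, after the callers have checked the callees' return values, so the warning was spurious.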
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index aff11bbf59a..d1a10549f54 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -150,8 +150,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
if (((par->Chipset & 0xfff0) == 0x0290) ||
- ((par->Chipset & 0xfff0) == 0x0390) ||
- ((par->Chipset & 0xfff0) == 0x02E0)) {
+ ((par->Chipset & 0xfff0) == 0x0390)) {
MB = 1;
NB = 1;
} else {
@@ -161,7 +160,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
pll = NV_RD32(par->PMC, 0x4000);
- P = (pll >> 16) & 0x03;
+ P = (pll >> 16) & 0x07;
pll = NV_RD32(par->PMC, 0x4004);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
@@ -892,11 +891,17 @@ void NVCalcStateExt(struct nvidia_par *par,
state->general = bpp == 16 ? 0x00101100 : 0x00100100;
state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
break;
+ case NV_ARCH_40:
+ if (!par->FlatPanel)
+ state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
+ 0xeffffeff;
+ /* fallthrough */
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
default:
- if ((par->Chipset & 0xfff0) == 0x0240) {
+ if ((par->Chipset & 0xfff0) == 0x0240 ||
+ (par->Chipset & 0xfff0) == 0x03d0) {
state->arbitration0 = 256;
state->arbitration1 = 0x0480;
} else if (((par->Chipset & 0xffff) == 0x01A0) ||
@@ -939,7 +944,7 @@ void NVCalcStateExt(struct nvidia_par *par,
void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
{
- int i;
+ int i, j;
NV_WR32(par->PMC, 0x0140, 0x00000000);
NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
@@ -951,7 +956,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
if (par->Architecture == NV_ARCH_04) {
- NV_WR32(par->PFB, 0x0200, state->config);
+ if (state)
+ NV_WR32(par->PFB, 0x0200, state->config);
} else if ((par->Architecture < NV_ARCH_40) ||
(par->Chipset & 0xfff0) == 0x0040) {
for (i = 0; i < 8; i++) {
@@ -964,8 +970,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
- ((par->Chipset & 0xfff0) == 0x02E0) ||
- ((par->Chipset & 0xfff0) == 0x0290))
+ ((par->Chipset & 0xfff0) == 0x0290) ||
+ ((par->Chipset & 0xfff0) == 0x0390) ||
+ ((par->Chipset & 0xfff0) == 0x03D0))
regions = 15;
for(i = 0; i < regions; i++) {
NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
@@ -1206,16 +1213,20 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
if (par->Architecture >= NV_ARCH_40) {
- u32 tmp;
-
NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
+ NV_WR32(par->PGRAPH, 0x0bc4,
+ NV_RD32(par->PGRAPH, 0x0bc4) |
+ 0x00008000);
- tmp = NV_RD32(par->REGS, 0x1540) & 0xff;
- for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++);
- NV_WR32(par->PGRAPH, 0x5000, i);
+ j = NV_RD32(par->REGS, 0x1540) & 0xff;
+
+ if (j) {
+ for (i = 0; !(j & 1); j >>= 1, i++);
+ NV_WR32(par->PGRAPH, 0x5000, i);
+ }
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09b0,
@@ -1250,6 +1261,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
case 0x0160:
case 0x01D0:
case 0x0240:
+ case 0x03D0:
NV_WR32(par->PMC, 0x1700,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PMC, 0x1704, 0);
@@ -1269,7 +1281,6 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00000108);
break;
case 0x0220:
- case 0x0230:
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
@@ -1277,8 +1288,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00100000);
break;
case 0x0090:
- case 0x02E0:
case 0x0290:
+ case 0x0390:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
@@ -1355,8 +1366,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
- ((par->Chipset & 0xfff0) == 0x02E0) ||
- ((par->Chipset & 0xfff0) == 0x0290)) {
+ ((par->Chipset & 0xfff0) == 0x0290) ||
+ ((par->Chipset & 0xfff0) == 0x0390) ||
+ ((par->Chipset & 0xfff0) == 0x03D0)) {
for (i = 0; i < 60; i++) {
NV_WR32(par->PGRAPH,
0x0D00 + i*4,
@@ -1407,8 +1419,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if ((par->Chipset & 0xfff0) == 0x0090 ||
(par->Chipset & 0xfff0) == 0x01D0 ||
- (par->Chipset & 0xfff0) == 0x02E0 ||
- (par->Chipset & 0xfff0) == 0x0290) {
+ (par->Chipset & 0xfff0) == 0x0290 ||
+ (par->Chipset & 0xfff0) == 0x0390) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0DF4,
@@ -1495,6 +1507,12 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
+
+ if (!state) {
+ par->CurrentState = NULL;
+ return;
+ }
+
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
NV_WR32(par->PCRTC0, 0x0860, state->head);
@@ -1566,6 +1584,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
VGA_WR08(par->PCIO, 0x03D5, state->interlace);
if (!par->FlatPanel) {
+ if (par->Architecture >= NV_ARCH_40)
+ NV_WR32(par->PRAMDAC0, 0x0580, state->control);
+
NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
if (par->twoHeads)
@@ -1631,6 +1652,9 @@ void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) {
state->scale = NV_RD32(par->PRAMDAC, 0x0848);
state->config = NV_RD32(par->PFB, 0x0200);
+ if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
+ state->control = NV_RD32(par->PRAMDAC0, 0x0580);
+
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
state->head = NV_RD32(par->PCRTC0, 0x0860);
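In NVLoadStateExt() the NV40 branch now scans register 0x1540 for its lowest set bit and programs PGRAPH 0x5000 with that bit's index, but only when the mask is non-zero; the old code wrote the register even when the read returned zero. A self-contained sketch of the same bit scan (hypothetical helper name):

	/* index of the lowest set bit, or -1 if no bit is set */
	static int lowest_set_bit(unsigned int mask)
	{
		int i;

		if (!mask)
			return -1;
		for (i = 0; !(mask & 1); mask >>= 1, i++)
			;
		return i;
	}

For a non-zero mask, ffs(mask) - 1 computes the same index.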
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 707e2c8a13e..82579d3a997 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -166,11 +166,13 @@ u8 NVReadDacData(struct nvidia_par *par)
static int NVIsConnected(struct nvidia_par *par, int output)
{
volatile u32 __iomem *PRAMDAC = par->PRAMDAC0;
- u32 reg52C, reg608;
+ u32 reg52C, reg608, dac0_reg608 = 0;
int present;
- if (output)
- PRAMDAC += 0x800;
+ if (output) {
+ dac0_reg608 = NV_RD32(PRAMDAC, 0x0608);
+ PRAMDAC += 0x800;
+ }
reg52C = NV_RD32(PRAMDAC, 0x052C);
reg608 = NV_RD32(PRAMDAC, 0x0608);
@@ -194,8 +196,8 @@ static int NVIsConnected(struct nvidia_par *par, int output)
else
printk("nvidiafb: CRTC%i analog not found\n", output);
- NV_WR32(par->PRAMDAC0, 0x0608, NV_RD32(par->PRAMDAC0, 0x0608) &
- 0x0000EFFF);
+ if (output)
+ NV_WR32(par->PRAMDAC0, 0x0608, dac0_reg608);
NV_WR32(PRAMDAC, 0x052C, reg52C);
NV_WR32(PRAMDAC, 0x0608, reg608);
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index 38f7cc0a233..2fdf77ec39f 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -86,6 +86,7 @@ typedef struct _riva_hw_state {
u32 timingV;
u32 displayV;
u32 crtcSync;
+ u32 control;
} RIVA_HW_STATE;
struct riva_regs {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 41f63658572..a7fe214f0f7 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -674,6 +674,7 @@ static int nvidiafb_set_par(struct fb_info *info)
info->fbops->fb_sync = nvidiafb_sync;
info->pixmap.scan_align = 4;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ info->flags |= FBINFO_READS_FAST;
NVResetGraphics(info);
} else {
info->fbops->fb_imageblit = cfb_imageblit;
@@ -682,6 +683,7 @@ static int nvidiafb_set_par(struct fb_info *info)
info->fbops->fb_sync = NULL;
info->pixmap.scan_align = 1;
info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->flags &= ~FBINFO_READS_FAST;
}
par->cursor_reset = 1;
@@ -1193,7 +1195,8 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
printk(KERN_INFO PFX "Device ID: %x \n", id);
- if ((id & 0xfff0) == 0x00f0) {
+ if ((id & 0xfff0) == 0x00f0 ||
+ (id & 0xfff0) == 0x02e0) {
/* pci-e */
id = NV_RD32(par->REGS, 0x1800);
@@ -1238,18 +1241,16 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
case 0x0040: /* GeForce 6800 */
case 0x00C0: /* GeForce 6800 */
case 0x0120: /* GeForce 6800 */
- case 0x0130:
case 0x0140: /* GeForce 6600 */
case 0x0160: /* GeForce 6200 */
case 0x01D0: /* GeForce 7200, 7300, 7400 */
- case 0x02E0: /* GeForce 7300 GT */
case 0x0090: /* GeForce 7800 */
case 0x0210: /* GeForce 6800 */
case 0x0220: /* GeForce 6200 */
- case 0x0230:
case 0x0240: /* GeForce 6100 */
case 0x0290: /* GeForce 7900 */
case 0x0390: /* GeForce 7600 */
+ case 0x03D0:
arch = NV_ARCH_40;
break;
case 0x0020: /* TNT, TNT2 */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 885b42836cb..452433d4697 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -271,7 +271,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
return;
}
- size = sizeof(struct fb_info) + sizeof(u32) * 17;
+ size = sizeof(struct fb_info) + sizeof(u32) * 16;
info = kmalloc(size, GFP_ATOMIC);
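offb sizes its single allocation as the fb_info structure followed by 16 u32 palette entries; like the matroxfb change above, the 17th sentinel entry is gone. The usual fbdev idiom is to point pseudo_palette just past the structure, sketched here with a hypothetical helper (error handling trimmed to the allocation check):

	static struct fb_info *alloc_info_with_palette(void)
	{
		/* one allocation: fb_info immediately followed by 16 palette entries */
		struct fb_info *info = kmalloc(sizeof(*info) + sizeof(u32) * 16,
					       GFP_ATOMIC);

		if (!info)
			return NULL;
		info->pseudo_palette = info + 1;	/* entries start right after the struct */
		return info;
	}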
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
new file mode 100644
index 00000000000..7f4d25b8a18
--- /dev/null
+++ b/drivers/video/omap/Kconfig
@@ -0,0 +1,58 @@
+config FB_OMAP
+ tristate "OMAP frame buffer support (EXPERIMENTAL)"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for OMAP based boards.
+
+config FB_OMAP_BOOTLOADER_INIT
+	bool "Check bootloader initialization"
+ depends on FB_OMAP
+ help
+	  Say Y here if you want the driver to check whether the
+	  bootloader has already initialized the display controller.
+	  If it has, the driver skips its own initialization.
+
+config FB_OMAP_CONSISTENT_DMA_SIZE
+ int "Consistent DMA memory size (MB)"
+ depends on FB_OMAP
+ range 1 14
+ default 2
+ help
+ Increase the DMA consistent memory size according to your video
+ memory needs, for example if you want to use multiple planes.
+ The size must be 2MB aligned.
+ If unsure say 1.
+
+config FB_OMAP_DMA_TUNE
+ bool "Set DMA SDRAM access priority high"
+ depends on FB_OMAP && ARCH_OMAP1
+ help
+	  On systems where the video memory lives in system memory
+	  (SDRAM), this speeds up graphics DMA operations. If you have
+	  such a system and want to use rotation, answer yes. Answer no
+	  if you have dedicated video memory or don't use any of the
+	  accelerated features.
+
+config FB_OMAP_LCDC_EXTERNAL
+ bool "External LCD controller support"
+ depends on FB_OMAP
+ help
+	  Say Y here if you want to have support for boards with an
+ external LCD controller connected to the SoSSI/RFBI interface.
+
+config FB_OMAP_LCDC_HWA742
+ bool "Epson HWA742 LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson HWA742 LCD controller.
+
+config FB_OMAP_LCDC_BLIZZARD
+ bool "Epson Blizzard LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson Blizzard LCD controller.
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
new file mode 100644
index 00000000000..99da8b6d2c3
--- /dev/null
+++ b/drivers/video/omap/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the new OMAP framebuffer device driver
+#
+
+obj-$(CONFIG_FB_OMAP) += omapfb.o
+
+objs-yy := omapfb_main.o
+
+objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
+objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
+
+objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
+objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
+
+objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
+objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
+
+objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
+objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
+objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
+objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
+objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
+objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
+objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
+objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
+objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
+
+omapfb-objs := $(objs-yy)
+
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
new file mode 100644
index 00000000000..e682940a97a
--- /dev/null
+++ b/drivers/video/omap/blizzard.c
@@ -0,0 +1,1568 @@
+/*
+ * Epson Blizzard LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors: Juha Yrjola <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ * YUV support: Jussi Laako <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/blizzard.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME "blizzard"
+
+#define BLIZZARD_REV_CODE 0x00
+#define BLIZZARD_CONFIG 0x02
+#define BLIZZARD_PLL_DIV 0x04
+#define BLIZZARD_PLL_LOCK_RANGE 0x06
+#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
+#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
+#define BLIZZARD_PLL_MODE 0x0c
+#define BLIZZARD_CLK_SRC 0x0e
+#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
+#define BLIZZARD_MEM_BANK0_STATUS 0x14
+#define BLIZZARD_HDISP 0x2a
+#define BLIZZARD_HNDP 0x2c
+#define BLIZZARD_VDISP0 0x2e
+#define BLIZZARD_VDISP1 0x30
+#define BLIZZARD_VNDP 0x32
+#define BLIZZARD_HSW 0x34
+#define BLIZZARD_VSW 0x38
+#define BLIZZARD_DISPLAY_MODE 0x68
+#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
+#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
+#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
+#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
+#define BLIZZARD_POWER_SAVE 0xE6
+#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
+
+/* Data source select */
+/* For S1D13745 */
+#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
+#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
+#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
+#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
+/* For S1D13744 */
+#define BLIZZARD_SRC_WRITE_LCD 0x00
+#define BLIZZARD_SRC_BLT_LCD 0x06
+
+#define BLIZZARD_COLOR_RGB565 0x01
+#define BLIZZARD_COLOR_YUV420 0x09
+
+#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
+#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
+
+#define BLIZZARD_AUTO_UPDATE_TIME (HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE 24
+#define IRQ_REQ_POOL_SIZE 4
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE 0
+#define REQ_PENDING 1
+
+struct blizzard_reg_list {
+ int start;
+ int end;
+};
+
+/* These need to be saved / restored separately from the rest. */
+static struct blizzard_reg_list blizzard_pll_regs[] = {
+ {
+ .start = 0x04, /* Don't save PLL ctrl (0x0C) */
+ .end = 0x0a,
+ },
+ {
+ .start = 0x0e, /* Clock configuration */
+ .end = 0x0e,
+ },
+};
+
+static struct blizzard_reg_list blizzard_gen_regs[] = {
+ {
+ .start = 0x18, /* SDRAM control */
+ .end = 0x20,
+ },
+ {
+ .start = 0x28, /* LCD Panel configuration */
+ .end = 0x5a, /* HSSI interface, TV configuration */
+ },
+};
+
+static u8 blizzard_reg_cache[0x5a / 2];
+
+struct update_param {
+ int plane;
+ int x, y, width, height;
+ int out_x, out_y;
+ int out_width, out_height;
+ int color_mode;
+ int bpp;
+ int flags;
+};
+
+struct blizzard_request {
+ struct list_head entry;
+ unsigned int flags;
+
+ int (*handler)(struct blizzard_request *req);
+ void (*complete)(void *data);
+ void *complete_data;
+
+ union {
+ struct update_param update;
+ struct completion *sync;
+ } par;
+};
+
+struct plane_info {
+ unsigned long offset;
+ int pos_x, pos_y;
+ int width, height;
+ int out_width, out_height;
+ int scr_width;
+ int color_mode;
+ int bpp;
+};
+
+struct blizzard_struct {
+ enum omapfb_update_mode update_mode;
+ enum omapfb_update_mode update_mode_before_suspend;
+
+ struct timer_list auto_update_timer;
+ int stop_auto_update;
+ struct omapfb_update_window auto_update_window;
+ int enabled_planes;
+ int vid_nonstd_color;
+ int vid_scaled;
+ int last_color_mode;
+ int zoom_on;
+ int screen_width;
+ int screen_height;
+ unsigned te_connected:1;
+ unsigned vsync_only:1;
+
+ struct plane_info plane[OMAPFB_PLANE_NUM];
+
+ struct blizzard_request req_pool[REQ_POOL_SIZE];
+ struct list_head pending_req_list;
+ struct list_head free_req_list;
+ struct semaphore req_sema;
+ spinlock_t req_lock;
+
+ unsigned long sys_ck_rate;
+ struct extif_timings reg_timings, lut_timings;
+
+ u32 max_transmit_size;
+ u32 extif_clk_period;
+ int extif_clk_div;
+ unsigned long pix_tx_time;
+ unsigned long line_upd_time;
+
+ struct omapfb_device *fbdev;
+ struct lcd_ctrl_extif *extif;
+ struct lcd_ctrl *int_ctrl;
+
+ void (*power_up)(struct device *dev);
+ void (*power_down)(struct device *dev);
+
+ int version;
+} blizzard;
+
+struct lcd_ctrl blizzard_ctrl;
+
+static u8 blizzard_read_reg(u8 reg)
+{
+ u8 data;
+
+ blizzard.extif->set_bits_per_cycle(8);
+ blizzard.extif->write_command(&reg, 1);
+ blizzard.extif->read_data(&data, 1);
+
+ return data;
+}
+
+static void blizzard_write_reg(u8 reg, u8 val)
+{
+ blizzard.extif->set_bits_per_cycle(8);
+ blizzard.extif->write_command(&reg, 1);
+ blizzard.extif->write_data(&val, 1);
+}
+
+static void blizzard_restart_sdram(void)
+{
+ unsigned long tmo;
+
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+ udelay(50);
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1);
+ tmo = jiffies + msecs_to_jiffies(200);
+ while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) {
+ if (time_after(jiffies, tmo)) {
+ dev_err(blizzard.fbdev->dev,
+				"s1d1374x: SDRAM not ready\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+static void blizzard_stop_sdram(void)
+{
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+}
+
+/* Wait until the last window has been completely written into the controller's
+ * SDRAM and we can start transferring the next window.
+ */
+static void blizzard_wait_line_buffer(void)
+{
+ unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+ while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) {
+ if (time_after(jiffies, tmo)) {
+ if (printk_ratelimit())
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: line buffer not ready\n");
+ break;
+ }
+ }
+}
+
+/* Wait until the YYC color space converter is idle. */
+static void blizzard_wait_yyc(void)
+{
+ unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+ while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) {
+ if (time_after(jiffies, tmo)) {
+ if (printk_ratelimit())
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: YYC not ready\n");
+ break;
+ }
+ }
+}
+
+static void disable_overlay(void)
+{
+ blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
+ BLIZZARD_SRC_DISABLE_OVERLAY);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
+ int x_out_start, int y_out_start,
+ int x_out_end, int y_out_end, int color_mode,
+ int zoom_off, int flags)
+{
+ u8 tmp[18];
+ u8 cmd;
+
+ x_end--;
+ y_end--;
+ tmp[0] = x_start;
+ tmp[1] = x_start >> 8;
+ tmp[2] = y_start;
+ tmp[3] = y_start >> 8;
+ tmp[4] = x_end;
+ tmp[5] = x_end >> 8;
+ tmp[6] = y_end;
+ tmp[7] = y_end >> 8;
+
+ x_out_end--;
+ y_out_end--;
+ tmp[8] = x_out_start;
+ tmp[9] = x_out_start >> 8;
+ tmp[10] = y_out_start;
+ tmp[11] = y_out_start >> 8;
+ tmp[12] = x_out_end;
+ tmp[13] = x_out_end >> 8;
+ tmp[14] = y_out_end;
+ tmp[15] = y_out_end >> 8;
+
+ tmp[16] = color_mode;
+ if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
+ tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
+ else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
+ tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
+ else
+ tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
+ BLIZZARD_SRC_WRITE_LCD :
+ BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
+
+ blizzard.extif->set_bits_per_cycle(8);
+ cmd = BLIZZARD_INPUT_WIN_X_START_0;
+ blizzard.extif->write_command(&cmd, 1);
+ blizzard.extif->write_data(tmp, 18);
+}
+
+static void enable_tearsync(int y, int width, int height, int screen_height,
+ int out_height, int force_vsync)
+{
+ u8 b;
+
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b |= 1 << 3;
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+ if (likely(blizzard.vsync_only || force_vsync)) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if ((width * blizzard.pix_tx_time / 1000) * height <
+ (y + out_height) * (blizzard.line_upd_time / 1000)) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ blizzard.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+ u8 b;
+
+ blizzard.extif->enable_tearsync(0, 0);
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b &= ~(1 << 3);
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+}
+
+static inline void set_extif_timings(const struct extif_timings *t);
+
+static inline struct blizzard_request *alloc_req(void)
+{
+ unsigned long flags;
+ struct blizzard_request *req;
+ int req_flags = 0;
+
+ if (!in_interrupt())
+ down(&blizzard.req_sema);
+ else
+ req_flags = REQ_FROM_IRQ_POOL;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ BUG_ON(list_empty(&blizzard.free_req_list));
+ req = list_entry(blizzard.free_req_list.next,
+ struct blizzard_request, entry);
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ INIT_LIST_HEAD(&req->entry);
+ req->flags = req_flags;
+
+ return req;
+}
+
+static inline void free_req(struct blizzard_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+
+ list_del(&req->entry);
+ list_add(&req->entry, &blizzard.free_req_list);
+ if (!(req->flags & REQ_FROM_IRQ_POOL))
+ up(&blizzard.req_sema);
+
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+
+ while (!list_empty(&blizzard.pending_req_list)) {
+ struct blizzard_request *req;
+ void (*complete)(void *);
+ void *complete_data;
+
+ req = list_entry(blizzard.pending_req_list.next,
+ struct blizzard_request, entry);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ if (req->handler(req) == REQ_PENDING)
+ return;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+ unsigned long flags;
+ int process = 1;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ if (likely(!list_empty(&blizzard.pending_req_list)))
+ process = 0;
+ list_splice_init(head, blizzard.pending_req_list.prev);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ if (process)
+ process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+ struct blizzard_request *req = (struct blizzard_request *)data;
+ void (*complete)(void *);
+ void *complete_data;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ process_pending_requests();
+}
+
+
+static int do_full_screen_update(struct blizzard_request *req)
+{
+ int i;
+ int flags;
+
+ for (i = 0; i < 3; i++) {
+ struct plane_info *p = &blizzard.plane[i];
+ if (!(blizzard.enabled_planes & (1 << i))) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n",
+ p->width, p->height);
+ blizzard.int_ctrl->setup_plane(i,
+ OMAPFB_CHANNEL_OUT_LCD, p->offset,
+ p->scr_width, p->pos_x, p->pos_y,
+ p->width, p->height,
+ p->color_mode);
+ blizzard.int_ctrl->enable_plane(i, 1);
+ }
+
+ dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n",
+ blizzard.screen_width, blizzard.screen_height);
+ blizzard_wait_line_buffer();
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(0, blizzard.screen_width,
+ blizzard.screen_height,
+ blizzard.screen_height,
+ blizzard.screen_height,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height,
+ 0, 0, blizzard.screen_width, blizzard.screen_height,
+ BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags);
+ blizzard.zoom_on = 0;
+
+ blizzard.extif->set_bits_per_cycle(16);
+ /* set_window_regs has left the register index at the right
+ * place, so no need to set it here.
+ */
+ blizzard.extif->transfer_area(blizzard.screen_width,
+ blizzard.screen_height,
+ request_complete, req);
+ return REQ_PENDING;
+}
+
+/* Setup all planes with an overlapping area with the update window. */
+static int do_partial_update(struct blizzard_request *req, int plane,
+ int x, int y, int w, int h,
+ int x_out, int y_out, int w_out, int h_out,
+ int wnd_color_mode, int bpp)
+{
+ int i;
+ int gx1, gy1, gx2, gy2;
+ int gx1_out, gy1_out, gx2_out, gy2_out;
+ int color_mode;
+ int flags;
+ int zoom_off;
+
+ /* Global coordinates, relative to pixel 0,0 of the LCD */
+ gx1 = x + blizzard.plane[plane].pos_x;
+ gy1 = y + blizzard.plane[plane].pos_y;
+ gx2 = gx1 + w;
+ gy2 = gy1 + h;
+
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+ gx1_out = gx1;
+ gy1_out = gy1;
+ gx2_out = gx1 + w * 2;
+ gy2_out = gy1 + h * 2;
+ } else {
+ gx1_out = x_out + blizzard.plane[plane].pos_x;
+ gy1_out = y_out + blizzard.plane[plane].pos_y;
+ gx2_out = gx1_out + w_out;
+ gy2_out = gy1_out + h_out;
+ }
+ zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
+ w == blizzard.screen_width && h == blizzard.screen_height;
+ blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
+ (w < w_out || h < h_out);
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
+ struct plane_info *p = &blizzard.plane[i];
+ int px1, py1;
+ int px2, py2;
+ int pw, ph;
+ int pposx, pposy;
+ unsigned long offset;
+
+ if (!(blizzard.enabled_planes & (1 << i)) ||
+ (wnd_color_mode && i != plane)) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ /* Plane coordinates */
+ if (i == plane) {
+ /* Plane in which we are doing the update.
+ * Local coordinates are the one in the update
+ * request.
+ */
+ px1 = x;
+ py1 = y;
+ px2 = x + w;
+ py2 = y + h;
+ pposx = 0;
+ pposy = 0;
+ } else {
+ /* Check if this plane has an overlapping part */
+ px1 = gx1 - p->pos_x;
+ py1 = gy1 - p->pos_y;
+ px2 = gx2 - p->pos_x;
+ py2 = gy2 - p->pos_y;
+ if (px1 >= p->width || py1 >= p->height ||
+ px2 <= 0 || py2 <= 0) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ /* Calculate the coordinates for the overlapping
+ * part in the plane's local coordinates.
+ */
+ pposx = -px1;
+ pposy = -py1;
+ if (px1 < 0)
+ px1 = 0;
+ if (py1 < 0)
+ py1 = 0;
+ if (px2 > p->width)
+ px2 = p->width;
+ if (py2 > p->height)
+ py2 = p->height;
+ if (pposx < 0)
+ pposx = 0;
+ if (pposy < 0)
+ pposy = 0;
+ }
+ pw = px2 - px1;
+ ph = py2 - py1;
+ offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
+ if (wnd_color_mode)
+ /* Window embedded in the plane with a differing
+ * color mode / bpp. Calculate the number of DMA
+ * transfer elements in terms of the plane's bpp.
+ */
+ pw = (pw + 1) * bpp / p->bpp;
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d offset %#08lx pposx %d pposy %d "
+ "px1 %d py1 %d pw %d ph %d\n",
+ i, offset, pposx, pposy, px1, py1, pw, ph);
+#endif
+ blizzard.int_ctrl->setup_plane(i,
+ OMAPFB_CHANNEL_OUT_LCD, offset,
+ p->scr_width,
+ pposx, pposy, pw, ph,
+ p->color_mode);
+
+ blizzard.int_ctrl->enable_plane(i, 1);
+ }
+
+ switch (wnd_color_mode) {
+ case OMAPFB_COLOR_YUV420:
+ color_mode = BLIZZARD_COLOR_YUV420;
+ /* Currently only the 16 bits/pixel cycle format is
+ * supported on the external interface. Adjust the number
+ * of transfer elements per line for 12bpp format.
+ */
+ w = (w + 1) * 3 / 4;
+ break;
+ default:
+ color_mode = BLIZZARD_COLOR_RGB565;
+ break;
+ }
+
+ blizzard_wait_line_buffer();
+ if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
+ blizzard_wait_yyc();
+ blizzard.last_color_mode = color_mode;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(gy1, w, h,
+ blizzard.screen_height,
+ h_out,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
+ color_mode, zoom_off, flags);
+
+ blizzard.extif->set_bits_per_cycle(16);
+ /* set_window_regs has left the register index at the right
+ * place, so no need to set it here.
+ */
+ blizzard.extif->transfer_area(w, h, request_complete, req);
+
+ return REQ_PENDING;
+}
+
+static int send_frame_handler(struct blizzard_request *req)
+{
+ struct update_param *par = &req->par.update;
+ int plane = par->plane;
+
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "send_frame: x %d y %d w %d h %d "
+ "x_out %d y_out %d w_out %d h_out %d "
+ "color_mode %04x flags %04x planes %01x\n",
+ par->x, par->y, par->width, par->height,
+ par->out_x, par->out_y, par->out_width, par->out_height,
+ par->color_mode, par->flags, blizzard.enabled_planes);
+#endif
+ if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
+ disable_overlay();
+
+ if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
+ (blizzard.enabled_planes & blizzard.vid_scaled))
+ return do_full_screen_update(req);
+
+ return do_partial_update(req, plane, par->x, par->y,
+ par->width, par->height,
+ par->out_x, par->out_y,
+ par->out_width, par->out_height,
+ par->color_mode, par->bpp);
+}
+
+static void send_frame_complete(void *data)
+{
+}
+
+#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do { \
+ req = alloc_req(); \
+ req->handler = send_frame_handler; \
+ req->complete = send_frame_complete; \
+ req->par.update.plane = plane_idx; \
+ req->par.update.x = _x; \
+ req->par.update.y = _y; \
+ req->par.update.width = _w; \
+ req->par.update.height = _h; \
+ req->par.update.out_x = _x_out; \
+ req->par.update.out_y = _y_out; \
+ req->par.update.out_width = _w_out; \
+ req->par.update.out_height = _h_out; \
+ req->par.update.bpp = bpp; \
+ req->par.update.color_mode = color_mode;\
+ req->par.update.flags = flags; \
+ list_add_tail(&req->entry, req_head); \
+} while(0)
+
+static void create_req_list(int plane_idx,
+ struct omapfb_update_window *win,
+ struct list_head *req_head)
+{
+ struct blizzard_request *req;
+ int x = win->x;
+ int y = win->y;
+ int width = win->width;
+ int height = win->height;
+ int x_out = win->out_x;
+ int y_out = win->out_y;
+ int width_out = win->out_width;
+ int height_out = win->out_height;
+ int color_mode;
+ int bpp;
+ int flags;
+ unsigned int ystart = y;
+ unsigned int yspan = height;
+ unsigned int ystart_out = y_out;
+ unsigned int yspan_out = height_out;
+
+ flags = win->format & ~OMAPFB_FORMAT_MASK;
+ color_mode = win->format & OMAPFB_FORMAT_MASK;
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV420:
+ /* Embedded window with different color mode */
+ bpp = 12;
+ /* X, Y, height must be aligned at 2, width at 4 pixels */
+ x &= ~1;
+ y &= ~1;
+ height = yspan = height & ~1;
+ width = width & ~3;
+ break;
+ default:
+ /* Same as the plane color mode */
+ bpp = blizzard.plane[plane_idx].bpp;
+ break;
+ }
+ if (width * height * bpp / 8 > blizzard.max_transmit_size) {
+ yspan = blizzard.max_transmit_size / (width * bpp / 8);
+ yspan_out = yspan * height_out / height;
+ ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+ width_out, yspan_out);
+ ystart += yspan;
+ ystart_out += yspan_out;
+ yspan = height - yspan;
+ yspan_out = height_out - yspan_out;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+
+ ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+ width_out, yspan_out);
+}
+
+static void auto_update_complete(void *data)
+{
+ if (!blizzard.stop_auto_update)
+ mod_timer(&blizzard.auto_update_timer,
+ jiffies + BLIZZARD_AUTO_UPDATE_TIME);
+}
+
+static void blizzard_update_window_auto(unsigned long arg)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *last;
+ struct omapfb_plane_struct *plane;
+
+ plane = blizzard.fbdev->fb_info[0]->par;
+ create_req_list(plane->idx,
+ &blizzard.auto_update_window, &req_list);
+ last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+ last->complete = auto_update_complete;
+ last->complete_data = NULL;
+
+ submit_req_list(&req_list);
+}
+
+int blizzard_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *last;
+ struct omapfb_plane_struct *plane = fbi->par;
+
+ if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
+ return -EINVAL;
+ if (unlikely(!blizzard.te_connected &&
+ (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
+ return -EINVAL;
+
+ create_req_list(plane->idx, win, &req_list);
+ last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+ last->complete = complete_callback;
+ last->complete_data = (void *)complete_callback_data;
+
+ submit_req_list(&req_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(blizzard_update_window_async);
+
+static int update_full_screen(void)
+{
+ return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
+ &blizzard.auto_update_window, NULL, NULL);
+
+}
+
+static int blizzard_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ struct plane_info *p;
+
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d ch_out %d offset %#08lx scr_width %d "
+ "pos_x %d pos_y %d width %d height %d color_mode %d\n",
+ plane, channel_out, offset, screen_width,
+ pos_x, pos_y, width, height, color_mode);
+#endif
+	if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -EINVAL;
+ p = &blizzard.plane[plane];
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUY422:
+ p->bpp = 16;
+ blizzard.vid_nonstd_color &= ~(1 << plane);
+ break;
+ case OMAPFB_COLOR_YUV420:
+ p->bpp = 12;
+ blizzard.vid_nonstd_color |= 1 << plane;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ p->bpp = 16;
+ blizzard.vid_nonstd_color &= ~(1 << plane);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p->offset = offset;
+ p->pos_x = pos_x;
+ p->pos_y = pos_y;
+ p->width = width;
+ p->height = height;
+ p->scr_width = screen_width;
+ if (!p->out_width)
+ p->out_width = width;
+ if (!p->out_height)
+ p->out_height = height;
+
+ p->color_mode = color_mode;
+
+ return 0;
+}
+
+static int blizzard_set_scale(int plane, int orig_w, int orig_h,
+ int out_w, int out_h)
+{
+ struct plane_info *p = &blizzard.plane[plane];
+ int r;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d orig_w %d orig_h %d out_w %d out_h %d\n",
+ plane, orig_w, orig_h, out_w, out_h);
+	if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -ENODEV;
+
+ r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h);
+ if (r < 0)
+ return r;
+
+ p->width = orig_w;
+ p->height = orig_h;
+ p->out_width = out_w;
+ p->out_height = out_h;
+ if (orig_w == out_w && orig_h == out_h)
+ blizzard.vid_scaled &= ~(1 << plane);
+ else
+ blizzard.vid_scaled |= 1 << plane;
+
+ return 0;
+}
+
+static int blizzard_enable_plane(int plane, int enable)
+{
+ if (enable)
+ blizzard.enabled_planes |= 1 << plane;
+ else
+ blizzard.enabled_planes &= ~(1 << plane);
+
+ return 0;
+}
+
+static int sync_handler(struct blizzard_request *req)
+{
+ complete(req->par.sync);
+ return REQ_COMPLETE;
+}
+
+static void blizzard_sync(void)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *req;
+ struct completion comp;
+
+ req = alloc_req();
+
+ req->handler = sync_handler;
+ req->complete = NULL;
+ init_completion(&comp);
+ req->par.sync = &comp;
+
+ list_add(&req->entry, &req_list);
+ submit_req_list(&req_list);
+
+ wait_for_completion(&comp);
+}
+
+
+static void blizzard_bind_client(struct omapfb_notifier_block *nb)
+{
+ if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+ }
+}
+
+static int blizzard_set_update_mode(enum omapfb_update_mode mode)
+{
+ if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
+ mode != OMAPFB_AUTO_UPDATE &&
+ mode != OMAPFB_UPDATE_DISABLED))
+ return -EINVAL;
+
+ if (mode == blizzard.update_mode)
+ return 0;
+
+ dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
+ mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+ (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+ switch (blizzard.update_mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ blizzard.stop_auto_update = 1;
+ del_timer_sync(&blizzard.auto_update_timer);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ blizzard.update_mode = mode;
+ blizzard_sync();
+ blizzard.stop_auto_update = 0;
+
+ switch (mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ blizzard_update_window_auto(0);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ return 0;
+}
+
+static enum omapfb_update_mode blizzard_get_update_mode(void)
+{
+ return blizzard.update_mode;
+}
+
+static inline void set_extif_timings(const struct extif_timings *t)
+{
+ blizzard.extif->set_timings(t);
+}
+
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+ int bus_tick = blizzard.extif_clk_period * div;
+ return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 12.2 ns (regs),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 12 ns (regs),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 2*SYSCLK (regs),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
+
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(blizzard.fbdev->dev,
+ "Blizzard systim %lu ps extif_clk_period %u div %d\n",
+ systim, blizzard.extif_clk_period, div);
+
+ t = &blizzard.reg_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return blizzard.extif->convert_timings(t);
+}
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
+
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(blizzard.fbdev->dev,
+ "Blizzard systim %lu ps extif_clk_period %u div %d\n",
+ systim, blizzard.extif_clk_period, div);
+
+ t = &blizzard.lut_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "[lut]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(blizzard.fbdev->dev,
+ "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return blizzard.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+ int max_clk_div;
+ int div;
+
+ blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div);
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_reg_timing(sysclk, div) == 0)
+ break;
+ }
+ if (div > max_clk_div) {
+ dev_dbg(blizzard.fbdev->dev, "reg timing failed\n");
+ goto err;
+ }
+ *extif_mem_div = div;
+
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_lut_timing(sysclk, div) == 0)
+ break;
+ }
+
+ if (div > max_clk_div)
+ goto err;
+
+ blizzard.extif_clk_div = div;
+
+ return 0;
+err:
+ dev_err(blizzard.fbdev->dev, "can't setup timings\n");
+ return -1;
+}
+
+static void calc_blizzard_clk_rates(unsigned long ext_clk,
+ unsigned long *sys_clk, unsigned long *pix_clk)
+{
+ int pix_clk_src;
+ int sys_div = 0, sys_mul = 0;
+ int pix_div;
+
+ pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
+ pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+ if ((pix_clk_src & (0x3 << 1)) == 0) {
+ /* Source is the PLL */
+ sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
+ sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
+ sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
+ & 0x0f) << 11);
+ *sys_clk = ext_clk * sys_mul / sys_div;
+ } else /* else source is ext clk, or oscillator */
+ *sys_clk = ext_clk;
+
+ *pix_clk = *sys_clk / pix_div; /* HZ */
+ dev_dbg(blizzard.fbdev->dev,
+ "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+ ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+ dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+ *sys_clk, *pix_clk);
+}
+
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+ int hdisp, vdisp;
+ int hndp, vndp;
+ int hsw, vsw;
+ int hs, vs;
+ int hs_pol_inv, vs_pol_inv;
+ int use_hsvs, use_ndp;
+ u8 b;
+
+ hsw = blizzard_read_reg(BLIZZARD_HSW);
+ vsw = blizzard_read_reg(BLIZZARD_VSW);
+ hs_pol_inv = !(hsw & 0x80);
+ vs_pol_inv = !(vsw & 0x80);
+ hsw = hsw & 0x7f;
+ vsw = vsw & 0x3f;
+
+ hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
+ vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
+ ((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);
+
+ hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
+ vndp = blizzard_read_reg(BLIZZARD_VNDP);
+
+ /* time to transfer one pixel (16bpp) in ps */
+ blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
+ if (blizzard.extif->get_max_tx_rate != NULL) {
+		/* The external interface might have a rate limit; if so, make
+		 * sure the per-pixel transfer time does not drop below what
+		 * that rate allows.
+		 */
+ unsigned long min_tx_time;
+ unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();
+
+ dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
+ max_tx_rate);
+ min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
+ if (blizzard.pix_tx_time < min_tx_time)
+ blizzard.pix_tx_time = min_tx_time;
+ }
+
+ /* time to update one line in ps */
+ blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+ blizzard.line_upd_time *= 1000;
+ if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
+ /* transfer speed too low, we might have to use both
+ * HS and VS */
+ use_hsvs = 1;
+ else
+ /* decent transfer speed, we'll always use only VS */
+ use_hsvs = 0;
+
+ if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+ /* HS or'ed with VS doesn't work, use the active high
+ * TE signal based on HNDP / VNDP */
+ use_ndp = 1;
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ hs = hndp;
+ vs = vndp;
+ } else {
+ /* Use HS or'ed with VS as a TE signal if both are needed
+ * or VNDP if only vsync is needed. */
+ use_ndp = 0;
+ hs = hsw;
+ vs = vsw;
+ if (!use_hsvs) {
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ }
+ }
+
+ hs = hs * 1000000 / (pix_clk / 1000); /* ps */
+ hs *= 1000;
+
+ vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
+ vs *= 1000;
+
+ if (vs <= hs)
+ return -EDOM;
+ /* set VS to 120% of HS to minimize VS detection time */
+ vs = hs * 12 / 10;
+ /* minimize HS too */
+ if (hs > 10000)
+ hs = 10000;
+
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b &= ~0x3;
+ b |= use_hsvs ? 1 : 0;
+ b |= (use_ndp && use_hsvs) ? 0 : 2;
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+ blizzard.vsync_only = !use_hsvs;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+ pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
+ dev_dbg(blizzard.fbdev->dev,
+ "hs %d ps vs %d ps mode %d vsync_only %d\n",
+ hs, vs, b & 0x3, !use_hsvs);
+
+ return blizzard.extif->setup_tearsync(1, hs, vs,
+ hs_pol_inv, vs_pol_inv,
+ extif_div);
+}
+
+static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
+{
+ blizzard.int_ctrl->get_caps(plane, caps);
+ caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
+ OMAPFB_CAPS_WINDOW_SCALE |
+ OMAPFB_CAPS_WINDOW_OVERLAY;
+ if (blizzard.te_connected)
+ caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+ caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV420);
+}
+
+static void _save_regs(struct blizzard_reg_list *list, int cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++, list++) {
+ int reg;
+ for (reg = list->start; reg <= list->end; reg += 2)
+ blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg);
+ }
+}
+
+static void _restore_regs(struct blizzard_reg_list *list, int cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++, list++) {
+ int reg;
+ for (reg = list->start; reg <= list->end; reg += 2)
+ blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]);
+ }
+}
+
+static void blizzard_save_all_regs(void)
+{
+ _save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+ _save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_restore_pll_regs(void)
+{
+ _restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+}
+
+static void blizzard_restore_gen_regs(void)
+{
+ _restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_suspend(void)
+{
+ u32 l;
+ unsigned long tmo;
+
+ if (blizzard.last_color_mode) {
+ update_full_screen();
+ blizzard_sync();
+ }
+ blizzard.update_mode_before_suspend = blizzard.update_mode;
+ /* the following will disable clocks as well */
+ blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+
+ blizzard_save_all_regs();
+
+ blizzard_stop_sdram();
+
+ l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+ /* Standby, Sleep. We assume we use an external clock. */
+ l |= 0x03;
+ blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+ tmo = jiffies + msecs_to_jiffies(100);
+ while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
+ if (time_after(jiffies, tmo)) {
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: sleep timeout, stopping PLL manually\n");
+ l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+ l &= ~0x03;
+ /* Disable PLL, counter function */
+ l |= 0x2;
+ blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+ break;
+ }
+ msleep(1);
+ }
+
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(blizzard.fbdev->dev);
+}
+
+static void blizzard_resume(void)
+{
+ u32 l;
+
+ if (blizzard.power_up != NULL)
+ blizzard.power_up(blizzard.fbdev->dev);
+
+ l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+ /* Standby, Sleep */
+ l &= ~0x03;
+ blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+ blizzard_restore_pll_regs();
+ l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+ l &= ~0x03;
+ /* Enable PLL, counter function */
+ l |= 0x1;
+ blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+
+ while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
+ msleep(1);
+
+ blizzard_restart_sdram();
+
+ blizzard_restore_gen_regs();
+
+ /* Enable display */
+ blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);
+
+ /* the following will enable clocks as necessary */
+ blizzard_set_update_mode(blizzard.update_mode_before_suspend);
+
+ /* Force a background update */
+ blizzard.zoom_on = 1;
+ update_full_screen();
+ blizzard_sync();
+}
+
+static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r = 0, i;
+ u8 rev, conf;
+ unsigned long ext_clk;
+ int extif_div;
+ unsigned long sys_clk, pix_clk;
+ struct omapfb_platform_data *omapfb_conf;
+ struct blizzard_platform_data *ctrl_conf;
+
+ blizzard.fbdev = fbdev;
+
+ BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+ blizzard.fbdev = fbdev;
+ blizzard.extif = fbdev->ext_if;
+ blizzard.int_ctrl = fbdev->int_ctrl;
+
+ omapfb_conf = fbdev->dev->platform_data;
+ ctrl_conf = omapfb_conf->ctrl_platform_data;
+ if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+ dev_err(fbdev->dev, "s1d1374x: missing platform data\n");
+ r = -ENOENT;
+ goto err1;
+ }
+
+ blizzard.power_down = ctrl_conf->power_down;
+ blizzard.power_up = ctrl_conf->power_up;
+
+ spin_lock_init(&blizzard.req_lock);
+
+ if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+ goto err1;
+
+ if ((r = blizzard.extif->init(fbdev)) < 0)
+ goto err2;
+
+ blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key;
+ blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key;
+ blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem;
+ blizzard_ctrl.mmap = blizzard.int_ctrl->mmap;
+
+ ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+ if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0)
+ goto err3;
+
+ set_extif_timings(&blizzard.reg_timings);
+
+ if (blizzard.power_up != NULL)
+ blizzard.power_up(fbdev->dev);
+
+ calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk);
+
+ if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0)
+ goto err3;
+ set_extif_timings(&blizzard.reg_timings);
+
+ if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) {
+ dev_err(fbdev->dev,
+ "controller not initialized by the bootloader\n");
+ r = -ENODEV;
+ goto err3;
+ }
+
+ if (ctrl_conf->te_connected) {
+ if ((r = setup_tearsync(pix_clk, extif_div)) < 0)
+ goto err3;
+ blizzard.te_connected = 1;
+ }
+
+ rev = blizzard_read_reg(BLIZZARD_REV_CODE);
+ conf = blizzard_read_reg(BLIZZARD_CONFIG);
+
+ switch (rev & 0xfc) {
+ case 0x9c:
+ blizzard.version = BLIZZARD_VERSION_S1D13744;
+ pr_info("omapfb: s1d13744 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ case 0xa4:
+ blizzard.version = BLIZZARD_VERSION_S1D13745;
+ pr_info("omapfb: s1d13745 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ default:
+ dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n",
+ rev);
+ r = -ENODEV;
+ goto err3;
+ }
+
+ blizzard.max_transmit_size = blizzard.extif->max_transmit_size;
+
+ blizzard.update_mode = OMAPFB_UPDATE_DISABLED;
+
+ blizzard.auto_update_window.x = 0;
+ blizzard.auto_update_window.y = 0;
+ blizzard.auto_update_window.width = fbdev->panel->x_res;
+ blizzard.auto_update_window.height = fbdev->panel->y_res;
+ blizzard.auto_update_window.out_x = 0;
+	blizzard.auto_update_window.out_y = 0;
+ blizzard.auto_update_window.out_width = fbdev->panel->x_res;
+ blizzard.auto_update_window.out_height = fbdev->panel->y_res;
+ blizzard.auto_update_window.format = 0;
+
+ blizzard.screen_width = fbdev->panel->x_res;
+ blizzard.screen_height = fbdev->panel->y_res;
+
+ init_timer(&blizzard.auto_update_timer);
+ blizzard.auto_update_timer.function = blizzard_update_window_auto;
+ blizzard.auto_update_timer.data = 0;
+
+ INIT_LIST_HEAD(&blizzard.free_req_list);
+ INIT_LIST_HEAD(&blizzard.pending_req_list);
+ for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++)
+ list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list);
+ BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+ sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+ return 0;
+err3:
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(fbdev->dev);
+ blizzard.extif->cleanup();
+err2:
+ blizzard.int_ctrl->cleanup();
+err1:
+ return r;
+}
+
+static void blizzard_cleanup(void)
+{
+ blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ blizzard.extif->cleanup();
+ blizzard.int_ctrl->cleanup();
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(blizzard.fbdev->dev);
+}
+
+struct lcd_ctrl blizzard_ctrl = {
+ .name = "blizzard",
+ .init = blizzard_init,
+ .cleanup = blizzard_cleanup,
+ .bind_client = blizzard_bind_client,
+ .get_caps = blizzard_get_caps,
+ .set_update_mode = blizzard_set_update_mode,
+ .get_update_mode = blizzard_get_update_mode,
+ .setup_plane = blizzard_setup_plane,
+ .set_scale = blizzard_set_scale,
+ .enable_plane = blizzard_enable_plane,
+ .update_window = blizzard_update_window_async,
+ .sync = blizzard_sync,
+ .suspend = blizzard_suspend,
+ .resume = blizzard_resume,
+};
+
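blizzard.c manages update requests through a fixed pool in which IRQ_REQ_POOL_SIZE entries are reserved for interrupt context: the counting semaphore is initialized to the pool size minus the reserve, process-context callers sleep on it in alloc_req(), and in_interrupt() callers skip the semaphore and draw from the reserved headroom. A condensed sketch of the pattern with hypothetical names (the driver additionally protects its free list with a spinlock):

	#define POOL_SIZE	24
	#define IRQ_RESERVE	4

	static struct semaphore pool_sema;

	static void pool_init(void)
	{
		/* sleepers can only claim POOL_SIZE - IRQ_RESERVE slots,
		 * leaving IRQ_RESERVE slots for in_interrupt() callers */
		sema_init(&pool_sema, POOL_SIZE - IRQ_RESERVE);
	}

	static int pool_get(void)
	{
		int from_irq_reserve = in_interrupt() != 0;

		if (!from_irq_reserve)
			down(&pool_sema);	/* may sleep until a slot is free */
		/* ... unlink an entry from the free list under the spinlock ... */
		return from_irq_reserve;
	}

	static void pool_put(int from_irq_reserve)
	{
		/* ... put the entry back on the free list under the spinlock ... */
		if (!from_irq_reserve)
			up(&pool_sema);
	}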
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
new file mode 100644
index 00000000000..f4c23434de6
--- /dev/null
+++ b/drivers/video/omap/dispc.c
@@ -0,0 +1,1502 @@
+/*
+ * OMAP2 display controller support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/sram.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/board.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME "dispc"
+
+#define DSS_BASE 0x48050000
+#define DSS_SYSCONFIG 0x0010
+
+#define DISPC_BASE 0x48050400
+
+/* DISPC common */
+#define DISPC_REVISION 0x0000
+#define DISPC_SYSCONFIG 0x0010
+#define DISPC_SYSSTATUS 0x0014
+#define DISPC_IRQSTATUS 0x0018
+#define DISPC_IRQENABLE 0x001C
+#define DISPC_CONTROL 0x0040
+#define DISPC_CONFIG 0x0044
+#define DISPC_CAPABLE 0x0048
+#define DISPC_DEFAULT_COLOR0 0x004C
+#define DISPC_DEFAULT_COLOR1 0x0050
+#define DISPC_TRANS_COLOR0 0x0054
+#define DISPC_TRANS_COLOR1 0x0058
+#define DISPC_LINE_STATUS 0x005C
+#define DISPC_LINE_NUMBER 0x0060
+#define DISPC_TIMING_H 0x0064
+#define DISPC_TIMING_V 0x0068
+#define DISPC_POL_FREQ 0x006C
+#define DISPC_DIVISOR 0x0070
+#define DISPC_SIZE_DIG 0x0078
+#define DISPC_SIZE_LCD 0x007C
+
+#define DISPC_DATA_CYCLE1 0x01D4
+#define DISPC_DATA_CYCLE2 0x01D8
+#define DISPC_DATA_CYCLE3 0x01DC
+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0 0x0080
+#define DISPC_GFX_BA1 0x0084
+#define DISPC_GFX_POSITION 0x0088
+#define DISPC_GFX_SIZE 0x008C
+#define DISPC_GFX_ATTRIBUTES 0x00A0
+#define DISPC_GFX_FIFO_THRESHOLD 0x00A4
+#define DISPC_GFX_FIFO_SIZE_STATUS 0x00A8
+#define DISPC_GFX_ROW_INC 0x00AC
+#define DISPC_GFX_PIXEL_INC 0x00B0
+#define DISPC_GFX_WINDOW_SKIP 0x00B4
+#define DISPC_GFX_TABLE_BA 0x00B8
+
+/* DISPC Video plane 1/2 */
+#define DISPC_VID1_BASE 0x00BC
+#define DISPC_VID2_BASE 0x014C
+
+/* Offsets into DISPC_VID1/2_BASE */
+#define DISPC_VID_BA0 0x0000
+#define DISPC_VID_BA1 0x0004
+#define DISPC_VID_POSITION 0x0008
+#define DISPC_VID_SIZE 0x000C
+#define DISPC_VID_ATTRIBUTES 0x0010
+#define DISPC_VID_FIFO_THRESHOLD 0x0014
+#define DISPC_VID_FIFO_SIZE_STATUS 0x0018
+#define DISPC_VID_ROW_INC 0x001C
+#define DISPC_VID_PIXEL_INC 0x0020
+#define DISPC_VID_FIR 0x0024
+#define DISPC_VID_PICTURE_SIZE 0x0028
+#define DISPC_VID_ACCU0 0x002C
+#define DISPC_VID_ACCU1 0x0030
+
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_H0 0x0034
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_HV0 0x0038
+/* 5 elements in 4 byte increments */
+#define DISPC_VID_CONV_COEF0 0x0074
+
+#define DISPC_IRQ_FRAMEMASK 0x0001
+#define DISPC_IRQ_VSYNC 0x0002
+#define DISPC_IRQ_EVSYNC_EVEN 0x0004
+#define DISPC_IRQ_EVSYNC_ODD 0x0008
+#define DISPC_IRQ_ACBIAS_COUNT_STAT 0x0010
+#define DISPC_IRQ_PROG_LINE_NUM 0x0020
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW 0x0040
+#define DISPC_IRQ_GFX_END_WIN 0x0080
+#define DISPC_IRQ_PAL_GAMMA_MASK 0x0100
+#define DISPC_IRQ_OCP_ERR 0x0200
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW 0x0400
+#define DISPC_IRQ_VID1_END_WIN 0x0800
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000
+#define DISPC_IRQ_VID2_END_WIN 0x2000
+#define DISPC_IRQ_SYNC_LOST 0x4000
+
+#define DISPC_IRQ_MASK_ALL 0x7fff
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+ DISPC_IRQ_SYNC_LOST)
+
+#define RFBI_CONTROL 0x48050040
+
+#define MAX_PALETTE_SIZE (256 * 16)
+
+#define FLD_MASK(pos, len) (((1 << (len)) - 1) << (pos))
+
+#define MOD_REG_FLD(reg, mask, val) \
+ dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val));
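+/*
+ * Example, for illustration: FLD_MASK(16, 11) evaluates to 0x07ff0000, so
+ * MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ * ((y - 1) << 16) | (x - 1)) rewrites only the two 11-bit size fields and
+ * leaves every other bit of the register unchanged.
+ */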
+
+#define OMAP2_SRAM_START 0x40200000
+/* Maximum size, in reality this is smaller if SRAM is partially locked. */
+#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
+
+/* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */
+#define DISPC_MEMTYPE_NUM 2
+
+#define RESMAP_SIZE(_page_cnt) \
+ ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
+#define RESMAP_PTR(_res_map, _page_nr) \
+ (((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
+#define RESMAP_MASK(_page_nr) \
+ (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
+
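+/*
+ * A resmap is a page-granularity allocation bitmap for one memory type:
+ * bit N of ->map marks page N, counted from ->start, as reserved.
+ * RESMAP_PTR() picks the unsigned long word holding that bit and
+ * RESMAP_MASK() the bit within it; on a 32-bit build, for instance,
+ * page_nr 35 lands in map[1], bit 3.
+ */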
+struct resmap {
+ unsigned long start;
+ unsigned page_cnt;
+ unsigned long *map;
+};
+
+static struct {
+ u32 base;
+
+ struct omapfb_mem_desc mem_desc;
+ struct resmap *res_map[DISPC_MEMTYPE_NUM];
+ atomic_t map_count[OMAPFB_PLANE_NUM];
+
+ dma_addr_t palette_paddr;
+ void *palette_vaddr;
+
+ int ext_mode;
+
+ unsigned long enabled_irqs;
+ void (*irq_callback)(void *);
+ void *irq_callback_data;
+ struct completion frame_done;
+
+ int fir_hinc[OMAPFB_PLANE_NUM];
+ int fir_vinc[OMAPFB_PLANE_NUM];
+
+ struct clk *dss_ick, *dss1_fck;
+ struct clk *dss_54m_fck;
+
+ enum omapfb_update_mode update_mode;
+ struct omapfb_device *fbdev;
+
+ struct omapfb_color_key color_key;
+} dispc;
+
+static void enable_lcd_clocks(int enable);
+
+static inline void dispc_write_reg(int idx, u32 val)
+{
+ __raw_writel(val, dispc.base + idx);
+}
+
+static inline u32 dispc_read_reg(int idx)
+{
+ u32 l = __raw_readl(dispc.base + idx);
+ return l;
+}
+
+/* Select RFBI or bypass mode */
+static void enable_rfbi_mode(int enable)
+{
+ u32 l;
+
+ l = dispc_read_reg(DISPC_CONTROL);
+ /* Enable RFBI, GPIO0/1 */
+ l &= ~((1 << 11) | (1 << 15) | (1 << 16));
+ l |= enable ? (1 << 11) : 0;
+ /* RFBI En: GPIO0/1=10 RFBI Dis: GPIO0/1=11 */
+ l |= 1 << 15;
+ l |= enable ? 0 : (1 << 16);
+ dispc_write_reg(DISPC_CONTROL, l);
+
+ /* Set bypass mode in RFBI module */
+ l = __raw_readl(io_p2v(RFBI_CONTROL));
+ l |= enable ? 0 : (1 << 1);
+ __raw_writel(l, io_p2v(RFBI_CONTROL));
+}
+
+static void set_lcd_data_lines(int data_lines)
+{
+ u32 l;
+ int code = 0;
+
+ switch (data_lines) {
+ case 12:
+ code = 0;
+ break;
+ case 16:
+ code = 1;
+ break;
+ case 18:
+ code = 2;
+ break;
+ case 24:
+ code = 3;
+ break;
+ default:
+ BUG();
+ }
+
+ l = dispc_read_reg(DISPC_CONTROL);
+ l &= ~(0x03 << 8);
+ l |= code << 8;
+ dispc_write_reg(DISPC_CONTROL, l);
+}
+
+static void set_load_mode(int mode)
+{
+ BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
+ DISPC_LOAD_CLUT_ONCE_FRAME));
+ MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
+}
+
+void omap_dispc_set_lcd_size(int x, int y)
+{
+ BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((y - 1) << 16) | (x - 1));
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_lcd_size);
+
+void omap_dispc_set_digit_size(int x, int y)
+{
+ BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((y - 1) << 16) | (x - 1));
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_digit_size);
+
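+/*
+ * The DMA FIFO low/high thresholds are derived from the FIFO size reported
+ * by the hardware: 3/4 and the full FIFO in external (RFBI) mode, 1/4 and
+ * 3/4 when the panel is driven directly.
+ */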
+static void setup_plane_fifo(int plane, int ext_mode)
+{
+ const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
+ DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
+ DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
+ const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
+ DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
+ DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
+ int low, high;
+ u32 l;
+
+ BUG_ON(plane > 2);
+
+ l = dispc_read_reg(fsz_reg[plane]);
+ l &= FLD_MASK(0, 9);
+ if (ext_mode) {
+ low = l * 3 / 4;
+ high = l;
+ } else {
+ low = l / 4;
+ high = l * 3 / 4;
+ }
+ MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
+ (high << 16) | low);
+}
+
+void omap_dispc_enable_lcd_out(int enable)
+{
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_lcd_out);
+
+void omap_dispc_enable_digit_out(int enable)
+{
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_digit_out);
+
+static inline int _setup_plane(int plane, int channel_out,
+ u32 paddr, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+ DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
+ DISPC_VID2_BASE + DISPC_VID_BA0 };
+ const u32 ps_reg[] = { DISPC_GFX_POSITION,
+ DISPC_VID1_BASE + DISPC_VID_POSITION,
+ DISPC_VID2_BASE + DISPC_VID_POSITION };
+ const u32 sz_reg[] = { DISPC_GFX_SIZE,
+ DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
+ const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
+ DISPC_VID1_BASE + DISPC_VID_ROW_INC,
+ DISPC_VID2_BASE + DISPC_VID_ROW_INC };
+ const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_SIZE };
+
+ int chout_shift, burst_shift;
+ int chout_val;
+ int color_code;
+ int bpp;
+ int cconv_en;
+ int set_vsize;
+ u32 l;
+
+#ifdef VERBOSE
+ dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
+ " pos_x %d pos_y %d width %d height %d color_mode %d\n",
+ plane, channel_out, paddr, screen_width, pos_x, pos_y,
+ width, height, color_mode);
+#endif
+
+ set_vsize = 0;
+ switch (plane) {
+ case OMAPFB_PLANE_GFX:
+ burst_shift = 6;
+ chout_shift = 8;
+ break;
+ case OMAPFB_PLANE_VID1:
+ case OMAPFB_PLANE_VID2:
+ burst_shift = 14;
+ chout_shift = 16;
+ set_vsize = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (channel_out) {
+ case OMAPFB_CHANNEL_OUT_LCD:
+ chout_val = 0;
+ break;
+ case OMAPFB_CHANNEL_OUT_DIGIT:
+ chout_val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cconv_en = 0;
+ switch (color_mode) {
+ case OMAPFB_COLOR_RGB565:
+ color_code = DISPC_RGB_16_BPP;
+ bpp = 16;
+ break;
+ case OMAPFB_COLOR_YUV422:
+ if (plane == 0)
+ return -EINVAL;
+ color_code = DISPC_UYVY_422;
+ cconv_en = 1;
+ bpp = 16;
+ break;
+ case OMAPFB_COLOR_YUY422:
+ if (plane == 0)
+ return -EINVAL;
+ color_code = DISPC_YUV2_422;
+ cconv_en = 1;
+ bpp = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ l = dispc_read_reg(at_reg[plane]);
+
+ l &= ~(0x0f << 1);
+ l |= color_code << 1;
+ l &= ~(1 << 9);
+ l |= cconv_en << 9;
+
+ l &= ~(0x03 << burst_shift);
+ l |= DISPC_BURST_8x32 << burst_shift;
+
+ l &= ~(1 << chout_shift);
+ l |= chout_val << chout_shift;
+
+ dispc_write_reg(at_reg[plane], l);
+
+ dispc_write_reg(ba_reg[plane], paddr);
+ MOD_REG_FLD(ps_reg[plane],
+ FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);
+
+ MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((height - 1) << 16) | (width - 1));
+
+ if (set_vsize) {
+ /* Set video size if set_scale hasn't set it */
+ if (!dispc.fir_vinc[plane])
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(16, 11), (height - 1) << 16);
+ if (!dispc.fir_hinc[plane])
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(0, 11), width - 1);
+ }
+
+ dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
+
+ return height * screen_width * bpp / 8;
+}
+
+static int omap_dispc_setup_plane(int plane, int channel_out,
+ unsigned long offset,
+ int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ u32 paddr;
+ int r;
+
+ if ((unsigned)plane > dispc.mem_desc.region_cnt)
+ return -EINVAL;
+ paddr = dispc.mem_desc.region[plane].paddr + offset;
+ enable_lcd_clocks(1);
+ r = _setup_plane(plane, channel_out, paddr,
+ screen_width,
+ pos_x, pos_y, width, height, color_mode);
+ enable_lcd_clocks(0);
+ return r;
+}
+
+static void write_firh_reg(int plane, int reg, u32 value)
+{
+ u32 base;
+
+ if (plane == 1)
+ base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0;
+ else
+ base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0;
+ dispc_write_reg(base + reg * 8, value);
+}
+
+static void write_firhv_reg(int plane, int reg, u32 value)
+{
+ u32 base;
+
+ if (plane == 1)
+ base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0;
+ else
+ base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0;
+ dispc_write_reg(base + reg * 8, value);
+}
+
+static void set_upsampling_coef_table(int plane)
+{
+ const u32 coef[][2] = {
+ { 0x00800000, 0x00800000 },
+ { 0x0D7CF800, 0x037B02FF },
+ { 0x1E70F5FF, 0x0C6F05FE },
+ { 0x335FF5FE, 0x205907FB },
+ { 0xF74949F7, 0x00404000 },
+ { 0xF55F33FB, 0x075920FE },
+ { 0xF5701EFE, 0x056F0CFF },
+ { 0xF87C0DFF, 0x027B0300 },
+ };
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ write_firh_reg(plane, i, coef[i][0]);
+ write_firhv_reg(plane, i, coef[i][1]);
+ }
+}
+
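+/*
+ * The FIR increments below are fixed-point steps where 1024 corresponds to
+ * a 1:1 ratio: scaling a 320 pixel wide source to 640 output pixels, for
+ * example, gives fir_hinc = 1024 * 320 / 640 = 512, i.e. half a source
+ * pixel per output pixel.  A zero increment marks a dimension that is not
+ * scaled at all.
+ */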
+static int omap_dispc_set_scale(int plane,
+ int orig_width, int orig_height,
+ int out_width, int out_height)
+{
+ const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_SIZE };
+ const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR,
+ DISPC_VID2_BASE + DISPC_VID_FIR };
+
+ u32 l;
+ int fir_hinc;
+ int fir_vinc;
+
+ if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -ENODEV;
+
+ if (plane == OMAPFB_PLANE_GFX &&
+ (out_width != orig_width || out_height != orig_height))
+ return -EINVAL;
+
+ enable_lcd_clocks(1);
+ if (orig_width < out_width) {
+ /*
+ * Upsampling.
+ * Currently you can only scale both dimensions in one way.
+ */
+ if (orig_height > out_height ||
+ orig_width * 8 < out_width ||
+ orig_height * 8 < out_height) {
+ enable_lcd_clocks(0);
+ return -EINVAL;
+ }
+ set_upsampling_coef_table(plane);
+ } else if (orig_width > out_width) {
+ /* Downsampling not yet supported
+ */
+
+ enable_lcd_clocks(0);
+ return -EINVAL;
+ }
+ if (!orig_width || orig_width == out_width)
+ fir_hinc = 0;
+ else
+ fir_hinc = 1024 * orig_width / out_width;
+ if (!orig_height || orig_height == out_height)
+ fir_vinc = 0;
+ else
+ fir_vinc = 1024 * orig_height / out_height;
+ dispc.fir_hinc[plane] = fir_hinc;
+ dispc.fir_vinc[plane] = fir_vinc;
+
+ MOD_REG_FLD(fir_reg[plane],
+ FLD_MASK(16, 12) | FLD_MASK(0, 12),
+ ((fir_vinc & 4095) << 16) |
+ (fir_hinc & 4095));
+
+ dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
+ "orig_height %d fir_hinc %d fir_vinc %d\n",
+ out_width, out_height, orig_width, orig_height,
+ fir_hinc, fir_vinc);
+
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((out_height - 1) << 16) | (out_width - 1));
+
+ l = dispc_read_reg(at_reg[plane]);
+ l &= ~(0x03 << 5);
+ l |= fir_hinc ? (1 << 5) : 0;
+ l |= fir_vinc ? (1 << 6) : 0;
+ dispc_write_reg(at_reg[plane], l);
+
+ enable_lcd_clocks(0);
+ return 0;
+}
+
+static int omap_dispc_enable_plane(int plane, int enable)
+{
+ const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+ DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ if ((unsigned int)plane > dispc.mem_desc.region_cnt)
+ return -EINVAL;
+
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
+ enable_lcd_clocks(0);
+
+ return 0;
+}
+
+static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
+{
+ u32 df_reg, tr_reg;
+ int shift, val;
+
+ switch (ck->channel_out) {
+ case OMAPFB_CHANNEL_OUT_LCD:
+ df_reg = DISPC_DEFAULT_COLOR0;
+ tr_reg = DISPC_TRANS_COLOR0;
+ shift = 10;
+ break;
+ case OMAPFB_CHANNEL_OUT_DIGIT:
+ df_reg = DISPC_DEFAULT_COLOR1;
+ tr_reg = DISPC_TRANS_COLOR1;
+ shift = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ck->key_type) {
+ case OMAPFB_COLOR_KEY_DISABLED:
+ val = 0;
+ break;
+ case OMAPFB_COLOR_KEY_GFX_DST:
+ val = 1;
+ break;
+ case OMAPFB_COLOR_KEY_VID_SRC:
+ val = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift);
+
+ if (val != 0)
+ dispc_write_reg(tr_reg, ck->trans_key);
+ dispc_write_reg(df_reg, ck->background);
+ enable_lcd_clocks(0);
+
+ dispc.color_key = *ck;
+
+ return 0;
+}
+
+static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
+{
+ *ck = dispc.color_key;
+ return 0;
+}
+
+static void load_palette(void)
+{
+}
+
+static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
+{
+ int r = 0;
+
+ if (mode != dispc.update_mode) {
+ switch (mode) {
+ case OMAPFB_AUTO_UPDATE:
+ case OMAPFB_MANUAL_UPDATE:
+ enable_lcd_clocks(1);
+ omap_dispc_enable_lcd_out(1);
+ dispc.update_mode = mode;
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ init_completion(&dispc.frame_done);
+ omap_dispc_enable_lcd_out(0);
+ if (!wait_for_completion_timeout(&dispc.frame_done,
+ msecs_to_jiffies(500))) {
+ dev_err(dispc.fbdev->dev,
+ "timeout waiting for FRAME DONE\n");
+ }
+ dispc.update_mode = mode;
+ enable_lcd_clocks(0);
+ break;
+ default:
+ r = -EINVAL;
+ }
+ }
+
+ return r;
+}
+
+static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps)
+{
+ caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM;
+ if (plane > 0)
+ caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE;
+ caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV422) |
+ (1 << OMAPFB_COLOR_YUY422);
+ if (plane == 0)
+ caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) |
+ (1 << OMAPFB_COLOR_CLUT_4BPP) |
+ (1 << OMAPFB_COLOR_CLUT_2BPP) |
+ (1 << OMAPFB_COLOR_CLUT_1BPP) |
+ (1 << OMAPFB_COLOR_RGB444);
+}
+
+static enum omapfb_update_mode omap_dispc_get_update_mode(void)
+{
+ return dispc.update_mode;
+}
+
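+/*
+ * The ctbl_bt601_5 coefficients below are the standard ITU-R BT.601
+ * limited-range YCbCr to RGB matrix in 1/256 fixed-point steps (for
+ * example 298/256 ~= 1.164 for the luma term and 409/256 ~= 1.596 for the
+ * Cr contribution to red); the same table is loaded into both video
+ * pipelines.
+ */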
+static void setup_color_conv_coef(void)
+{
+ u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
+ int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
+ int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
+ int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
+ int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
+ const struct color_conv_coef {
+ int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
+ int full_range;
+ } ctbl_bt601_5 = {
+ 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ };
+ const struct color_conv_coef *ct;
+#define CVAL(x, y) (((x & 2047) << 16) | (y & 2047))
+
+ ct = &ctbl_bt601_5;
+
+ MOD_REG_FLD(cf1_reg, mask, CVAL(ct->rcr, ct->ry));
+ MOD_REG_FLD(cf1_reg + 4, mask, CVAL(ct->gy, ct->rcb));
+ MOD_REG_FLD(cf1_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
+ MOD_REG_FLD(cf1_reg + 12, mask, CVAL(ct->bcr, ct->by));
+ MOD_REG_FLD(cf1_reg + 16, mask, CVAL(0, ct->bcb));
+
+ MOD_REG_FLD(cf2_reg, mask, CVAL(ct->rcr, ct->ry));
+ MOD_REG_FLD(cf2_reg + 4, mask, CVAL(ct->gy, ct->rcb));
+ MOD_REG_FLD(cf2_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
+ MOD_REG_FLD(cf2_reg + 12, mask, CVAL(ct->bcr, ct->by));
+ MOD_REG_FLD(cf2_reg + 16, mask, CVAL(0, ct->bcb));
+#undef CVAL
+
+ MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
+ MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
+}
+
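+/*
+ * calc_ck_div() turns a requested pixel clock into logic and pixel clock
+ * divisors of dss1_fck.  Worked example, assuming dss1_fck runs at 96 MHz:
+ * for a 6 MHz TFT pixel clock, pck_div = (96000000 + 5999999) / 6000000 =
+ * 16 and lck_div stays 1, so the panel gets exactly 96 MHz / 16 = 6 MHz;
+ * only when pck_div would exceed 255 is lck_div raised as well.
+ */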
+static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
+{
+ unsigned long fck, lck;
+
+ *lck_div = 1;
+ pck = max(1, pck);
+ fck = clk_get_rate(dispc.dss1_fck);
+ lck = fck;
+ *pck_div = (lck + pck - 1) / pck;
+ if (is_tft)
+ *pck_div = max(2, *pck_div);
+ else
+ *pck_div = max(3, *pck_div);
+ if (*pck_div > 255) {
+ *pck_div = 255;
+ lck = pck * *pck_div;
+ *lck_div = fck / lck;
+ BUG_ON(*lck_div < 1);
+ if (*lck_div > 255) {
+ *lck_div = 255;
+ dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
+ pck / 1000);
+ }
+ }
+}
+
+static void set_lcd_tft_mode(int enable)
+{
+ u32 mask;
+
+ mask = 1 << 3;
+ MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0);
+}
+
+static void set_lcd_timings(void)
+{
+ u32 l;
+ int lck_div, pck_div;
+ struct lcd_panel *panel = dispc.fbdev->panel;
+ int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+ unsigned long fck;
+
+ l = dispc_read_reg(DISPC_TIMING_H);
+ l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+ l |= ( max(1, (min(64, panel->hsw))) - 1 ) << 0;
+ l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
+ l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
+ dispc_write_reg(DISPC_TIMING_H, l);
+
+ l = dispc_read_reg(DISPC_TIMING_V);
+ l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+ l |= ( max(1, (min(64, panel->vsw))) - 1 ) << 0;
+ l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
+ l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
+ dispc_write_reg(DISPC_TIMING_V, l);
+
+ l = dispc_read_reg(DISPC_POL_FREQ);
+ l &= ~FLD_MASK(12, 6);
+ l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
+ l |= panel->acb & 0xff;
+ dispc_write_reg(DISPC_POL_FREQ, l);
+
+ calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);
+
+ l = dispc_read_reg(DISPC_DIVISOR);
+ l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
+ l |= (lck_div << 16) | (pck_div << 0);
+ dispc_write_reg(DISPC_DIVISOR, l);
+
+ /* update panel info with the exact clock */
+ fck = clk_get_rate(dispc.dss1_fck);
+ panel->pixel_clock = fck / lck_div / pck_div / 1000;
+}
+
+int omap_dispc_request_irq(void (*callback)(void *data), void *data)
+{
+ int r = 0;
+
+ BUG_ON(callback == NULL);
+
+ if (dispc.irq_callback)
+ r = -EBUSY;
+ else {
+ dispc.irq_callback = callback;
+ dispc.irq_callback_data = data;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(omap_dispc_request_irq);
+
+void omap_dispc_enable_irqs(int irq_mask)
+{
+ enable_lcd_clocks(1);
+ dispc.enabled_irqs = irq_mask;
+ irq_mask |= DISPC_IRQ_MASK_ERROR;
+ MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_irqs);
+
+void omap_dispc_disable_irqs(int irq_mask)
+{
+ enable_lcd_clocks(1);
+ dispc.enabled_irqs &= ~irq_mask;
+ irq_mask &= ~DISPC_IRQ_MASK_ERROR;
+ MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_disable_irqs);
+
+void omap_dispc_free_irq(void)
+{
+ enable_lcd_clocks(1);
+ omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
+ dispc.irq_callback = NULL;
+ dispc.irq_callback_data = NULL;
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_free_irq);
+
+static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
+{
+ u32 stat = dispc_read_reg(DISPC_IRQSTATUS);
+
+ if (stat & DISPC_IRQ_FRAMEMASK)
+ complete(&dispc.frame_done);
+
+ if (stat & DISPC_IRQ_MASK_ERROR) {
+ if (printk_ratelimit()) {
+ dev_err(dispc.fbdev->dev, "irq error status %04x\n",
+ stat & 0x7fff);
+ }
+ }
+
+ if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
+ dispc.irq_callback(dispc.irq_callback_data);
+
+ dispc_write_reg(DISPC_IRQSTATUS, stat);
+
+ return IRQ_HANDLED;
+}
+
+static int get_dss_clocks(void)
+{
+ if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss_ick");
+ return PTR_ERR(dispc.dss_ick);
+ }
+
+ if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss1_fck");
+ clk_put(dispc.dss_ick);
+ return PTR_ERR(dispc.dss1_fck);
+ }
+
+ if (IS_ERR((dispc.dss_54m_fck =
+ clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss_54m_fck");
+ clk_put(dispc.dss_ick);
+ clk_put(dispc.dss1_fck);
+ return PTR_ERR(dispc.dss_54m_fck);
+ }
+
+ return 0;
+}
+
+static void put_dss_clocks(void)
+{
+ clk_put(dispc.dss_54m_fck);
+ clk_put(dispc.dss1_fck);
+ clk_put(dispc.dss_ick);
+}
+
+static void enable_lcd_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss1_fck);
+ else
+ clk_disable(dispc.dss1_fck);
+}
+
+static void enable_interface_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss_ick);
+ else
+ clk_disable(dispc.dss_ick);
+}
+
+static void enable_digit_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss_54m_fck);
+ else
+ clk_disable(dispc.dss_54m_fck);
+}
+
+static void omap_dispc_suspend(void)
+{
+ if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+ init_completion(&dispc.frame_done);
+ omap_dispc_enable_lcd_out(0);
+ if (!wait_for_completion_timeout(&dispc.frame_done,
+ msecs_to_jiffies(500))) {
+ dev_err(dispc.fbdev->dev,
+ "timeout waiting for FRAME DONE\n");
+ }
+ enable_lcd_clocks(0);
+ }
+}
+
+static void omap_dispc_resume(void)
+{
+ if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+ enable_lcd_clocks(1);
+ if (!dispc.ext_mode) {
+ set_lcd_timings();
+ load_palette();
+ }
+ omap_dispc_enable_lcd_out(1);
+ }
+}
+
+
+static int omap_dispc_update_window(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0;
+}
+
+static int mmap_kern(struct omapfb_mem_region *region)
+{
+ struct vm_struct *kvma;
+ struct vm_area_struct vma;
+ pgprot_t pgprot;
+ unsigned long vaddr;
+
+ kvma = get_vm_area(region->size, VM_IOREMAP);
+ if (kvma == NULL) {
+ dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
+ return -ENOMEM;
+ }
+ vma.vm_mm = &init_mm;
+
+ vaddr = (unsigned long)kvma->addr;
+
+ pgprot = pgprot_writecombine(pgprot_kernel);
+ vma.vm_start = vaddr;
+ vma.vm_end = vaddr + region->size;
+ if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
+ region->size, pgprot) < 0) {
+ dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
+ return -EAGAIN;
+ }
+ region->vaddr = (void *)vaddr;
+
+ return 0;
+}
+
+static void mmap_user_open(struct vm_area_struct *vma)
+{
+ int plane = (int)vma->vm_private_data;
+
+ atomic_inc(&dispc.map_count[plane]);
+}
+
+static void mmap_user_close(struct vm_area_struct *vma)
+{
+ int plane = (int)vma->vm_private_data;
+
+ atomic_dec(&dispc.map_count[plane]);
+}
+
+static struct vm_operations_struct mmap_user_ops = {
+ .open = mmap_user_open,
+ .close = mmap_user_close,
+};
+
+static int omap_dispc_mmap_user(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ unsigned long off;
+ unsigned long start;
+ u32 len;
+
+ if (vma->vm_end - vma->vm_start == 0)
+ return 0;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+ off = vma->vm_pgoff << PAGE_SHIFT;
+
+ start = info->fix.smem_start;
+ len = info->fix.smem_len;
+ if (off >= len)
+ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start + off) > len)
+ return -EINVAL;
+ off += start;
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mmap_user_ops;
+ vma->vm_private_data = (void *)plane->idx;
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ /* vm_ops.open won't be called for mmap itself. */
+ atomic_inc(&dispc.map_count[plane->idx]);
+ return 0;
+}
+
+static void unmap_kern(struct omapfb_mem_region *region)
+{
+ vunmap(region->vaddr);
+}
+
+static int alloc_palette_ram(void)
+{
+ dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+ MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
+ if (dispc.palette_vaddr == NULL) {
+ dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void free_palette_ram(void)
+{
+ dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
+ dispc.palette_vaddr, dispc.palette_paddr);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+ region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+ region->size, &region->paddr, GFP_KERNEL);
+
+ if (region->vaddr == NULL) {
+ dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void free_fbmem(struct omapfb_mem_region *region)
+{
+ dma_free_writecombine(dispc.fbdev->dev, region->size,
+ region->vaddr, region->paddr);
+}
+
+static struct resmap *init_resmap(unsigned long start, size_t size)
+{
+ unsigned page_cnt;
+ struct resmap *res_map;
+
+ page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
+ res_map =
+ kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL);
+ if (res_map == NULL)
+ return NULL;
+ res_map->start = start;
+ res_map->page_cnt = page_cnt;
+ res_map->map = (unsigned long *)(res_map + 1);
+ return res_map;
+}
+
+static void cleanup_resmap(struct resmap *res_map)
+{
+ kfree(res_map);
+}
+
+static inline int resmap_mem_type(unsigned long start)
+{
+ if (start >= OMAP2_SRAM_START &&
+ start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
+ return OMAPFB_MEMTYPE_SRAM;
+ else
+ return OMAPFB_MEMTYPE_SDRAM;
+}
+
+static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
+{
+ return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
+}
+
+static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
+{
+ BUG_ON(resmap_page_reserved(res_map, page_nr));
+ *RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
+}
+
+static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
+{
+ BUG_ON(!resmap_page_reserved(res_map, page_nr));
+ *RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
+}
+
+static void resmap_reserve_region(unsigned long start, size_t size)
+{
+
+ struct resmap *res_map;
+ unsigned start_page;
+ unsigned end_page;
+ int mtype;
+ unsigned i;
+
+ mtype = resmap_mem_type(start);
+ res_map = dispc.res_map[mtype];
+ dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
+ mtype, start, size);
+ start_page = (start - res_map->start) / PAGE_SIZE;
+ end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+ for (i = start_page; i < end_page; i++)
+ resmap_reserve_page(res_map, i);
+}
+
+static void resmap_free_region(unsigned long start, size_t size)
+{
+ struct resmap *res_map;
+ unsigned start_page;
+ unsigned end_page;
+ unsigned i;
+ int mtype;
+
+ mtype = resmap_mem_type(start);
+ res_map = dispc.res_map[mtype];
+ dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
+ mtype, start, size);
+ start_page = (start - res_map->start) / PAGE_SIZE;
+ end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+ for (i = start_page; i < end_page; i++)
+ resmap_free_page(res_map, i);
+}
+
+static unsigned long resmap_alloc_region(int mtype, size_t size)
+{
+ unsigned i;
+ unsigned total;
+ unsigned start_page;
+ unsigned long start;
+ struct resmap *res_map = dispc.res_map[mtype];
+
+ BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);
+
+ size = PAGE_ALIGN(size) / PAGE_SIZE;
+ start_page = 0;
+ total = 0;
+ for (i = 0; i < res_map->page_cnt; i++) {
+ if (resmap_page_reserved(res_map, i)) {
+ start_page = i + 1;
+ total = 0;
+ } else if (++total == size)
+ break;
+ }
+ if (total < size)
+ return 0;
+
+ start = res_map->start + start_page * PAGE_SIZE;
+ resmap_reserve_region(start, size * PAGE_SIZE);
+
+ return start;
+}
+
+/* Note that this will only work for user mappings, we don't deal with
+ * kernel mappings here, so fbcon will keep using the old region.
+ */
+static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
+ unsigned long *paddr)
+{
+ struct omapfb_mem_region *rg;
+ unsigned long new_addr = 0;
+
+ if ((unsigned)plane > dispc.mem_desc.region_cnt)
+ return -EINVAL;
+ if (mem_type >= DISPC_MEMTYPE_NUM)
+ return -EINVAL;
+ if (dispc.res_map[mem_type] == NULL)
+ return -ENOMEM;
+ rg = &dispc.mem_desc.region[plane];
+ if (size == rg->size && mem_type == rg->type)
+ return 0;
+ if (atomic_read(&dispc.map_count[plane]))
+ return -EBUSY;
+ if (rg->size != 0)
+ resmap_free_region(rg->paddr, rg->size);
+ if (size != 0) {
+ new_addr = resmap_alloc_region(mem_type, size);
+ if (!new_addr) {
+ /* Reallocate old region. */
+ resmap_reserve_region(rg->paddr, rg->size);
+ return -ENOMEM;
+ }
+ }
+ rg->paddr = new_addr;
+ rg->size = size;
+ rg->type = mem_type;
+
+ *paddr = new_addr;
+
+ return 0;
+}
+
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+ struct omapfb_mem_region *rg;
+ int i;
+ int r;
+ unsigned long mem_start[DISPC_MEMTYPE_NUM];
+ unsigned long mem_end[DISPC_MEMTYPE_NUM];
+
+ if (!req_md->region_cnt) {
+ dev_err(dispc.fbdev->dev, "no memory regions defined\n");
+ return -ENOENT;
+ }
+
+ rg = &req_md->region[0];
+ memset(mem_start, 0xff, sizeof(mem_start));
+ memset(mem_end, 0, sizeof(mem_end));
+
+ for (i = 0; i < req_md->region_cnt; i++, rg++) {
+ int mtype;
+ if (rg->paddr) {
+ rg->alloc = 0;
+ if (rg->vaddr == NULL) {
+ rg->map = 1;
+ if ((r = mmap_kern(rg)) < 0)
+ return r;
+ }
+ } else {
+ if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
+ dev_err(dispc.fbdev->dev,
+ "unsupported memory type\n");
+ return -EINVAL;
+ }
+ rg->alloc = rg->map = 1;
+ if ((r = alloc_fbmem(rg)) < 0)
+ return r;
+ }
+ mtype = rg->type;
+
+ if (rg->paddr < mem_start[mtype])
+ mem_start[mtype] = rg->paddr;
+ if (rg->paddr + rg->size > mem_end[mtype])
+ mem_end[mtype] = rg->paddr + rg->size;
+ }
+
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ unsigned long start;
+ size_t size;
+ if (mem_end[i] == 0)
+ continue;
+ start = mem_start[i];
+ size = mem_end[i] - start;
+ dispc.res_map[i] = init_resmap(start, size);
+ r = -ENOMEM;
+ if (dispc.res_map[i] == NULL)
+ goto fail;
+ /* Initial state is that everything is reserved. This
+ * includes possible holes as well, which will never be
+ * freed.
+ */
+ resmap_reserve_region(start, size);
+ }
+
+ dispc.mem_desc = *req_md;
+
+ return 0;
+fail:
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ if (dispc.res_map[i] != NULL)
+ cleanup_resmap(dispc.res_map[i]);
+ }
+ return r;
+}
+
+static void cleanup_fbmem(void)
+{
+ struct omapfb_mem_region *rg;
+ int i;
+
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ if (dispc.res_map[i] != NULL)
+ cleanup_resmap(dispc.res_map[i]);
+ }
+ rg = &dispc.mem_desc.region[0];
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
+ if (rg->alloc)
+ free_fbmem(rg);
+ else {
+ if (rg->map)
+ unmap_kern(rg);
+ }
+ }
+}
+
+static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r;
+ u32 l;
+ struct lcd_panel *panel = fbdev->panel;
+ int tmo = 10000;
+ int skip_init = 0;
+ int i;
+
+ memset(&dispc, 0, sizeof(dispc));
+
+ dispc.base = io_p2v(DISPC_BASE);
+ dispc.fbdev = fbdev;
+ dispc.ext_mode = ext_mode;
+
+ init_completion(&dispc.frame_done);
+
+ if ((r = get_dss_clocks()) < 0)
+ return r;
+
+ enable_interface_clocks(1);
+ enable_lcd_clocks(1);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ l = dispc_read_reg(DISPC_CONTROL);
+ /* LCD enabled ? */
+ if (l & 1) {
+ pr_info("omapfb: skipping hardware initialization\n");
+ skip_init = 1;
+ }
+#endif
+
+ if (!skip_init) {
+ /* Reset monitoring works only w/ the 54M clk */
+ enable_digit_clocks(1);
+
+ /* Soft reset */
+ MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);
+
+ while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
+ if (!--tmo) {
+ dev_err(dispc.fbdev->dev, "soft reset failed\n");
+ r = -ENODEV;
+ enable_digit_clocks(0);
+ goto fail1;
+ }
+ }
+
+ enable_digit_clocks(0);
+ }
+
+ /* Enable smart idle and autoidle */
+ l = dispc_read_reg(DISPC_SYSCONFIG);
+ l &= ~((3 << 12) | (3 << 3));
+ l |= (2 << 12) | (2 << 3) | (1 << 0);
+ dispc_write_reg(DISPC_SYSCONFIG, l);
+ omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
+
+ /* Set functional clock autogating */
+ l = dispc_read_reg(DISPC_CONFIG);
+ l |= 1 << 9;
+ dispc_write_reg(DISPC_CONFIG, l);
+
+ l = dispc_read_reg(DISPC_IRQSTATUS);
+ dispc_write_reg(DISPC_IRQSTATUS, l);
+
+ /* Enable those that we handle always */
+ omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
+
+ if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
+ 0, MODULE_NAME, fbdev)) < 0) {
+ dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
+ goto fail1;
+ }
+
+ /* L3 firewall setting: enable access to OCM RAM */
+ __raw_writel(0x402000b0, io_p2v(0x680050a0));
+
+ if ((r = alloc_palette_ram()) < 0)
+ goto fail2;
+
+ if ((r = setup_fbmem(req_vram)) < 0)
+ goto fail3;
+
+ if (!skip_init) {
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
+ memset(dispc.mem_desc.region[i].vaddr, 0,
+ dispc.mem_desc.region[i].size);
+ }
+
+ /* Set logic clock to fck, pixel clock to fck/2 for now */
+ MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
+ MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);
+
+ setup_plane_fifo(0, ext_mode);
+ setup_plane_fifo(1, ext_mode);
+ setup_plane_fifo(2, ext_mode);
+
+ setup_color_conv_coef();
+
+ set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
+ set_load_mode(DISPC_LOAD_FRAME_ONLY);
+
+ if (!ext_mode) {
+ set_lcd_data_lines(panel->data_lines);
+ omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
+ set_lcd_timings();
+ } else
+ set_lcd_data_lines(panel->bpp);
+ enable_rfbi_mode(ext_mode);
+ }
+
+ l = dispc_read_reg(DISPC_REVISION);
+ pr_info("omapfb: DISPC version %d.%d initialized\n",
+ l >> 4 & 0x0f, l & 0x0f);
+ enable_lcd_clocks(0);
+
+ return 0;
+fail3:
+ free_palette_ram();
+fail2:
+ free_irq(INT_24XX_DSS_IRQ, fbdev);
+fail1:
+ enable_lcd_clocks(0);
+ enable_interface_clocks(0);
+ put_dss_clocks();
+
+ return r;
+}
+
+static void omap_dispc_cleanup(void)
+{
+ int i;
+
+ omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ /* This will also disable clocks that are on */
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++)
+ omap_dispc_enable_plane(i, 0);
+ cleanup_fbmem();
+ free_palette_ram();
+ free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
+ enable_interface_clocks(0);
+ put_dss_clocks();
+}
+
+const struct lcd_ctrl omap2_int_ctrl = {
+ .name = "internal",
+ .init = omap_dispc_init,
+ .cleanup = omap_dispc_cleanup,
+ .get_caps = omap_dispc_get_caps,
+ .set_update_mode = omap_dispc_set_update_mode,
+ .get_update_mode = omap_dispc_get_update_mode,
+ .update_window = omap_dispc_update_window,
+ .suspend = omap_dispc_suspend,
+ .resume = omap_dispc_resume,
+ .setup_plane = omap_dispc_setup_plane,
+ .setup_mem = omap_dispc_setup_mem,
+ .set_scale = omap_dispc_set_scale,
+ .enable_plane = omap_dispc_enable_plane,
+ .set_color_key = omap_dispc_set_color_key,
+ .get_color_key = omap_dispc_get_color_key,
+ .mmap = omap_dispc_mmap_user,
+};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
new file mode 100644
index 00000000000..eb1512b56ce
--- /dev/null
+++ b/drivers/video/omap/dispc.h
@@ -0,0 +1,43 @@
+#ifndef _DISPC_H
+#define _DISPC_H
+
+#include <linux/interrupt.h>
+
+#define DISPC_PLANE_GFX 0
+#define DISPC_PLANE_VID1 1
+#define DISPC_PLANE_VID2 2
+
+#define DISPC_RGB_1_BPP 0x00
+#define DISPC_RGB_2_BPP 0x01
+#define DISPC_RGB_4_BPP 0x02
+#define DISPC_RGB_8_BPP 0x03
+#define DISPC_RGB_12_BPP 0x04
+#define DISPC_RGB_16_BPP 0x06
+#define DISPC_RGB_24_BPP 0x08
+#define DISPC_RGB_24_BPP_UNPACK_32 0x09
+#define DISPC_YUV2_422 0x0a
+#define DISPC_UYVY_422 0x0b
+
+#define DISPC_BURST_4x32 0
+#define DISPC_BURST_8x32 1
+#define DISPC_BURST_16x32 2
+
+#define DISPC_LOAD_CLUT_AND_FRAME 0x00
+#define DISPC_LOAD_CLUT_ONLY 0x01
+#define DISPC_LOAD_FRAME_ONLY 0x02
+#define DISPC_LOAD_CLUT_ONCE_FRAME 0x03
+
+#define DISPC_TFT_DATA_LINES_12 0
+#define DISPC_TFT_DATA_LINES_16 1
+#define DISPC_TFT_DATA_LINES_18 2
+#define DISPC_TFT_DATA_LINES_24 3
+
+extern void omap_dispc_set_lcd_size(int width, int height);
+
+extern void omap_dispc_enable_lcd_out(int enable);
+extern void omap_dispc_enable_digit_out(int enable);
+
+extern int omap_dispc_request_irq(void (*callback)(void *data), void *data);
+extern void omap_dispc_free_irq(void);
+
+#endif
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
new file mode 100644
index 00000000000..dc48e02f215
--- /dev/null
+++ b/drivers/video/omap/hwa742.c
@@ -0,0 +1,1077 @@
+/*
+ * Epson HWA742 LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ * YUV support: Jussi Laako <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/hwa742.h>
+
+#define HWA742_REV_CODE_REG 0x0
+#define HWA742_CONFIG_REG 0x2
+#define HWA742_PLL_DIV_REG 0x4
+#define HWA742_PLL_0_REG 0x6
+#define HWA742_PLL_1_REG 0x8
+#define HWA742_PLL_2_REG 0xa
+#define HWA742_PLL_3_REG 0xc
+#define HWA742_PLL_4_REG 0xe
+#define HWA742_CLK_SRC_REG 0x12
+#define HWA742_PANEL_TYPE_REG 0x14
+#define HWA742_H_DISP_REG 0x16
+#define HWA742_H_NDP_REG 0x18
+#define HWA742_V_DISP_1_REG 0x1a
+#define HWA742_V_DISP_2_REG 0x1c
+#define HWA742_V_NDP_REG 0x1e
+#define HWA742_HS_W_REG 0x20
+#define HWA742_HP_S_REG 0x22
+#define HWA742_VS_W_REG 0x24
+#define HWA742_VP_S_REG 0x26
+#define HWA742_PCLK_POL_REG 0x28
+#define HWA742_INPUT_MODE_REG 0x2a
+#define HWA742_TRANSL_MODE_REG1 0x2e
+#define HWA742_DISP_MODE_REG 0x34
+#define HWA742_WINDOW_TYPE 0x36
+#define HWA742_WINDOW_X_START_0 0x38
+#define HWA742_WINDOW_X_START_1 0x3a
+#define HWA742_WINDOW_Y_START_0 0x3c
+#define HWA742_WINDOW_Y_START_1 0x3e
+#define HWA742_WINDOW_X_END_0 0x40
+#define HWA742_WINDOW_X_END_1 0x42
+#define HWA742_WINDOW_Y_END_0 0x44
+#define HWA742_WINDOW_Y_END_1 0x46
+#define HWA742_MEMORY_WRITE_LSB 0x48
+#define HWA742_MEMORY_WRITE_MSB 0x49
+#define HWA742_MEMORY_READ_0 0x4a
+#define HWA742_MEMORY_READ_1 0x4c
+#define HWA742_MEMORY_READ_2 0x4e
+#define HWA742_POWER_SAVE 0x56
+#define HWA742_NDP_CTRL 0x58
+
+#define HWA742_AUTO_UPDATE_TIME (HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE 24
+#define IRQ_REQ_POOL_SIZE 4
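+/*
+ * alloc_req() skips the counting semaphore when called from irq context, so
+ * IRQ_REQ_POOL_SIZE slots are effectively held back for interrupt-time use;
+ * process-context callers are then limited to the remaining
+ * REQ_POOL_SIZE - IRQ_REQ_POOL_SIZE requests, assuming the semaphore is
+ * initialized with that count as in the blizzard variant of this driver.
+ */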
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE 0
+#define REQ_PENDING 1
+
+struct update_param {
+ int x, y, width, height;
+ int color_mode;
+ int flags;
+};
+
+struct hwa742_request {
+ struct list_head entry;
+ unsigned int flags;
+
+ int (*handler)(struct hwa742_request *req);
+ void (*complete)(void *data);
+ void *complete_data;
+
+ union {
+ struct update_param update;
+ struct completion *sync;
+ } par;
+};
+
+struct {
+ enum omapfb_update_mode update_mode;
+ enum omapfb_update_mode update_mode_before_suspend;
+
+ struct timer_list auto_update_timer;
+ int stop_auto_update;
+ struct omapfb_update_window auto_update_window;
+ unsigned te_connected:1;
+ unsigned vsync_only:1;
+
+ struct hwa742_request req_pool[REQ_POOL_SIZE];
+ struct list_head pending_req_list;
+ struct list_head free_req_list;
+ struct semaphore req_sema;
+ spinlock_t req_lock;
+
+ struct extif_timings reg_timings, lut_timings;
+
+ int prev_color_mode;
+ int prev_flags;
+ int window_type;
+
+ u32 max_transmit_size;
+ u32 extif_clk_period;
+ unsigned long pix_tx_time;
+ unsigned long line_upd_time;
+
+
+ struct omapfb_device *fbdev;
+ struct lcd_ctrl_extif *extif;
+ struct lcd_ctrl *int_ctrl;
+
+ void (*power_up)(struct device *dev);
+ void (*power_down)(struct device *dev);
+} hwa742;
+
+struct lcd_ctrl hwa742_ctrl;
+
+static u8 hwa742_read_reg(u8 reg)
+{
+ u8 data;
+
+ hwa742.extif->set_bits_per_cycle(8);
+ hwa742.extif->write_command(&reg, 1);
+ hwa742.extif->read_data(&data, 1);
+
+ return data;
+}
+
+static void hwa742_write_reg(u8 reg, u8 data)
+{
+ hwa742.extif->set_bits_per_cycle(8);
+ hwa742.extif->write_command(&reg, 1);
+ hwa742.extif->write_data(&data, 1);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
+{
+ u8 tmp[8];
+ u8 cmd;
+
+ x_end--;
+ y_end--;
+ tmp[0] = x_start;
+ tmp[1] = x_start >> 8;
+ tmp[2] = y_start;
+ tmp[3] = y_start >> 8;
+ tmp[4] = x_end;
+ tmp[5] = x_end >> 8;
+ tmp[6] = y_end;
+ tmp[7] = y_end >> 8;
+
+ hwa742.extif->set_bits_per_cycle(8);
+ cmd = HWA742_WINDOW_X_START_0;
+
+ hwa742.extif->write_command(&cmd, 1);
+
+ hwa742.extif->write_data(tmp, 8);
+}
+
+static void set_format_regs(int conv, int transl, int flags)
+{
+ if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+ hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
+#endif
+ } else {
+ hwa742.window_type = (hwa742.window_type & 0xfc);
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
+#endif
+ }
+
+ hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
+ hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
+ hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
+}
+
+static void enable_tearsync(int y, int width, int height, int screen_height,
+ int force_vsync)
+{
+ u8 b;
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b |= 1 << 2;
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+ if (likely(hwa742.vsync_only || force_vsync)) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if ((width * hwa742.pix_tx_time / 1000) * height <
+ (y + height) * (hwa742.line_upd_time / 1000)) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ hwa742.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+ u8 b;
+
+ hwa742.extif->enable_tearsync(0, 0);
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b &= ~(1 << 2);
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+}
+
+static inline struct hwa742_request *alloc_req(void)
+{
+ unsigned long flags;
+ struct hwa742_request *req;
+ int req_flags = 0;
+
+ if (!in_interrupt())
+ down(&hwa742.req_sema);
+ else
+ req_flags = REQ_FROM_IRQ_POOL;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ BUG_ON(list_empty(&hwa742.free_req_list));
+ req = list_entry(hwa742.free_req_list.next,
+ struct hwa742_request, entry);
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ INIT_LIST_HEAD(&req->entry);
+ req->flags = req_flags;
+
+ return req;
+}
+
+static inline void free_req(struct hwa742_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+
+ list_del(&req->entry);
+ list_add(&req->entry, &hwa742.free_req_list);
+ if (!(req->flags & REQ_FROM_IRQ_POOL))
+ up(&hwa742.req_sema);
+
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+
+ while (!list_empty(&hwa742.pending_req_list)) {
+ struct hwa742_request *req;
+ void (*complete)(void *);
+ void *complete_data;
+
+ req = list_entry(hwa742.pending_req_list.next,
+ struct hwa742_request, entry);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ if (req->handler(req) == REQ_PENDING)
+ return;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+ unsigned long flags;
+ int process = 1;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ if (likely(!list_empty(&hwa742.pending_req_list)))
+ process = 0;
+ list_splice_init(head, hwa742.pending_req_list.prev);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ if (process)
+ process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+ struct hwa742_request *req = (struct hwa742_request *)data;
+ void (*complete)(void *);
+ void *complete_data;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ process_pending_requests();
+}
+
+static int send_frame_handler(struct hwa742_request *req)
+{
+ struct update_param *par = &req->par.update;
+ int x = par->x;
+ int y = par->y;
+ int w = par->width;
+ int h = par->height;
+ int bpp;
+ int conv, transl;
+ unsigned long offset;
+ int color_mode = par->color_mode;
+ int flags = par->flags;
+ int scr_width = hwa742.fbdev->panel->x_res;
+ int scr_height = hwa742.fbdev->panel->y_res;
+
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
+ "color_mode %d flags %d\n",
+ x, y, w, h, scr_width, color_mode, flags);
+#endif
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ bpp = 16;
+ conv = 0x08;
+ transl = 0x25;
+ break;
+ case OMAPFB_COLOR_YUV420:
+ bpp = 12;
+ conv = 0x09;
+ transl = 0x25;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ bpp = 16;
+ conv = 0x01;
+ transl = 0x05;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hwa742.prev_flags != flags ||
+ hwa742.prev_color_mode != color_mode) {
+ set_format_regs(conv, transl, flags);
+ hwa742.prev_color_mode = color_mode;
+ hwa742.prev_flags = flags;
+ }
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(y, scr_width, h, scr_height,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(x, y, x + w, y + h);
+
+ offset = (scr_width * y + x) * bpp / 8;
+
+ hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
+ OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
+ color_mode);
+
+ hwa742.extif->set_bits_per_cycle(16);
+
+ hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+ hwa742.extif->transfer_area(w, h, request_complete, req);
+
+ return REQ_PENDING;
+}
+
+static void send_frame_complete(void *data)
+{
+ hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
+}
+
+#define ADD_PREQ(_x, _y, _w, _h) do { \
+ req = alloc_req(); \
+ req->handler = send_frame_handler; \
+ req->complete = send_frame_complete; \
+ req->par.update.x = _x; \
+ req->par.update.y = _y; \
+ req->par.update.width = _w; \
+ req->par.update.height = _h; \
+ req->par.update.color_mode = color_mode;\
+ req->par.update.flags = flags; \
+ list_add_tail(&req->entry, req_head); \
+} while(0)
+
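+/*
+ * create_req_list() splits one update window into up to four requests: an
+ * optional one-pixel-wide strip to make the x start even, the even-width
+ * middle span (cut in two when it would exceed max_transmit_size), and an
+ * optional one-pixel-wide trailing strip.  Only the first request keeps the
+ * TEARSYNC flag; the rest have it cleared.
+ */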
+static void create_req_list(struct omapfb_update_window *win,
+ struct list_head *req_head)
+{
+ struct hwa742_request *req;
+ int x = win->x;
+ int y = win->y;
+ int width = win->width;
+ int height = win->height;
+ int color_mode;
+ int flags;
+
+ flags = win->format & ~OMAPFB_FORMAT_MASK;
+ color_mode = win->format & OMAPFB_FORMAT_MASK;
+
+ if (x & 1) {
+ ADD_PREQ(x, y, 1, height);
+ width--;
+ x++;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+ if (width & ~1) {
+ unsigned int xspan = width & ~1;
+ unsigned int ystart = y;
+ unsigned int yspan = height;
+
+ if (xspan * height * 2 > hwa742.max_transmit_size) {
+ yspan = hwa742.max_transmit_size / (xspan * 2);
+ ADD_PREQ(x, ystart, xspan, yspan);
+ ystart += yspan;
+ yspan = height - yspan;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+
+ ADD_PREQ(x, ystart, xspan, yspan);
+ x += xspan;
+ width -= xspan;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+ if (width)
+ ADD_PREQ(x, y, 1, height);
+}
+
+static void auto_update_complete(void *data)
+{
+ if (!hwa742.stop_auto_update)
+ mod_timer(&hwa742.auto_update_timer,
+ jiffies + HWA742_AUTO_UPDATE_TIME);
+}
+
+static void hwa742_update_window_auto(unsigned long arg)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *last;
+
+ create_req_list(&hwa742.auto_update_window, &req_list);
+ last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+ last->complete = auto_update_complete;
+ last->complete_data = NULL;
+
+ submit_req_list(&req_list);
+}
+
+int hwa742_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *last;
+ int r = 0;
+
+ if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
+ dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
+ r = -EINVAL;
+ goto out;
+ }
+ if (unlikely(win->format &
+ ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
+ OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
+ dev_dbg(hwa742.fbdev->dev, "invalid window flag");
+ r = -EINVAL;
+ goto out;
+ }
+
+ create_req_list(win, &req_list);
+ last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+ last->complete = complete_callback;
+ last->complete_data = (void *)complete_callback_data;
+
+ submit_req_list(&req_list);
+
+out:
+ return r;
+}
+EXPORT_SYMBOL(hwa742_update_window_async);
+
+static int hwa742_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ if (plane != OMAPFB_PLANE_GFX ||
+ channel_out != OMAPFB_CHANNEL_OUT_LCD)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hwa742_enable_plane(int plane, int enable)
+{
+ if (plane != 0)
+ return -EINVAL;
+
+ hwa742.int_ctrl->enable_plane(plane, enable);
+
+ return 0;
+}
+
+static int sync_handler(struct hwa742_request *req)
+{
+ complete(req->par.sync);
+ return REQ_COMPLETE;
+}
+
+static void hwa742_sync(void)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *req;
+ struct completion comp;
+
+ req = alloc_req();
+
+ req->handler = sync_handler;
+ req->complete = NULL;
+ init_completion(&comp);
+ req->par.sync = &comp;
+
+ list_add(&req->entry, &req_list);
+ submit_req_list(&req_list);
+
+ wait_for_completion(&comp);
+}
+
+static void hwa742_bind_client(struct omapfb_notifier_block *nb)
+{
+ dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
+ if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+ }
+}
+
+static int hwa742_set_update_mode(enum omapfb_update_mode mode)
+{
+ if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
+ mode != OMAPFB_UPDATE_DISABLED)
+ return -EINVAL;
+
+ if (mode == hwa742.update_mode)
+ return 0;
+
+ dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
+ mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+ (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+ switch (hwa742.update_mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ hwa742.stop_auto_update = 1;
+ del_timer_sync(&hwa742.auto_update_timer);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ hwa742.update_mode = mode;
+ hwa742_sync();
+ hwa742.stop_auto_update = 0;
+
+ switch (mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ hwa742_update_window_auto(0);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ return 0;
+}
+
+static enum omapfb_update_mode hwa742_get_update_mode(void)
+{
+ return hwa742.update_mode;
+}
+
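+/*
+ * Illustration with hypothetical numbers: if extif_clk_period is 10000 ps
+ * and div is 1, round_to_extif_ticks(12200, 1) rounds 12.2 ns up to
+ * 20000 ps, i.e. two whole external interface clock ticks.
+ */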
+static unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+ int bus_tick = hwa742.extif_clk_period * div;
+ return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 12.2 ns (regs),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 16 ns (regs),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 2*SYSCLK (regs),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
+ "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+ t = &hwa742.reg_timings;
+ memset(t, 0, sizeof(*t));
+ t->clk_div = div;
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return hwa742.extif->convert_timings(t);
+}
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns
+ */
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
+ "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+ t = &hwa742.lut_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return hwa742.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+ int max_clk_div;
+ int div;
+
+ hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_reg_timing(sysclk, div) == 0)
+ break;
+ }
+ if (div > max_clk_div)
+ goto err;
+
+ *extif_mem_div = div;
+
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_lut_timing(sysclk, div) == 0)
+ break;
+ }
+
+ if (div > max_clk_div)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(hwa742.fbdev->dev, "can't setup timings\n");
+ return -1;
+}
+
+static void calc_hwa742_clk_rates(unsigned long ext_clk,
+ unsigned long *sys_clk, unsigned long *pix_clk)
+{
+ int pix_clk_src;
+ int sys_div = 0, sys_mul = 0;
+ int pix_div;
+
+ pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
+ pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+ if ((pix_clk_src & (0x3 << 1)) == 0) {
+ /* Source is the PLL */
+ sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
+ sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
+ *sys_clk = ext_clk * sys_mul / sys_div;
+ } else /* source is ext clk, or oscillator */
+ *sys_clk = ext_clk;
+
+ *pix_clk = *sys_clk / pix_div; /* HZ */
+ dev_dbg(hwa742.fbdev->dev,
+ "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+ ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+ dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+ *sys_clk, *pix_clk);
+}
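
Annotation (not part of the patch): the decode above boils down to sys_clk = ext_clk * sys_mul / sys_div when the PLL feeds the pixel path, and pix_clk = sys_clk / pix_div either way, each register field being value-plus-one. A host-side sketch with invented register contents, not values read from a real HWA742:

    #include <stdio.h>

    /* Mirrors calc_hwa742_clk_rates(); register bytes are illustrative. */
    int main(void)
    {
        unsigned long ext_clk = 19200000;   /* Hz, assumed board clock */
        unsigned char clk_src = 0x08;       /* bits 2:1 == 0 -> PLL; pix divider field = 1 */
        unsigned char pll_div = 0x03;       /* sys_div = 3 + 1 = 4 */
        unsigned char pll_4 = 0x13;         /* sys_mul = 19 + 1 = 20 */
        int pix_div = ((clk_src >> 3) & 0x1f) + 1;  /* 2 */
        unsigned long sys_clk = ext_clk * ((pll_4 & 0x7f) + 1) / ((pll_div & 0x3f) + 1);
        unsigned long pix_clk = sys_clk / pix_div;

        printf("sys_clk %lu pix_clk %lu\n", sys_clk, pix_clk); /* 96000000 48000000 */
        return 0;
    }
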
+
+
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+ int hdisp, vdisp;
+ int hndp, vndp;
+ int hsw, vsw;
+ int hs, vs;
+ int hs_pol_inv, vs_pol_inv;
+ int use_hsvs, use_ndp;
+ u8 b;
+
+ hsw = hwa742_read_reg(HWA742_HS_W_REG);
+ vsw = hwa742_read_reg(HWA742_VS_W_REG);
+ hs_pol_inv = !(hsw & 0x80);
+ vs_pol_inv = !(vsw & 0x80);
+ hsw = hsw & 0x7f;
+ vsw = vsw & 0x3f;
+
+ hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
+ vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
+ ((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);
+
+ hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
+ vndp = hwa742_read_reg(HWA742_V_NDP_REG);
+
+ /* time to transfer one pixel (16bpp) in ps */
+ hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
+ if (hwa742.extif->get_max_tx_rate != NULL) {
+ /*
+ * The external interface might have a rate limitation,
+ * if so, we have to maximize our transfer rate.
+ */
+ unsigned long min_tx_time;
+ unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();
+
+ dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
+ max_tx_rate);
+ min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
+ if (hwa742.pix_tx_time < min_tx_time)
+ hwa742.pix_tx_time = min_tx_time;
+ }
+
+ /* time to update one line in ps */
+ hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+ hwa742.line_upd_time *= 1000;
+ if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
+ /*
+ * transfer speed too low, we might have to use both
+ * HS and VS
+ */
+ use_hsvs = 1;
+ else
+ /* decent transfer speed, we'll always use only VS */
+ use_hsvs = 0;
+
+ if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+ /*
+ * HS or'ed with VS doesn't work, use the active high
+ * TE signal based on HNDP / VNDP
+ */
+ use_ndp = 1;
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ hs = hndp;
+ vs = vndp;
+ } else {
+ /*
+ * Use HS or'ed with VS as a TE signal if both are needed
+ * or VNDP if only vsync is needed.
+ */
+ use_ndp = 0;
+ hs = hsw;
+ vs = vsw;
+ if (!use_hsvs) {
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ }
+ }
+
+ hs = hs * 1000000 / (pix_clk / 1000); /* ps */
+ hs *= 1000;
+
+ vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
+ vs *= 1000;
+
+ if (vs <= hs)
+ return -EDOM;
+ /* set VS to 120% of HS to minimize VS detection time */
+ vs = hs * 12 / 10;
+ /* minimize HS too */
+ hs = 10000;
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b &= ~0x3;
+ b |= use_hsvs ? 1 : 0;
+ b |= (use_ndp && use_hsvs) ? 0 : 2;
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+ hwa742.vsync_only = !use_hsvs;
+
+ dev_dbg(hwa742.fbdev->dev,
+ "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+ pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
+ dev_dbg(hwa742.fbdev->dev,
+ "hs %d ps vs %d ps mode %d vsync_only %d\n",
+ hs, vs, (b & 0x3), !use_hsvs);
+
+ return hwa742.extif->setup_tearsync(1, hs, vs,
+ hs_pol_inv, vs_pol_inv, extif_div);
+}
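
Annotation (not part of the patch): the choice between gating on VS alone and on HS or'ed with VS comes down to one comparison, the time to push one line over the external interface (hdisp * pix_tx_time) against the panel's own line period ((hdisp + hndp) / pix_clk). A sketch of that decision with assumed numbers:

    /* Assumed: 240 visible pixels, 10-pixel HNDP, 6 MHz pixel clock and
     * 150 ns (150000 ps) to transfer one pixel over the extif. */
    static int need_hs_and_vs(void)
    {
        unsigned long pix_clk = 6000000;     /* Hz */
        unsigned long pix_tx_time = 150000;  /* ps per pixel */
        int hdisp = 240, hndp = 10;
        unsigned long line_ps = (hdisp + hndp) * 1000000 / (pix_clk / 1000) * 1000;

        /* 36000000 ps to send a line vs. a ~41666000 ps line period:
         * the transfer keeps up, so VS alone is enough (returns 0). */
        return hdisp * pix_tx_time > line_ps;
    }
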
+
+static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
+{
+ hwa742.int_ctrl->get_caps(plane, caps);
+ caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
+ if (hwa742.te_connected)
+ caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+ caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV420);
+}
+
+static void hwa742_suspend(void)
+{
+ hwa742.update_mode_before_suspend = hwa742.update_mode;
+ hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ /* Enable sleep mode */
+ hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(hwa742.fbdev->dev);
+}
+
+static void hwa742_resume(void)
+{
+ if (hwa742.power_up != NULL)
+ hwa742.power_up(hwa742.fbdev->dev);
+ /* Disable sleep mode */
+ hwa742_write_reg(HWA742_POWER_SAVE, 0);
+ while (1) {
+ /* Loop until PLL output is stabilized */
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+}
+
+static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r = 0, i;
+ u8 rev, conf;
+ unsigned long ext_clk;
+ unsigned long sys_clk, pix_clk;
+ int extif_mem_div;
+ struct omapfb_platform_data *omapfb_conf;
+ struct hwa742_platform_data *ctrl_conf;
+
+ BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+ hwa742.fbdev = fbdev;
+ hwa742.extif = fbdev->ext_if;
+ hwa742.int_ctrl = fbdev->int_ctrl;
+
+ omapfb_conf = fbdev->dev->platform_data;
+ ctrl_conf = omapfb_conf->ctrl_platform_data;
+
+ if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+ dev_err(fbdev->dev, "HWA742: missing platform data\n");
+ r = -ENOENT;
+ goto err1;
+ }
+
+ hwa742.power_down = ctrl_conf->power_down;
+ hwa742.power_up = ctrl_conf->power_up;
+
+ spin_lock_init(&hwa742.req_lock);
+
+ if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+ goto err1;
+
+ if ((r = hwa742.extif->init(fbdev)) < 0)
+ goto err2;
+
+ ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+ if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
+ goto err3;
+ hwa742.extif->set_timings(&hwa742.reg_timings);
+ if (hwa742.power_up != NULL)
+ hwa742.power_up(fbdev->dev);
+
+ calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
+ if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
+ goto err4;
+ hwa742.extif->set_timings(&hwa742.reg_timings);
+
+ rev = hwa742_read_reg(HWA742_REV_CODE_REG);
+ if ((rev & 0xfc) != 0x80) {
+ dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
+ r = -ENODEV;
+ goto err4;
+ }
+
+
+ if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
+ dev_err(fbdev->dev,
+ "HWA742: controller not initialized by the bootloader\n");
+ r = -ENODEV;
+ goto err4;
+ }
+
+ if (ctrl_conf->te_connected) {
+ if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
+ dev_err(hwa742.fbdev->dev,
+ "HWA742: can't setup tearing synchronization\n");
+ goto err4;
+ }
+ hwa742.te_connected = 1;
+ }
+
+ hwa742.max_transmit_size = hwa742.extif->max_transmit_size;
+
+ hwa742.update_mode = OMAPFB_UPDATE_DISABLED;
+
+ hwa742.auto_update_window.x = 0;
+ hwa742.auto_update_window.y = 0;
+ hwa742.auto_update_window.width = fbdev->panel->x_res;
+ hwa742.auto_update_window.height = fbdev->panel->y_res;
+ hwa742.auto_update_window.format = 0;
+
+ init_timer(&hwa742.auto_update_timer);
+ hwa742.auto_update_timer.function = hwa742_update_window_auto;
+ hwa742.auto_update_timer.data = 0;
+
+ hwa742.prev_color_mode = -1;
+ hwa742.prev_flags = 0;
+
+ hwa742.fbdev = fbdev;
+
+ INIT_LIST_HEAD(&hwa742.free_req_list);
+ INIT_LIST_HEAD(&hwa742.pending_req_list);
+ for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
+ list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
+ BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+ sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+ conf = hwa742_read_reg(HWA742_CONFIG_REG);
+ dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+
+ return 0;
+err4:
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(fbdev->dev);
+err3:
+ hwa742.extif->cleanup();
+err2:
+ hwa742.int_ctrl->cleanup();
+err1:
+ return r;
+}
+
+static void hwa742_cleanup(void)
+{
+ hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ hwa742.extif->cleanup();
+ hwa742.int_ctrl->cleanup();
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(hwa742.fbdev->dev);
+}
+
+struct lcd_ctrl hwa742_ctrl = {
+ .name = "hwa742",
+ .init = hwa742_init,
+ .cleanup = hwa742_cleanup,
+ .bind_client = hwa742_bind_client,
+ .get_caps = hwa742_get_caps,
+ .set_update_mode = hwa742_set_update_mode,
+ .get_update_mode = hwa742_get_update_mode,
+ .setup_plane = hwa742_setup_plane,
+ .enable_plane = hwa742_enable_plane,
+ .update_window = hwa742_update_window_async,
+ .sync = hwa742_sync,
+ .suspend = hwa742_suspend,
+ .resume = hwa742_resume,
+};
+
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
new file mode 100644
index 00000000000..51807b4e26d
--- /dev/null
+++ b/drivers/video/omap/lcd_h3.c
@@ -0,0 +1,141 @@
+/*
+ * LCD panel support for the TI OMAP H3 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/tps65010.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb-lcd_h3"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void h3_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h3_panel_enable(struct lcd_panel *panel)
+{
+ int r = 0;
+
+ /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+ r = tps65010_set_gpio_out_value(GPIO1, HIGH);
+ if (!r)
+ r = tps65010_set_gpio_out_value(GPIO2, HIGH);
+ if (r)
+ pr_err("Unable to turn on LCD panel\n");
+
+ return r;
+}
+
+static void h3_panel_disable(struct lcd_panel *panel)
+{
+ int r = 0;
+
+ /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+ r = tps65010_set_gpio_out_value(GPIO1, LOW);
+ if (!r)
+ r = tps65010_set_gpio_out_value(GPIO2, LOW);
+ if (r)
+ pr_err("Unable to turn off LCD panel\n");
+}
+
+static unsigned long h3_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel h3_panel = {
+ .name = "h3",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .data_lines = 16,
+ .bpp = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12000,
+ .hsw = 12,
+ .hfp = 14,
+ .hbp = 72 - 12,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 0,
+
+ .init = h3_panel_init,
+ .cleanup = h3_panel_cleanup,
+ .enable = h3_panel_enable,
+ .disable = h3_panel_disable,
+ .get_caps = h3_panel_get_caps,
+};
+
+static int h3_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&h3_panel);
+ return 0;
+}
+
+static int h3_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int h3_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver h3_panel_driver = {
+ .probe = h3_panel_probe,
+ .remove = h3_panel_remove,
+ .suspend = h3_panel_suspend,
+ .resume = h3_panel_resume,
+ .driver = {
+ .name = "lcd_h3",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int h3_panel_drv_init(void)
+{
+ return platform_driver_register(&h3_panel_driver);
+}
+
+static void h3_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&h3_panel_driver);
+}
+
+module_init(h3_panel_drv_init);
+module_exit(h3_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
new file mode 100644
index 00000000000..fd6f0eb16de
--- /dev/null
+++ b/drivers/video/omap/lcd_h4.c
@@ -0,0 +1,117 @@
+/*
+ * LCD panel support for the TI OMAP H4 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/omapfb.h>
+
+static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void h4_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h4_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void h4_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel h4_panel = {
+ .name = "h4",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 6250,
+ .hsw = 15,
+ .hfp = 15,
+ .hbp = 60,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+
+ .init = h4_panel_init,
+ .cleanup = h4_panel_cleanup,
+ .enable = h4_panel_enable,
+ .disable = h4_panel_disable,
+ .get_caps = h4_panel_get_caps,
+};
+
+static int h4_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&h4_panel);
+ return 0;
+}
+
+static int h4_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int h4_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver h4_panel_driver = {
+ .probe = h4_panel_probe,
+ .remove = h4_panel_remove,
+ .suspend = h4_panel_suspend,
+ .resume = h4_panel_resume,
+ .driver = {
+ .name = "lcd_h4",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int h4_panel_drv_init(void)
+{
+ return platform_driver_register(&h4_panel_driver);
+}
+
+static void h4_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&h4_panel_driver);
+}
+
+module_init(h4_panel_drv_init);
+module_exit(h4_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
new file mode 100644
index 00000000000..551f385861d
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -0,0 +1,124 @@
+/*
+ * LCD panel support for the TI OMAP1510 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int innovator1510_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void innovator1510_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int innovator1510_panel_enable(struct lcd_panel *panel)
+{
+ fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+ return 0;
+}
+
+static void innovator1510_panel_disable(struct lcd_panel *panel)
+{
+ fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+}
+
+static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel innovator1510_panel = {
+ .name = "inn1510",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = innovator1510_panel_init,
+ .cleanup = innovator1510_panel_cleanup,
+ .enable = innovator1510_panel_enable,
+ .disable = innovator1510_panel_disable,
+ .get_caps = innovator1510_panel_get_caps,
+};
+
+static int innovator1510_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&innovator1510_panel);
+ return 0;
+}
+
+static int innovator1510_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int innovator1510_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int innovator1510_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver innovator1510_panel_driver = {
+ .probe = innovator1510_panel_probe,
+ .remove = innovator1510_panel_remove,
+ .suspend = innovator1510_panel_suspend,
+ .resume = innovator1510_panel_resume,
+ .driver = {
+ .name = "lcd_inn1510",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int innovator1510_panel_drv_init(void)
+{
+ return platform_driver_register(&innovator1510_panel_driver);
+}
+
+static void innovator1510_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&innovator1510_panel_driver);
+}
+
+module_init(innovator1510_panel_drv_init);
+module_exit(innovator1510_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
new file mode 100644
index 00000000000..95604ca4330
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -0,0 +1,150 @@
+/*
+ * LCD panel support for the TI OMAP1610 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb-lcd_inn1610"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int innovator1610_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ int r = 0;
+
+ if (omap_request_gpio(14)) {
+ pr_err("can't request GPIO 14\n");
+ r = -1;
+ goto exit;
+ }
+ if (omap_request_gpio(15)) {
+ pr_err("can't request GPIO 15\n");
+ omap_free_gpio(14);
+ r = -1;
+ goto exit;
+ }
+ /* configure GPIO(14, 15) as outputs */
+ omap_set_gpio_direction(14, 0);
+ omap_set_gpio_direction(15, 0);
+exit:
+ return r;
+}
+
+static void innovator1610_panel_cleanup(struct lcd_panel *panel)
+{
+ omap_free_gpio(15);
+ omap_free_gpio(14);
+}
+
+static int innovator1610_panel_enable(struct lcd_panel *panel)
+{
+ /* set GPIO14 and GPIO15 high */
+ omap_set_gpio_dataout(14, 1);
+ omap_set_gpio_dataout(15, 1);
+ return 0;
+}
+
+static void innovator1610_panel_disable(struct lcd_panel *panel)
+{
+ /* set GPIO14 and GPIO15 low */
+ omap_set_gpio_dataout(14, 0);
+ omap_set_gpio_dataout(15, 0);
+}
+
+static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel innovator1610_panel = {
+ .name = "inn1610",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 320,
+ .y_res = 240,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = innovator1610_panel_init,
+ .cleanup = innovator1610_panel_cleanup,
+ .enable = innovator1610_panel_enable,
+ .disable = innovator1610_panel_disable,
+ .get_caps = innovator1610_panel_get_caps,
+};
+
+static int innovator1610_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&innovator1610_panel);
+ return 0;
+}
+
+static int innovator1610_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int innovator1610_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int innovator1610_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver innovator1610_panel_driver = {
+ .probe = innovator1610_panel_probe,
+ .remove = innovator1610_panel_remove,
+ .suspend = innovator1610_panel_suspend,
+ .resume = innovator1610_panel_resume,
+ .driver = {
+ .name = "lcd_inn1610",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int innovator1610_panel_drv_init(void)
+{
+ return platform_driver_register(&innovator1610_panel_driver);
+}
+
+static void innovator1610_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&innovator1610_panel_driver);
+}
+
+module_init(innovator1610_panel_drv_init);
+module_exit(innovator1610_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
new file mode 100644
index 00000000000..a38038840fd
--- /dev/null
+++ b/drivers/video/omap/lcd_osk.c
@@ -0,0 +1,144 @@
+/*
+ * LCD panel support for the TI OMAP OSK board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ * Adapted for OSK by <dirk.behme@de.bosch.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/omapfb.h>
+
+static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void osk_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int osk_panel_enable(struct lcd_panel *panel)
+{
+ /* configure PWL pin */
+ omap_cfg_reg(PWL);
+
+ /* Enable PWL unit */
+ omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
+
+ /* Set PWL level */
+ omap_writeb(0xFF, OMAP_PWL_ENABLE);
+
+ /* configure GPIO2 as output */
+ omap_set_gpio_direction(2, 0);
+
+ /* set GPIO2 high */
+ omap_set_gpio_dataout(2, 1);
+
+ return 0;
+}
+
+static void osk_panel_disable(struct lcd_panel *panel)
+{
+ /* Set PWL level to zero */
+ omap_writeb(0x00, OMAP_PWL_ENABLE);
+
+ /* Disable PWL unit */
+ omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
+
+ /* set GPIO2 low */
+ omap_set_gpio_dataout(2, 0);
+}
+
+static unsigned long osk_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel osk_panel = {
+ .name = "osk",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = osk_panel_init,
+ .cleanup = osk_panel_cleanup,
+ .enable = osk_panel_enable,
+ .disable = osk_panel_disable,
+ .get_caps = osk_panel_get_caps,
+};
+
+static int osk_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&osk_panel);
+ return 0;
+}
+
+static int osk_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int osk_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver osk_panel_driver = {
+ .probe = osk_panel_probe,
+ .remove = osk_panel_remove,
+ .suspend = osk_panel_suspend,
+ .resume = osk_panel_resume,
+ .driver = {
+ .name = "lcd_osk",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int osk_panel_drv_init(void)
+{
+ return platform_driver_register(&osk_panel_driver);
+}
+
+static void osk_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&osk_panel_driver);
+}
+
+module_init(osk_panel_drv_init);
+module_exit(osk_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
new file mode 100644
index 00000000000..52bdfdac42c
--- /dev/null
+++ b/drivers/video/omap/lcd_palmte.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Tungsten E
+ *
+ * Original version : Romain Goyet <r.goyet@gmail.com>
+ * Current version : Laurent Gonzalez <palmte.linux@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int palmte_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmte_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmte_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmte_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel palmte_panel = {
+ .name = "palmte",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+
+ .data_lines = 16,
+ .bpp = 8,
+ .pixel_clock = 12000,
+ .x_res = 320,
+ .y_res = 320,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmte_panel_init,
+ .cleanup = palmte_panel_cleanup,
+ .enable = palmte_panel_enable,
+ .disable = palmte_panel_disable,
+ .get_caps = palmte_panel_get_caps,
+};
+
+static int palmte_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmte_panel);
+ return 0;
+}
+
+static int palmte_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmte_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmte_panel_driver = {
+ .probe = palmte_panel_probe,
+ .remove = palmte_panel_remove,
+ .suspend = palmte_panel_suspend,
+ .resume = palmte_panel_resume,
+ .driver = {
+ .name = "lcd_palmte",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmte_panel_drv_init(void)
+{
+ return platform_driver_register(&palmte_panel_driver);
+}
+
+static void palmte_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmte_panel_driver);
+}
+
+module_init(palmte_panel_drv_init);
+module_exit(palmte_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
new file mode 100644
index 00000000000..4bb349f5435
--- /dev/null
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -0,0 +1,127 @@
+/*
+ * LCD panel support for Palm Tungsten|T
+ * Current version : Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Modified from lcd_inn1510.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+GPIO11 - backlight
+GPIO12 - screen blanking
+GPIO13 - screen blanking
+*/
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+static int palmtt_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmtt_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmtt_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmtt_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
+{
+ return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmtt_panel = {
+ .name = "palmtt",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 320,
+ .y_res = 320,
+ .pixel_clock = 10000,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmtt_panel_init,
+ .cleanup = palmtt_panel_cleanup,
+ .enable = palmtt_panel_enable,
+ .disable = palmtt_panel_disable,
+ .get_caps = palmtt_panel_get_caps,
+};
+
+static int palmtt_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmtt_panel);
+ return 0;
+}
+
+static int palmtt_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmtt_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmtt_panel_driver = {
+ .probe = palmtt_panel_probe,
+ .remove = palmtt_panel_remove,
+ .suspend = palmtt_panel_suspend,
+ .resume = palmtt_panel_resume,
+ .driver = {
+ .name = "lcd_palmtt",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmtt_panel_drv_init(void)
+{
+ return platform_driver_register(&palmtt_panel_driver);
+}
+
+static void palmtt_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmtt_panel_driver);
+}
+
+module_init(palmtt_panel_drv_init);
+module_exit(palmtt_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
new file mode 100644
index 00000000000..ea6170ddff3
--- /dev/null
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Zire71
+ *
+ * Original version : Romain Goyet
+ * Current version : Laurent Gonzalez
+ * Modified for zire71 : Marek Vasut
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+static int palmz71_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmz71_panel_cleanup(struct lcd_panel *panel)
+{
+
+}
+
+static int palmz71_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmz71_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
+{
+ return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmz71_panel = {
+ .name = "palmz71",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+ .data_lines = 16,
+ .bpp = 16,
+ .pixel_clock = 24000,
+ .x_res = 320,
+ .y_res = 320,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmz71_panel_init,
+ .cleanup = palmz71_panel_cleanup,
+ .enable = palmz71_panel_enable,
+ .disable = palmz71_panel_disable,
+ .get_caps = palmz71_panel_get_caps,
+};
+
+static int palmz71_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmz71_panel);
+ return 0;
+}
+
+static int palmz71_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmz71_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmz71_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmz71_panel_driver = {
+ .probe = palmz71_panel_probe,
+ .remove = palmz71_panel_remove,
+ .suspend = palmz71_panel_suspend,
+ .resume = palmz71_panel_resume,
+ .driver = {
+ .name = "lcd_palmz71",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmz71_panel_drv_init(void)
+{
+ return platform_driver_register(&palmz71_panel_driver);
+}
+
+static void palmz71_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmz71_panel_driver);
+}
+
+module_init(palmz71_panel_drv_init);
+module_exit(palmz71_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
new file mode 100644
index 00000000000..c4f306a4e5c
--- /dev/null
+++ b/drivers/video/omap/lcd_sx1.c
@@ -0,0 +1,334 @@
+/*
+ * LCD panel support for the Siemens SX1 mobile phone
+ *
+ * Current version : Vovan888@gmail.com, great help from FCA00000
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/mcbsp.h>
+#include <asm/arch/mux.h>
+
+/*
+ * OMAP310 GPIO registers
+ */
+#define GPIO_DATA_INPUT 0xfffce000
+#define GPIO_DATA_OUTPUT 0xfffce004
+#define GPIO_DIR_CONTROL 0xfffce008
+#define GPIO_INT_CONTROL 0xfffce00c
+#define GPIO_INT_MASK 0xfffce010
+#define GPIO_INT_STATUS 0xfffce014
+#define GPIO_PIN_CONTROL 0xfffce018
+
+
+#define A_LCD_SSC_RD 3
+#define A_LCD_SSC_SD 7
+#define _A_LCD_RESET 9
+#define _A_LCD_SSC_CS 12
+#define _A_LCD_SSC_A0 13
+
+#define DSP_REG 0xE1017024
+
+const unsigned char INIT_1[12] = {
+ 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
+};
+
+const unsigned char INIT_2[127] = {
+ 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
+ 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
+ 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
+ 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
+ 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
+ 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
+ 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
+ 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
+ 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
+ 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
+ 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
+ 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
+ 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
+ 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
+ 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
+ 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
+};
+
+const unsigned char INIT_3[15] = {
+ 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
+ 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
+};
+
+static void epson_sendbyte(int flag, unsigned char byte)
+{
+ int i, shifter = 0x80;
+
+ if (!flag)
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 0);
+ mdelay(2);
+ omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+
+ omap_set_gpio_dataout(A_LCD_SSC_SD, flag);
+
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+ for (i = 0; i < 8; i++) {
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+ omap_set_gpio_dataout(A_LCD_SSC_SD, shifter & byte);
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+ shifter >>= 1;
+ }
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
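
Annotation (not part of the patch): epson_sendbyte() bit-bangs one byte to the Epson controller MSB-first: A0 distinguishes command (flag 0) from data, SD carries the bit, and the paired McBSP PCR0 writes pulse the serial clock around it. A host-side model of just the bit ordering, with no hardware access:

    #include <stdio.h>

    /* Prints the bit sequence epson_sendbyte() would shift out on the SD
     * line for one byte; purely illustrative. */
    static void model_sendbyte(unsigned char byte)
    {
        int shifter = 0x80;
        int i;

        for (i = 0; i < 8; i++) {
            printf("%d", (shifter & byte) ? 1 : 0);
            shifter >>= 1;
        }
        printf("\n");
    }

    int main(void)
    {
        model_sendbyte(0xCA);   /* 11001010 - the "init phase 1" command byte */
        return 0;
    }
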
+
+static void init_system(void)
+{
+ omap_mcbsp_request(OMAP_MCBSP3);
+ omap_mcbsp_stop(OMAP_MCBSP3);
+}
+
+static void setup_GPIO(void)
+{
+ /* new wave */
+ omap_request_gpio(A_LCD_SSC_RD);
+ omap_request_gpio(A_LCD_SSC_SD);
+ omap_request_gpio(_A_LCD_RESET);
+ omap_request_gpio(_A_LCD_SSC_CS);
+ omap_request_gpio(_A_LCD_SSC_A0);
+
+ /* set all GPIOs to output */
+ omap_set_gpio_direction(A_LCD_SSC_RD, 0);
+ omap_set_gpio_direction(A_LCD_SSC_SD, 0);
+ omap_set_gpio_direction(_A_LCD_RESET, 0);
+ omap_set_gpio_direction(_A_LCD_SSC_CS, 0);
+ omap_set_gpio_direction(_A_LCD_SSC_A0, 0);
+
+ /* set GPIO data */
+ omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+ omap_set_gpio_dataout(A_LCD_SSC_SD, 0);
+ omap_set_gpio_dataout(_A_LCD_RESET, 0);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
+
+static void display_init(void)
+{
+ int i;
+
+ omap_cfg_reg(MCBSP3_CLKX);
+
+ mdelay(2);
+ setup_GPIO();
+ mdelay(2);
+
+ /* reset LCD */
+ omap_set_gpio_dataout(A_LCD_SSC_SD, 1);
+ epson_sendbyte(0, 0x25);
+
+ omap_set_gpio_dataout(_A_LCD_RESET, 0);
+ mdelay(10);
+ omap_set_gpio_dataout(_A_LCD_RESET, 1);
+
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(2);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD, phase 1 */
+ epson_sendbyte(0, 0xCA);
+ for (i = 0; i < 10; i++)
+ epson_sendbyte(1, INIT_1[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 2 */
+ epson_sendbyte(0, 0xCB);
+ for (i = 0; i < 125; i++)
+ epson_sendbyte(1, INIT_2[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 2a */
+ epson_sendbyte(0, 0xCC);
+ for (i = 0; i < 14; i++)
+ epson_sendbyte(1, INIT_3[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 3 */
+ epson_sendbyte(0, 0xBC);
+ epson_sendbyte(1, 0x08);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 4 */
+ epson_sendbyte(0, 0x07);
+ epson_sendbyte(1, 0x05);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 5 */
+ epson_sendbyte(0, 0x94);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 6 */
+ epson_sendbyte(0, 0xC6);
+ epson_sendbyte(1, 0x80);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(100); /* used to be 1000 */
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 7 */
+ epson_sendbyte(0, 0x16);
+ epson_sendbyte(1, 0x02);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0xB1);
+ epson_sendbyte(1, 0x00);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 8 */
+ epson_sendbyte(0, 0x76);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0xDB);
+ epson_sendbyte(1, 0x00);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 9 */
+ epson_sendbyte(0, 0xAF);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void sx1_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static void sx1_panel_disable(struct lcd_panel *panel)
+{
+ printk(KERN_INFO "SX1: LCD panel disable\n");
+ sx1_setmmipower(0);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+
+ epson_sendbyte(0, 0x25);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ epson_sendbyte(0, 0xAE);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(100);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ epson_sendbyte(0, 0x95);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_enable(struct lcd_panel *panel)
+{
+ printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
+ init_system();
+ display_init();
+
+ sx1_setmmipower(1);
+ sx1_setbacklight(0x18);
+ sx1_setkeylight(0x06);
+ return 0;
+}
+
+
+static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel sx1_panel = {
+ .name = "sx1",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
+ OMAP_LCDC_INV_OUTPUT_EN,
+
+ .x_res = 176,
+ .y_res = 220,
+ .data_lines = 16,
+ .bpp = 16,
+ .hsw = 5,
+ .hfp = 5,
+ .hbp = 5,
+ .vsw = 2,
+ .vfp = 1,
+ .vbp = 1,
+ .pixel_clock = 1500,
+
+ .init = sx1_panel_init,
+ .cleanup = sx1_panel_cleanup,
+ .enable = sx1_panel_enable,
+ .disable = sx1_panel_disable,
+ .get_caps = sx1_panel_get_caps,
+};
+
+static int sx1_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&sx1_panel);
+ return 0;
+}
+
+static int sx1_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int sx1_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sx1_panel_driver = {
+ .probe = sx1_panel_probe,
+ .remove = sx1_panel_remove,
+ .suspend = sx1_panel_suspend,
+ .resume = sx1_panel_resume,
+ .driver = {
+ .name = "lcd_sx1",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int sx1_panel_drv_init(void)
+{
+ return platform_driver_register(&sx1_panel_driver);
+}
+
+static void sx1_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&sx1_panel_driver);
+}
+
+module_init(sx1_panel_drv_init);
+module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
new file mode 100644
index 00000000000..9085188d815
--- /dev/null
+++ b/drivers/video/omap/lcdc.c
@@ -0,0 +1,893 @@
+/*
+ * OMAP1 internal LCD controller
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include <asm/mach-types.h>
+
+#define MODULE_NAME "lcdc"
+
+#define OMAP_LCDC_BASE 0xfffec000
+#define OMAP_LCDC_SIZE 256
+#define OMAP_LCDC_IRQ INT_LCD_CTRL
+
+#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
+#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
+#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
+#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
+#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
+#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
+#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
+#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
+
+#define OMAP_LCDC_STAT_DONE (1 << 0)
+#define OMAP_LCDC_STAT_VSYNC (1 << 1)
+#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
+#define OMAP_LCDC_STAT_ABC (1 << 3)
+#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
+#define OMAP_LCDC_STAT_FUF (1 << 5)
+#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
+
+#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
+#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
+#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
+
+#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
+#define OMAP_LCDC_IRQ_DONE (1 << 3)
+#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
+#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
+#define OMAP_LCDC_IRQ_LINE (1 << 6)
+#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
+
+#define MAX_PALETTE_SIZE PAGE_SIZE
+
+enum lcdc_load_mode {
+ OMAP_LCDC_LOAD_PALETTE,
+ OMAP_LCDC_LOAD_FRAME,
+ OMAP_LCDC_LOAD_PALETTE_AND_FRAME
+};
+
+static struct omap_lcd_controller {
+ enum omapfb_update_mode update_mode;
+ int ext_mode;
+
+ unsigned long frame_offset;
+ int screen_width;
+ int xres;
+ int yres;
+
+ enum omapfb_color_format color_mode;
+ int bpp;
+ void *palette_virt;
+ dma_addr_t palette_phys;
+ int palette_code;
+ int palette_size;
+
+ unsigned int irq_mask;
+ struct completion last_frame_complete;
+ struct completion palette_load_complete;
+ struct clk *lcd_ck;
+ struct omapfb_device *fbdev;
+
+ void (*dma_callback)(void *data);
+ void *dma_callback_data;
+
+ int fbmem_allocated;
+ dma_addr_t vram_phys;
+ void *vram_virt;
+ unsigned long vram_size;
+} lcdc;
+
+static inline void enable_irqs(int mask)
+{
+ lcdc.irq_mask |= mask;
+}
+
+static inline void disable_irqs(int mask)
+{
+ lcdc.irq_mask &= ~mask;
+}
+
+static void set_load_mode(enum lcdc_load_mode mode)
+{
+ u32 l;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~(3 << 20);
+ switch (mode) {
+ case OMAP_LCDC_LOAD_PALETTE:
+ l |= 1 << 20;
+ break;
+ case OMAP_LCDC_LOAD_FRAME:
+ l |= 2 << 20;
+ break;
+ case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
+ break;
+ default:
+ BUG();
+ }
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void enable_controller(void)
+{
+ u32 l;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l |= OMAP_LCDC_CTRL_LCD_EN;
+ l &= ~OMAP_LCDC_IRQ_MASK;
+ l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE; /* enabled IRQs */
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller_async(void)
+{
+ u32 l;
+ u32 mask;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
+ /*
+ * Preserve the DONE mask, since we still want to get the
+ * final DONE irq. It will be disabled in the IRQ handler.
+ */
+ mask &= ~OMAP_LCDC_IRQ_DONE;
+ l &= ~mask;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller(void)
+{
+ init_completion(&lcdc.last_frame_complete);
+ disable_controller_async();
+ if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
+ msecs_to_jiffies(500)))
+ dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
+}
+
+static void reset_controller(u32 status)
+{
+ static unsigned long reset_count;
+ static unsigned long last_jiffies;
+
+ disable_controller_async();
+ reset_count++;
+ if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
+ dev_err(lcdc.fbdev->dev,
+ "resetting (status %#010x,reset count %lu)\n",
+ status, reset_count);
+ last_jiffies = jiffies;
+ }
+ if (reset_count < 100) {
+ enable_controller();
+ } else {
+ reset_count = 0;
+ dev_err(lcdc.fbdev->dev,
+ "too many reset attempts, giving up.\n");
+ }
+}
+
+/*
+ * Configure the LCD DMA according to the current mode specified by parameters
+ * in lcdc.fbdev and fbdev->var.
+ */
+static void setup_lcd_dma(void)
+{
+ static const int dma_elem_type[] = {
+ 0,
+ OMAP_DMA_DATA_TYPE_S8,
+ OMAP_DMA_DATA_TYPE_S16,
+ 0,
+ OMAP_DMA_DATA_TYPE_S32,
+ };
+ struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
+ struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+ unsigned long src;
+ int esize, xelem, yelem;
+
+ src = lcdc.vram_phys + lcdc.frame_offset;
+
+ switch (var->rotate) {
+ case 0:
+ if (plane->info.mirror || (src & 3) ||
+ lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
+ (lcdc.xres & 1))
+ esize = 2;
+ else
+ esize = 4;
+ xelem = lcdc.xres * lcdc.bpp / 8 / esize;
+ yelem = lcdc.yres;
+ break;
+ case 90:
+ case 180:
+ case 270:
+ if (cpu_is_omap15xx()) {
+ BUG();
+ }
+ esize = 2;
+ xelem = lcdc.yres * lcdc.bpp / 16;
+ yelem = lcdc.xres;
+ break;
+ default:
+ BUG();
+ return;
+ }
+#ifdef VERBOSE
+ dev_dbg(lcdc.fbdev->dev,
+ "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
+ src, esize, xelem, yelem);
+#endif
+ omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
+ if (!cpu_is_omap15xx()) {
+ int bpp = lcdc.bpp;
+
+ /*
+ * YUV support is only for external mode when we have the
+ * YUV window embedded in a 16bpp frame buffer.
+ */
+ if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
+ bpp = 16;
+ /* Set virtual xres elem size */
+ omap_set_lcd_dma_b1_vxres(
+ lcdc.screen_width * bpp / 8 / esize);
+ /* Setup transformations */
+ omap_set_lcd_dma_b1_rotation(var->rotate);
+ omap_set_lcd_dma_b1_mirror(plane->info.mirror);
+ }
+ omap_setup_lcd_dma();
+}
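
Annotation: for scale, with an assumed unrotated 240x320 RGB565 plane at a word-aligned address, esize is 4, xelem = 240 * 16 / 8 / 4 = 120 elements per line and yelem = 320 lines; mirroring, a misaligned start, YUV420 or an odd xres forces esize down to 2 and doubles xelem.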
+
+static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
+{
+ u32 status;
+
+ status = omap_readl(OMAP_LCDC_STATUS);
+
+ if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
+ reset_controller(status);
+ else {
+ if (status & OMAP_LCDC_STAT_DONE) {
+ u32 l;
+
+ /*
+ * Disable IRQ_DONE. The status bit will be cleared
+ * only when the controller is reenabled and we don't
+ * want to get more interrupts.
+ */
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~OMAP_LCDC_IRQ_DONE;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+ complete(&lcdc.last_frame_complete);
+ }
+ if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
+ disable_controller_async();
+ complete(&lcdc.palette_load_complete);
+ }
+ }
+
+ /*
+ * Clear these interrupt status bits.
+ * Sync_lost and FUF bits were cleared by disabling the LCD controller.
+ * LOADED_PALETTE can be cleared this way only in palette-only
+ * load mode. In other load modes it's cleared by disabling the
+ * controller.
+ */
+ status &= ~(OMAP_LCDC_STAT_VSYNC |
+ OMAP_LCDC_STAT_LOADED_PALETTE |
+ OMAP_LCDC_STAT_ABC |
+ OMAP_LCDC_STAT_LINE_INT);
+ omap_writel(status, OMAP_LCDC_STATUS);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Change to a new video mode. We defer this to a later time to avoid any
+ * flicker and not to mess up the current LCD DMA context. For this we disable
+ * the LCD controler, which will generate a DONE irq after the last frame has
+ * been transferred. Then it'll be safe to reconfigure both the LCD controller
+ * as well as the LCD DMA.
+ */
+static int omap_lcdc_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+ int rot_x, rot_y;
+
+ if (var->rotate == 0) {
+ rot_x = panel->x_res;
+ rot_y = panel->y_res;
+ } else {
+ rot_x = panel->y_res;
+ rot_y = panel->x_res;
+ }
+ if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
+ width > rot_x || height > rot_y) {
+#ifdef VERBOSE
+ dev_dbg(lcdc.fbdev->dev,
+ "invalid plane params plane %d pos_x %d pos_y %d "
+ "w %d h %d\n", plane, pos_x, pos_y, width, height);
+#endif
+ return -EINVAL;
+ }
+
+ lcdc.frame_offset = offset;
+ lcdc.xres = width;
+ lcdc.yres = height;
+ lcdc.screen_width = screen_width;
+ lcdc.color_mode = color_mode;
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_CLUT_8BPP:
+ lcdc.bpp = 8;
+ lcdc.palette_code = 0x3000;
+ lcdc.palette_size = 512;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ lcdc.bpp = 16;
+ lcdc.palette_code = 0x4000;
+ lcdc.palette_size = 32;
+ break;
+ case OMAPFB_COLOR_RGB444:
+ lcdc.bpp = 16;
+ lcdc.palette_code = 0x4000;
+ lcdc.palette_size = 32;
+ break;
+ case OMAPFB_COLOR_YUV420:
+ if (lcdc.ext_mode) {
+ lcdc.bpp = 12;
+ break;
+ }
+ /* fallthrough */
+ case OMAPFB_COLOR_YUV422:
+ if (lcdc.ext_mode) {
+ lcdc.bpp = 16;
+ break;
+ }
+ /* fallthrough */
+ default:
+ /* FIXME: other BPPs.
+ * bpp1: code 0, size 256
+ * bpp2: code 0x1000 size 256
+ * bpp4: code 0x2000 size 256
+ * bpp12: code 0x4000 size 32
+ */
+ dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
+ BUG();
+ return -1;
+ }
+
+ if (lcdc.ext_mode) {
+ setup_lcd_dma();
+ return 0;
+ }
+
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ setup_lcd_dma();
+ enable_controller();
+ }
+
+ return 0;
+}
+
+static int omap_lcdc_enable_plane(int plane, int enable)
+{
+ dev_dbg(lcdc.fbdev->dev,
+ "plane %d enable %d update_mode %d ext_mode %d\n",
+ plane, enable, lcdc.update_mode, lcdc.ext_mode);
+ if (plane != OMAPFB_PLANE_GFX)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Configure the LCD DMA for a palette load operation and do the palette
+ * downloading synchronously. We don't use the frame+palette load mode of
+ * the controller, since the palette can always be downloaded separately.
+ */
+static void load_palette(void)
+{
+ u16 *palette;
+
+ palette = (u16 *)lcdc.palette_virt;
+
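+ /*
+ * The upper bits of the first palette entry carry the pixel format
+ * code for the controller: keep the color bits and OR in the code
+ * selected in omap_lcdc_setup_plane().
+ */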
+ *(u16 *)palette &= 0x0fff;
+ *(u16 *)palette |= lcdc.palette_code;
+
+ omap_set_lcd_dma_b1(lcdc.palette_phys,
+ lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);
+
+ omap_set_lcd_dma_single_transfer(1);
+ omap_setup_lcd_dma();
+
+ init_completion(&lcdc.palette_load_complete);
+ enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+ set_load_mode(OMAP_LCDC_LOAD_PALETTE);
+ enable_controller();
+ if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
+ msecs_to_jiffies(500)))
+ dev_err(lcdc.fbdev->dev, "timeout waiting for palette load\n");
+ /* The controller gets disabled in the irq handler */
+ disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+ omap_stop_lcd_dma();
+
+ omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
+}
+
+/* Used only in internal controller mode */
+static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
+ u16 transp, int update_hw_pal)
+{
+ u16 *palette;
+
+ if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
+ return -EINVAL;
+
+ palette = (u16 *)lcdc.palette_virt;
+
+ palette[regno] &= ~0x0fff;
+ palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4 ) |
+ (blue >> 12);
+
+ if (update_hw_pal) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ load_palette();
+ setup_lcd_dma();
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_controller();
+ }
+
+ return 0;
+}
+
+static void calc_ck_div(int is_tft, int pck, int *pck_div)
+{
+ unsigned long lck;
+
+ pck = max(1, pck);
+ lck = clk_get_rate(lcdc.lcd_ck);
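+ /*
+ * Round the divider up so the resulting pixel clock never exceeds
+ * the requested rate. TFT panels need a divider of at least 2,
+ * other panels at least 3.
+ */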
+ *pck_div = (lck + pck - 1) / pck;
+ if (is_tft)
+ *pck_div = max(2, *pck_div);
+ else
+ *pck_div = max(3, *pck_div);
+ if (*pck_div > 255) {
+ /* FIXME: try to adjust logic clock divider as well */
+ *pck_div = 255;
+ dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
+ pck / 1000);
+ }
+}
+
+static inline void setup_regs(void)
+{
+ u32 l;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+ int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+ unsigned long lck;
+ int pcd;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~OMAP_LCDC_CTRL_LCD_TFT;
+ l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
+#ifdef CONFIG_MACH_OMAP_PALMTE
+/* FIXME:if (machine_is_omap_palmte()) { */
+ /* PalmTE uses alternate TFT setting in 8BPP mode */
+ l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
+/* } */
+#endif
+ omap_writel(l, OMAP_LCDC_CONTROL);
+
+ l = omap_readl(OMAP_LCDC_TIMING2);
+ l &= ~(((1 << 6) - 1) << 20);
+ l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
+ omap_writel(l, OMAP_LCDC_TIMING2);
+
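+ /*
+ * Horizontal timings go into TIMING0: pixels per line at bit 0, HSW
+ * at bit 10, HFP at bit 16 and HBP at bit 24, each stored as
+ * value - 1. The vertical timings are packed into TIMING1 below.
+ */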
+ l = panel->x_res - 1;
+ l |= (panel->hsw - 1) << 10;
+ l |= (panel->hfp - 1) << 16;
+ l |= (panel->hbp - 1) << 24;
+ omap_writel(l, OMAP_LCDC_TIMING0);
+
+ l = panel->y_res - 1;
+ l |= (panel->vsw - 1) << 10;
+ l |= panel->vfp << 16;
+ l |= panel->vbp << 24;
+ omap_writel(l, OMAP_LCDC_TIMING1);
+
+ l = omap_readl(OMAP_LCDC_TIMING2);
+ l &= ~0xff;
+
+ lck = clk_get_rate(lcdc.lcd_ck);
+
+ if (!panel->pcd)
+ calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
+ else {
+ dev_warn(lcdc.fbdev->dev,
+ "Pixel clock divider value is obsolete.\n"
+ "Try to set pixel_clock to %lu and pcd to 0 "
+ "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
+ lck / panel->pcd / 1000, panel->name);
+
+ pcd = panel->pcd;
+ }
+ l |= pcd & 0xff;
+ l |= panel->acb << 8;
+ omap_writel(l, OMAP_LCDC_TIMING2);
+
+ /* update panel info with the exact clock */
+ panel->pixel_clock = lck / pcd / 1000;
+}
+
+/*
+ * Configure the LCD controller, download the color palette and start a looped
+ * DMA transfer of the frame image data. Called only in internal
+ * controller mode.
+ */
+static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
+{
+ int r = 0;
+
+ if (mode != lcdc.update_mode) {
+ switch (mode) {
+ case OMAPFB_AUTO_UPDATE:
+ setup_regs();
+ load_palette();
+
+ /* Setup and start LCD DMA */
+ setup_lcd_dma();
+
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_irqs(OMAP_LCDC_IRQ_DONE);
+ /* This will start the actual DMA transfer */
+ enable_controller();
+ lcdc.update_mode = mode;
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ disable_controller();
+ omap_stop_lcd_dma();
+ lcdc.update_mode = mode;
+ break;
+ default:
+ r = -EINVAL;
+ }
+ }
+
+ return r;
+}
+
+static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
+{
+ return lcdc.update_mode;
+}
+
+/* PM code called only in internal controller mode */
+static void omap_lcdc_suspend(void)
+{
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ }
+}
+
+static void omap_lcdc_resume(void)
+{
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ setup_regs();
+ load_palette();
+ setup_lcd_dma();
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_irqs(OMAP_LCDC_IRQ_DONE);
+ enable_controller();
+ }
+}
+
+static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
+{
+ return;
+}
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
+{
+ BUG_ON(callback == NULL);
+
+ if (lcdc.dma_callback)
+ return -EBUSY;
+
+ lcdc.dma_callback = callback;
+ lcdc.dma_callback_data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
+
+void omap_lcdc_free_dma_callback(void)
+{
+ lcdc.dma_callback = NULL;
+}
+EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
+
+static void lcdc_dma_handler(u16 status, void *data)
+{
+ if (lcdc.dma_callback)
+ lcdc.dma_callback(lcdc.dma_callback_data);
+}
+
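+/*
+ * Map externally reserved framebuffer memory into the kernel's address
+ * space with write-combining attributes. The on-stack vma is used only
+ * to pass parameters to io_remap_pfn_range().
+ */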
+static int mmap_kern(void)
+{
+ struct vm_struct *kvma;
+ struct vm_area_struct vma;
+ pgprot_t pgprot;
+ unsigned long vaddr;
+
+ kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
+ if (kvma == NULL) {
+ dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
+ return -ENOMEM;
+ }
+ vma.vm_mm = &init_mm;
+
+ vaddr = (unsigned long)kvma->addr;
+ vma.vm_start = vaddr;
+ vma.vm_end = vaddr + lcdc.vram_size;
+
+ pgprot = pgprot_writecombine(pgprot_kernel);
+ if (io_remap_pfn_range(&vma, vaddr,
+ lcdc.vram_phys >> PAGE_SHIFT,
+ lcdc.vram_size, pgprot) < 0) {
+ dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
+ return -EAGAIN;
+ }
+
+ lcdc.vram_virt = (void *)vaddr;
+
+ return 0;
+}
+
+static void unmap_kern(void)
+{
+ vunmap(lcdc.vram_virt);
+}
+
+static int alloc_palette_ram(void)
+{
+ lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+ MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
+ if (lcdc.palette_virt == NULL) {
+ dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
+ return -ENOMEM;
+ }
+ memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);
+
+ return 0;
+}
+
+static void free_palette_ram(void)
+{
+ dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
+ lcdc.palette_virt, lcdc.palette_phys);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+ int bpp;
+ int frame_size;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+
+ bpp = panel->bpp;
+ if (bpp == 12)
+ bpp = 16;
+ frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
+ if (region->size > frame_size)
+ frame_size = region->size;
+ lcdc.vram_size = frame_size;
+ lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+ lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
+ if (lcdc.vram_virt == NULL) {
+ dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
+ return -ENOMEM;
+ }
+ region->size = frame_size;
+ region->paddr = lcdc.vram_phys;
+ region->vaddr = lcdc.vram_virt;
+ region->alloc = 1;
+
+ memset(lcdc.vram_virt, 0, lcdc.vram_size);
+
+ return 0;
+}
+
+static void free_fbmem(void)
+{
+ dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
+ lcdc.vram_virt, lcdc.vram_phys);
+}
+
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+ int r;
+
+ if (!req_md->region_cnt) {
+ dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
+ return -EINVAL;
+ }
+
+ if (req_md->region_cnt > 1) {
+ dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
+ req_md->region_cnt = 1;
+ }
+
+ if (req_md->region[0].paddr == 0) {
+ lcdc.fbmem_allocated = 1;
+ if ((r = alloc_fbmem(&req_md->region[0])) < 0)
+ return r;
+ return 0;
+ }
+
+ lcdc.vram_phys = req_md->region[0].paddr;
+ lcdc.vram_size = req_md->region[0].size;
+
+ if ((r = mmap_kern()) < 0)
+ return r;
+
+ dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
+ lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);
+
+ return 0;
+}
+
+static void cleanup_fbmem(void)
+{
+ if (lcdc.fbmem_allocated)
+ free_fbmem();
+ else
+ unmap_kern();
+}
+
+static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r;
+ u32 l;
+ int rate;
+ struct clk *tc_ck;
+
+ lcdc.irq_mask = 0;
+
+ lcdc.fbdev = fbdev;
+ lcdc.ext_mode = ext_mode;
+
+ l = 0;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+
+ /* FIXME:
+ * According to errata some platforms have a clock rate limitation.
+ */
+ lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
+ if (IS_ERR(lcdc.lcd_ck)) {
+ dev_err(fbdev->dev, "unable to access LCD clock\n");
+ r = PTR_ERR(lcdc.lcd_ck);
+ goto fail0;
+ }
+
+ tc_ck = clk_get(NULL, "tc_ck");
+ if (IS_ERR(tc_ck)) {
+ dev_err(fbdev->dev, "unable to access TC clock\n");
+ r = PTR_ERR(tc_ck);
+ goto fail1;
+ }
+
+ rate = clk_get_rate(tc_ck);
+ clk_put(tc_ck);
+
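+ /*
+ * Run the LCD clock at the TC clock rate, reduced on boards that
+ * can't cope with the full rate.
+ */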
+ if (machine_is_ams_delta())
+ rate /= 4;
+ if (machine_is_omap_h3())
+ rate /= 3;
+ r = clk_set_rate(lcdc.lcd_ck, rate);
+ if (r) {
+ dev_err(fbdev->dev, "failed to adjust LCD rate\n");
+ goto fail1;
+ }
+ clk_enable(lcdc.lcd_ck);
+
+ r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "unable to get IRQ\n");
+ goto fail2;
+ }
+
+ r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
+ if (r) {
+ dev_err(fbdev->dev, "unable to get LCD DMA\n");
+ goto fail3;
+ }
+
+ omap_set_lcd_dma_single_transfer(ext_mode);
+ omap_set_lcd_dma_ext_controller(ext_mode);
+
+ if (!ext_mode)
+ if ((r = alloc_palette_ram()) < 0)
+ goto fail4;
+
+ if ((r = setup_fbmem(req_vram)) < 0)
+ goto fail5;
+
+ pr_info("omapfb: LCDC initialized\n");
+
+ return 0;
+fail5:
+ if (!ext_mode)
+ free_palette_ram();
+fail4:
+ omap_free_lcd_dma();
+fail3:
+ free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+fail2:
+ clk_disable(lcdc.lcd_ck);
+fail1:
+ clk_put(lcdc.lcd_ck);
+fail0:
+ return r;
+}
+
+static void omap_lcdc_cleanup(void)
+{
+ if (!lcdc.ext_mode)
+ free_palette_ram();
+ cleanup_fbmem();
+ omap_free_lcd_dma();
+ free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+ clk_disable(lcdc.lcd_ck);
+ clk_put(lcdc.lcd_ck);
+}
+
+const struct lcd_ctrl omap1_int_ctrl = {
+ .name = "internal",
+ .init = omap_lcdc_init,
+ .cleanup = omap_lcdc_cleanup,
+ .get_caps = omap_lcdc_get_caps,
+ .set_update_mode = omap_lcdc_set_update_mode,
+ .get_update_mode = omap_lcdc_get_update_mode,
+ .update_window = NULL,
+ .suspend = omap_lcdc_suspend,
+ .resume = omap_lcdc_resume,
+ .setup_plane = omap_lcdc_setup_plane,
+ .enable_plane = omap_lcdc_enable_plane,
+ .setcolreg = omap_lcdc_setcolreg,
+};
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/omap/lcdc.h
new file mode 100644
index 00000000000..adb731e5314
--- /dev/null
+++ b/drivers/video/omap/lcdc.h
@@ -0,0 +1,7 @@
+#ifndef LCDC_H
+#define LCDC_H
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data);
+void omap_lcdc_free_dma_callback(void);
+
+#endif
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
new file mode 100644
index 00000000000..14d0f7a1114
--- /dev/null
+++ b/drivers/video/omap/omapfb_main.c
@@ -0,0 +1,1941 @@
+/*
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * Acknowledgements:
+ * Alex McMains <aam@ridgerun.com> - Original driver
+ * Juha Yrjola <juha.yrjola@nokia.com> - Original driver and improvements
+ * Dirk Behme <dirk.behme@de.bosch.com> - changes for 2.6 kernel API
+ * Texas Instruments - H3 support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb"
+
+static unsigned int def_accel;
+static unsigned long def_vram[OMAPFB_PLANE_NUM];
+static int def_vram_cnt;
+static unsigned long def_vxres;
+static unsigned long def_vyres;
+static unsigned int def_rotate;
+static unsigned int def_mirror;
+
+#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
+static int manual_update = 1;
+#else
+static int manual_update;
+#endif
+
+static struct platform_device *fbdev_pdev;
+static struct lcd_panel *fbdev_panel;
+static struct omapfb_device *omapfb_dev;
+
+struct caps_table_struct {
+ unsigned long flag;
+ const char *name;
+};
+
+static struct caps_table_struct ctrl_caps[] = {
+ { OMAPFB_CAPS_MANUAL_UPDATE, "manual update" },
+ { OMAPFB_CAPS_TEARSYNC, "tearing synchronization" },
+ { OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
+ { OMAPFB_CAPS_PLANE_SCALE, "scale plane" },
+ { OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
+ { OMAPFB_CAPS_WINDOW_SCALE, "scale window" },
+ { OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
+ { OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" },
+};
+
+static struct caps_table_struct color_caps[] = {
+ { 1 << OMAPFB_COLOR_RGB565, "RGB565", },
+ { 1 << OMAPFB_COLOR_YUV422, "YUV422", },
+ { 1 << OMAPFB_COLOR_YUV420, "YUV420", },
+ { 1 << OMAPFB_COLOR_CLUT_8BPP, "CLUT8", },
+ { 1 << OMAPFB_COLOR_CLUT_4BPP, "CLUT4", },
+ { 1 << OMAPFB_COLOR_CLUT_2BPP, "CLUT2", },
+ { 1 << OMAPFB_COLOR_CLUT_1BPP, "CLUT1", },
+ { 1 << OMAPFB_COLOR_RGB444, "RGB444", },
+ { 1 << OMAPFB_COLOR_YUY422, "YUY422", },
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD panel
+ * ---------------------------------------------------------------------------
+ */
+extern struct lcd_ctrl omap1_int_ctrl;
+extern struct lcd_ctrl omap2_int_ctrl;
+extern struct lcd_ctrl hwa742_ctrl;
+extern struct lcd_ctrl blizzard_ctrl;
+
+static struct lcd_ctrl *ctrls[] = {
+#ifdef CONFIG_ARCH_OMAP1
+ &omap1_int_ctrl,
+#else
+ &omap2_int_ctrl,
+#endif
+
+#ifdef CONFIG_FB_OMAP_LCDC_HWA742
+ &hwa742_ctrl,
+#endif
+#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD
+ &blizzard_ctrl,
+#endif
+};
+
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl_extif omap1_ext_if;
+#else
+extern struct lcd_ctrl_extif omap2_ext_if;
+#endif
+#endif
+
+static void omapfb_rqueue_lock(struct omapfb_device *fbdev)
+{
+ mutex_lock(&fbdev->rqueue_mutex);
+}
+
+static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
+{
+ mutex_unlock(&fbdev->rqueue_mutex);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD controller and LCD DMA
+ * ---------------------------------------------------------------------------
+ */
+/* Lookup table to map elem size to elem type. */
+static const int dma_elem_type[] = {
+ 0,
+ OMAP_DMA_DATA_TYPE_S8,
+ OMAP_DMA_DATA_TYPE_S16,
+ 0,
+ OMAP_DMA_DATA_TYPE_S32,
+};
+
+/*
+ * Allocate resources needed for LCD controller and LCD DMA operations. Video
+ * memory is allocated from system memory according to the virtual display
+ * size, except if a bigger memory size is specified explicitly as a kernel
+ * parameter.
+ */
+static int ctrl_init(struct omapfb_device *fbdev)
+{
+ int r;
+ int i;
+
+ /* kernel/module vram parameters override boot tags/board config */
+ if (def_vram_cnt) {
+ for (i = 0; i < def_vram_cnt; i++)
+ fbdev->mem_desc.region[i].size =
+ PAGE_ALIGN(def_vram[i]);
+ fbdev->mem_desc.region_cnt = i;
+ } else {
+ struct omapfb_platform_data *conf;
+
+ conf = fbdev->dev->platform_data;
+ fbdev->mem_desc = conf->mem_desc;
+ }
+
+ if (!fbdev->mem_desc.region_cnt) {
+ struct lcd_panel *panel = fbdev->panel;
+ int def_size;
+ int bpp = panel->bpp;
+
+ /* 12 bpp is packed in 16 bits */
+ if (bpp == 12)
+ bpp = 16;
+ def_size = def_vxres * def_vyres * bpp / 8;
+ fbdev->mem_desc.region_cnt = 1;
+ fbdev->mem_desc.region[0].size = PAGE_ALIGN(def_size);
+ }
+ r = fbdev->ctrl->init(fbdev, 0, &fbdev->mem_desc);
+ if (r < 0) {
+ dev_err(fbdev->dev, "controller initialization failed (%d)\n",
+ r);
+ return r;
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ dev_dbg(fbdev->dev, "region%d phys %08x virt %p size=%lu\n",
+ i,
+ fbdev->mem_desc.region[i].paddr,
+ fbdev->mem_desc.region[i].vaddr,
+ fbdev->mem_desc.region[i].size);
+ }
+#endif
+ return 0;
+}
+
+static void ctrl_cleanup(struct omapfb_device *fbdev)
+{
+ fbdev->ctrl->cleanup();
+}
+
+/* Must be called with fbdev->rqueue_mutex held. */
+static int ctrl_change_mode(struct fb_info *fbi)
+{
+ int r;
+ unsigned long offset;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var = &fbi->var;
+
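+ /* Byte offset of the visible area within the virtual framebuffer. */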
+ offset = var->yoffset * fbi->fix.line_length +
+ var->xoffset * var->bits_per_pixel / 8;
+
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out,
+ offset, var->xres_virtual,
+ plane->info.pos_x, plane->info.pos_y,
+ var->xres, var->yres, plane->color_mode);
+ if (fbdev->ctrl->set_scale != NULL)
+ r = fbdev->ctrl->set_scale(plane->idx,
+ var->xres, var->yres,
+ plane->info.out_width,
+ plane->info.out_height);
+
+ return r;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * fbdev framework callbacks and the ioctl interface
+ * ---------------------------------------------------------------------------
+ */
+/* Called each time the omapfb device is opened */
+static int omapfb_open(struct fb_info *info, int user)
+{
+ return 0;
+}
+
+static void omapfb_sync(struct fb_info *info);
+
+/* Called when the omapfb device is closed. We make sure that any pending
+ * gfx DMA operations are finished before we return. */
+static int omapfb_release(struct fb_info *info, int user)
+{
+ omapfb_sync(info);
+ return 0;
+}
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ * palette if one is available. For now we support only 16bpp and thus store
+ * the entry only in the pseudo palette.
+ */
+static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp, int update_hw_pal)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var = &info->var;
+ int r = 0;
+
+ switch (plane->color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUV420:
+ case OMAPFB_COLOR_YUY422:
+ r = -EINVAL;
+ break;
+ case OMAPFB_COLOR_CLUT_8BPP:
+ case OMAPFB_COLOR_CLUT_4BPP:
+ case OMAPFB_COLOR_CLUT_2BPP:
+ case OMAPFB_COLOR_CLUT_1BPP:
+ if (fbdev->ctrl->setcolreg)
+ r = fbdev->ctrl->setcolreg(regno, red, green, blue,
+ transp, update_hw_pal);
+ /* Fallthrough */
+ case OMAPFB_COLOR_RGB565:
+ case OMAPFB_COLOR_RGB444:
+ if (r != 0)
+ break;
+
+ if (regno < 0) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (regno < 16) {
+ u16 pal;
+ pal = ((red >> (16 - var->red.length)) <<
+ var->red.offset) |
+ ((green >> (16 - var->green.length)) <<
+ var->green.offset) |
+ (blue >> (16 - var->blue.length));
+ ((u32 *)(info->pseudo_palette))[regno] = pal;
+ }
+ break;
+ default:
+ BUG();
+ }
+ return r;
+}
+
+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ return _setcolreg(info, regno, red, green, blue, transp, 1);
+}
+
+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ int count, index, r;
+ u16 *red, *green, *blue, *transp;
+ u16 trans = 0xffff;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ index = cmap->start;
+
+ for (count = 0; count < cmap->len; count++) {
+ if (transp)
+ trans = *transp++;
+ r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
+ count == cmap->len - 1);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi);
+
+static int omapfb_blank(int blank, struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int do_update = 0;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ switch (blank) {
+ case VESA_NO_BLANKING:
+ if (fbdev->state == OMAPFB_SUSPENDED) {
+ if (fbdev->ctrl->resume)
+ fbdev->ctrl->resume();
+ fbdev->panel->enable(fbdev->panel);
+ fbdev->state = OMAPFB_ACTIVE;
+ if (fbdev->ctrl->get_update_mode() ==
+ OMAPFB_MANUAL_UPDATE)
+ do_update = 1;
+ }
+ break;
+ case VESA_POWERDOWN:
+ if (fbdev->state == OMAPFB_ACTIVE) {
+ fbdev->panel->disable(fbdev->panel);
+ if (fbdev->ctrl->suspend)
+ fbdev->ctrl->suspend();
+ fbdev->state = OMAPFB_SUSPENDED;
+ }
+ break;
+ default:
+ r = -EINVAL;
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ if (r == 0 && do_update)
+ r = omapfb_update_full_screen(fbi);
+
+ return r;
+}
+
+static void omapfb_sync(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+
+ omapfb_rqueue_lock(fbdev);
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set the fb_info.fix fields and also update fbdev.
+ * fb_info.var must already be set up when calling this.
+ */
+static void set_fb_fix(struct fb_info *fbi)
+{
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_mem_region *rg;
+ int bpp;
+
+ rg = &plane->fbdev->mem_desc.region[plane->idx];
+ fbi->screen_base = (char __iomem *)rg->vaddr;
+ fix->smem_start = rg->paddr;
+ fix->smem_len = rg->size;
+
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ bpp = var->bits_per_pixel;
+ if (var->nonstd)
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ else switch (var->bits_per_pixel) {
+ case 16:
+ case 12:
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ /* 12bpp is stored in 16 bits */
+ bpp = 16;
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ }
+ fix->accel = FB_ACCEL_OMAP1610;
+ fix->line_length = var->xres_virtual * bpp / 8;
+}
+
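+/*
+ * Map the fb var (nonstd / bits_per_pixel) to an OMAPFB color mode. A
+ * nonzero var->nonstd selects a YUV mode directly, otherwise the mode is
+ * derived from the bit depth; 12 bpp is promoted to RGB444 stored in
+ * 16 bits.
+ */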
+static int set_color_mode(struct omapfb_plane_struct *plane,
+ struct fb_var_screeninfo *var)
+{
+ switch (var->nonstd) {
+ case 0:
+ break;
+ case OMAPFB_COLOR_YUV422:
+ var->bits_per_pixel = 16;
+ plane->color_mode = var->nonstd;
+ return 0;
+ case OMAPFB_COLOR_YUV420:
+ var->bits_per_pixel = 12;
+ plane->color_mode = var->nonstd;
+ return 0;
+ case OMAPFB_COLOR_YUY422:
+ var->bits_per_pixel = 16;
+ plane->color_mode = var->nonstd;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 1:
+ plane->color_mode = OMAPFB_COLOR_CLUT_1BPP;
+ return 0;
+ case 2:
+ plane->color_mode = OMAPFB_COLOR_CLUT_2BPP;
+ return 0;
+ case 4:
+ plane->color_mode = OMAPFB_COLOR_CLUT_4BPP;
+ return 0;
+ case 8:
+ plane->color_mode = OMAPFB_COLOR_CLUT_8BPP;
+ return 0;
+ case 12:
+ var->bits_per_pixel = 16;
+ plane->color_mode = OMAPFB_COLOR_RGB444;
+ return 0;
+ case 16:
+ plane->color_mode = OMAPFB_COLOR_RGB565;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Check the values in var against our capabilities and try to adjust any
+ * out-of-bound values.
+ */
+static int set_fb_var(struct fb_info *fbi,
+ struct fb_var_screeninfo *var)
+{
+ int bpp;
+ unsigned long max_frame_size;
+ unsigned long line_size;
+ int xres_min, xres_max;
+ int yres_min, yres_max;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct lcd_panel *panel = fbdev->panel;
+
+ if (set_color_mode(plane, var) < 0)
+ return -EINVAL;
+
+ bpp = var->bits_per_pixel;
+ if (plane->color_mode == OMAPFB_COLOR_RGB444)
+ bpp = 16;
+
+ switch (var->rotate) {
+ case 0:
+ case 180:
+ xres_min = OMAPFB_PLANE_XRES_MIN;
+ xres_max = panel->x_res;
+ yres_min = OMAPFB_PLANE_YRES_MIN;
+ yres_max = panel->y_res;
+ if (cpu_is_omap15xx()) {
+ var->xres = panel->x_res;
+ var->yres = panel->y_res;
+ }
+ break;
+ case 90:
+ case 270:
+ xres_min = OMAPFB_PLANE_YRES_MIN;
+ xres_max = panel->y_res;
+ yres_min = OMAPFB_PLANE_XRES_MIN;
+ yres_max = panel->x_res;
+ if (cpu_is_omap15xx()) {
+ var->xres = panel->y_res;
+ var->yres = panel->x_res;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (var->xres < xres_min)
+ var->xres = xres_min;
+ if (var->yres < yres_min)
+ var->yres = yres_min;
+ if (var->xres > xres_max)
+ var->xres = xres_max;
+ if (var->yres > yres_max)
+ var->yres = yres_max;
+
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+ max_frame_size = fbdev->mem_desc.region[plane->idx].size;
+ line_size = var->xres_virtual * bpp / 8;
+ if (line_size * var->yres_virtual > max_frame_size) {
+ /* Try to keep yres_virtual first */
+ line_size = max_frame_size / var->yres_virtual;
+ var->xres_virtual = line_size * 8 / bpp;
+ if (var->xres_virtual < var->xres) {
+ /* Still doesn't fit. Shrink yres_virtual too */
+ var->xres_virtual = var->xres;
+ line_size = var->xres * bpp / 8;
+ var->yres_virtual = max_frame_size / line_size;
+ }
+ /* Recheck this, as the virtual size changed. */
+ if (var->xres_virtual < var->xres)
+ var->xres = var->xres_virtual;
+ if (var->yres_virtual < var->yres)
+ var->yres = var->yres_virtual;
+ if (var->xres < xres_min || var->yres < yres_min)
+ return -EINVAL;
+ }
+ if (var->xres + var->xoffset > var->xres_virtual)
+ var->xoffset = var->xres_virtual - var->xres;
+ if (var->yres + var->yoffset > var->yres_virtual)
+ var->yoffset = var->yres_virtual - var->yres;
+ line_size = var->xres * bpp / 8;
+
+ if (plane->color_mode == OMAPFB_COLOR_RGB444) {
+ var->red.offset = 8; var->red.length = 4;
+ var->red.msb_right = 0;
+ var->green.offset = 4; var->green.length = 4;
+ var->green.msb_right = 0;
+ var->blue.offset = 0; var->blue.length = 4;
+ var->blue.msb_right = 0;
+ } else {
+ var->red.offset = 11; var->red.length = 5;
+ var->red.msb_right = 0;
+ var->green.offset = 5; var->green.length = 6;
+ var->green.msb_right = 0;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->blue.msb_right = 0;
+ }
+
+ var->height = -1;
+ var->width = -1;
+ var->grayscale = 0;
+
+ /* pixclock is in ps, the other timings are in pixel clock units */
+ var->pixclock = 10000000 / (panel->pixel_clock / 100);
+ var->left_margin = panel->hfp;
+ var->right_margin = panel->hbp;
+ var->upper_margin = panel->vfp;
+ var->lower_margin = panel->vbp;
+ var->hsync_len = panel->hsw;
+ var->vsync_len = panel->vsw;
+
+ /* TODO: get these from panel->config */
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->sync = 0;
+
+ return 0;
+}
+
+
+/* Set rotation (0, 90, 180, 270 degrees), and switch to the new mode. */
+static void omapfb_rotate(struct fb_info *fbi, int rotate)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+
+ omapfb_rqueue_lock(fbdev);
+ if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ new_var->rotate = rotate;
+ if (set_fb_var(fbi, new_var) == 0 &&
+ memcmp(new_var, &fbi->var, sizeof(*new_var))) {
+ memcpy(&fbi->var, new_var, sizeof(*new_var));
+ ctrl_change_mode(fbi);
+ }
+ }
+ omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set new x,y offsets in the virtual display for the visible area and switch
+ * to the new mode.
+ */
+static int omapfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ if (var->xoffset != fbi->var.xoffset ||
+ var->yoffset != fbi->var.yoffset) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ new_var->xoffset = var->xoffset;
+ new_var->yoffset = var->yoffset;
+ if (set_fb_var(fbi, new_var))
+ r = -EINVAL;
+ else {
+ memcpy(&fbi->var, new_var, sizeof(*new_var));
+ ctrl_change_mode(fbi);
+ }
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/* Set mirroring along the vertical axis and switch to the new mode. */
+static int omapfb_mirror(struct fb_info *fbi, int mirror)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ mirror = mirror ? 1 : 0;
+ if (cpu_is_omap15xx())
+ r = -EINVAL;
+ else if (mirror != plane->info.mirror) {
+ plane->info.mirror = mirror;
+ r = ctrl_change_mode(fbi);
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Check the values in var and, where possible, adjust out-of-bound values;
+ * otherwise return an error.
+ */
+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ if (fbdev->ctrl->sync != NULL)
+ fbdev->ctrl->sync();
+ r = set_fb_var(fbi, var);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Switch to a new mode. The parameters for it have already been checked by
+ * omapfb_check_var.
+ */
+static int omapfb_set_par(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ set_fb_fix(fbi);
+ r = ctrl_change_mode(fbi);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+int omapfb_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*callback)(void *),
+ void *callback_data)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var;
+
+ var = &fbi->var;
+ if (win->x >= var->xres || win->y >= var->yres ||
+ win->out_x > var->xres || win->out_y >= var->yres)
+ return -EINVAL;
+
+ if (!fbdev->ctrl->update_window ||
+ fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+ return -ENODEV;
+
+ if (win->x + win->width >= var->xres)
+ win->width = var->xres - win->x;
+ if (win->y + win->height >= var->yres)
+ win->height = var->yres - win->y;
+ /* The out sizes should be cropped to the LCD size */
+ if (win->out_x + win->out_width > fbdev->panel->x_res)
+ win->out_width = fbdev->panel->x_res - win->out_x;
+ if (win->out_y + win->out_height > fbdev->panel->y_res)
+ win->out_height = fbdev->panel->y_res - win->out_y;
+ if (!win->width || !win->height || !win->out_width || !win->out_height)
+ return 0;
+
+ return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
+}
+EXPORT_SYMBOL(omapfb_update_window_async);
+
+static int omapfb_update_win(struct fb_info *fbi,
+ struct omapfb_update_window *win)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ int ret;
+
+ omapfb_rqueue_lock(plane->fbdev);
+ ret = omapfb_update_window_async(fbi, win, NULL, 0);
+ omapfb_rqueue_unlock(plane->fbdev);
+
+ return ret;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_update_window win;
+ int r;
+
+ if (!fbdev->ctrl->update_window ||
+ fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+ return -ENODEV;
+
+ win.x = 0;
+ win.y = 0;
+ win.width = fbi->var.xres;
+ win.height = fbi->var.yres;
+ win.out_x = 0;
+ win.out_y = 0;
+ win.out_width = fbi->var.xres;
+ win.out_height = fbi->var.yres;
+ win.format = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->update_window(fbi, &win, NULL, 0);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct lcd_panel *panel = fbdev->panel;
+ struct omapfb_plane_info old_info;
+ int r = 0;
+
+ if (pi->pos_x + pi->out_width > panel->x_res ||
+ pi->pos_y + pi->out_height > panel->y_res)
+ return -EINVAL;
+
+ omapfb_rqueue_lock(fbdev);
+ if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) {
+ /*
+ * This plane's memory was freed, can't enable it
+ * until it's reallocated.
+ */
+ r = -EINVAL;
+ goto out;
+ }
+ old_info = plane->info;
+ plane->info = *pi;
+ if (pi->enabled) {
+ r = ctrl_change_mode(fbi);
+ if (r < 0) {
+ plane->info = old_info;
+ goto out;
+ }
+ }
+ r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled);
+ if (r < 0) {
+ plane->info = old_info;
+ goto out;
+ }
+out:
+ omapfb_rqueue_unlock(fbdev);
+ return r;
+}
+
+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+
+ *pi = plane->info;
+ return 0;
+}
+
+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_mem_region *rg = &fbdev->mem_desc.region[plane->idx];
+ size_t size;
+ int r = 0;
+
+ if (fbdev->ctrl->setup_mem == NULL)
+ return -ENODEV;
+ if (mi->type > OMAPFB_MEMTYPE_MAX)
+ return -EINVAL;
+
+ size = PAGE_ALIGN(mi->size);
+ omapfb_rqueue_lock(fbdev);
+ if (plane->info.enabled) {
+ r = -EBUSY;
+ goto out;
+ }
+ if (rg->size != size || rg->type != mi->type) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+ unsigned long old_size = rg->size;
+ u8 old_type = rg->type;
+ unsigned long paddr;
+
+ rg->size = size;
+ rg->type = mi->type;
+ /*
+ * size == 0 is a special case, for which we
+ * don't check / adjust the screen parameters.
+ * This isn't a problem since the plane can't
+ * be reenabled unless its size is > 0.
+ */
+ if (old_size != size && size) {
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ r = set_fb_var(fbi, new_var);
+ if (r < 0)
+ goto out;
+ }
+
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr);
+ if (r < 0) {
+ /* Revert changes. */
+ rg->size = old_size;
+ rg->type = old_type;
+ goto out;
+ }
+ rg->paddr = paddr;
+
+ if (old_size != size) {
+ if (size) {
+ memcpy(&fbi->var, new_var, sizeof(fbi->var));
+ set_fb_fix(fbi);
+ } else {
+ /*
+ * Set these explicitly to indicate that the
+ * plane memory has been deallocated and the other
+ * screen parameters in var / fix are invalid.
+ */
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ }
+ }
+ }
+out:
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_mem_region *rg;
+
+ rg = &fbdev->mem_desc.region[plane->idx];
+ memset(mi, 0, sizeof(*mi));
+ mi->size = rg->size;
+ mi->type = rg->type;
+
+ return 0;
+}
+
+static int omapfb_set_color_key(struct omapfb_device *fbdev,
+ struct omapfb_color_key *ck)
+{
+ int r;
+
+ if (!fbdev->ctrl->set_color_key)
+ return -ENODEV;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->set_color_key(ck);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_get_color_key(struct omapfb_device *fbdev,
+ struct omapfb_color_key *ck)
+{
+ int r;
+
+ if (!fbdev->ctrl->get_color_key)
+ return -ENODEV;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->get_color_key(ck);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static struct blocking_notifier_head omapfb_client_list[OMAPFB_PLANE_NUM];
+static int notifier_inited;
+
+static void omapfb_init_notifier(void)
+{
+ int i;
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+ BLOCKING_INIT_NOTIFIER_HEAD(&omapfb_client_list[i]);
+}
+
+int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
+ omapfb_notifier_callback_t callback,
+ void *callback_data)
+{
+ int r;
+
+ if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
+ return -EINVAL;
+
+ if (!notifier_inited) {
+ omapfb_init_notifier();
+ notifier_inited = 1;
+ }
+
+ omapfb_nb->nb.notifier_call = (int (*)(struct notifier_block *,
+ unsigned long, void *))callback;
+ omapfb_nb->data = callback_data;
+ r = blocking_notifier_chain_register(
+ &omapfb_client_list[omapfb_nb->plane_idx],
+ &omapfb_nb->nb);
+ if (r)
+ return r;
+ if (omapfb_dev != NULL &&
+ omapfb_dev->ctrl && omapfb_dev->ctrl->bind_client) {
+ omapfb_dev->ctrl->bind_client(omapfb_nb);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omapfb_register_client);
+
+int omapfb_unregister_client(struct omapfb_notifier_block *omapfb_nb)
+{
+ return blocking_notifier_chain_unregister(
+ &omapfb_client_list[omapfb_nb->plane_idx], &omapfb_nb->nb);
+}
+EXPORT_SYMBOL(omapfb_unregister_client);
+
+void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
+{
+ int i;
+
+ if (!notifier_inited)
+ /* no client registered yet */
+ return;
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+ blocking_notifier_call_chain(&omapfb_client_list[i], event,
+ fbdev->fb_info[i]);
+}
+EXPORT_SYMBOL(omapfb_notify_clients);
+
+static int omapfb_set_update_mode(struct omapfb_device *fbdev,
+ enum omapfb_update_mode mode)
+{
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->set_update_mode(mode);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static enum omapfb_update_mode omapfb_get_update_mode(struct omapfb_device *fbdev)
+{
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->get_update_mode();
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static void omapfb_get_caps(struct omapfb_device *fbdev, int plane,
+ struct omapfb_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ fbdev->ctrl->get_caps(plane, caps);
+ caps->ctrl |= fbdev->panel->get_caps(fbdev->panel);
+}
+
+/* For lcd testing */
+void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval)
+{
+ omapfb_rqueue_lock(fbdev);
+ *(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
+ if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) {
+ struct omapfb_update_window win;
+
+ memset(&win, 0, sizeof(win));
+ win.width = 2;
+ win.height = 2;
+ win.out_width = 2;
+ win.out_height = 2;
+ fbdev->ctrl->update_window(fbdev->fb_info[0], &win, NULL, 0);
+ }
+ omapfb_rqueue_unlock(fbdev);
+}
+EXPORT_SYMBOL(omapfb_write_first_pixel);
+
+/*
+ * Ioctl interface. Part of the kernel mode frame buffer API is duplicated
+ * here to be accessible by user mode code.
+ */
+static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_ops *ops = fbi->fbops;
+ union {
+ struct omapfb_update_window update_window;
+ struct omapfb_plane_info plane_info;
+ struct omapfb_mem_info mem_info;
+ struct omapfb_color_key color_key;
+ enum omapfb_update_mode update_mode;
+ struct omapfb_caps caps;
+ unsigned int mirror;
+ int plane_out;
+ int enable_plane;
+ } p;
+ int r = 0;
+
+ BUG_ON(!ops);
+ switch (cmd) {
+ case OMAPFB_MIRROR:
+ if (get_user(p.mirror, (int __user *)arg))
+ r = -EFAULT;
+ else
+ omapfb_mirror(fbi, p.mirror);
+ break;
+ case OMAPFB_SYNC_GFX:
+ omapfb_sync(fbi);
+ break;
+ case OMAPFB_VSYNC:
+ break;
+ case OMAPFB_SET_UPDATE_MODE:
+ if (get_user(p.update_mode, (int __user *)arg))
+ r = -EFAULT;
+ else
+ r = omapfb_set_update_mode(fbdev, p.update_mode);
+ break;
+ case OMAPFB_GET_UPDATE_MODE:
+ p.update_mode = omapfb_get_update_mode(fbdev);
+ if (put_user(p.update_mode,
+ (enum omapfb_update_mode __user *)arg))
+ r = -EFAULT;
+ break;
+ case OMAPFB_UPDATE_WINDOW_OLD:
+ if (copy_from_user(&p.update_window, (void __user *)arg,
+ sizeof(struct omapfb_update_window_old)))
+ r = -EFAULT;
+ else {
+ struct omapfb_update_window *u = &p.update_window;
+ u->out_x = u->x;
+ u->out_y = u->y;
+ u->out_width = u->width;
+ u->out_height = u->height;
+ memset(u->reserved, 0, sizeof(u->reserved));
+ r = omapfb_update_win(fbi, u);
+ }
+ break;
+ case OMAPFB_UPDATE_WINDOW:
+ if (copy_from_user(&p.update_window, (void __user *)arg,
+ sizeof(p.update_window)))
+ r = -EFAULT;
+ else
+ r = omapfb_update_win(fbi, &p.update_window);
+ break;
+ case OMAPFB_SETUP_PLANE:
+ if (copy_from_user(&p.plane_info, (void __user *)arg,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_plane(fbi, &p.plane_info);
+ break;
+ case OMAPFB_QUERY_PLANE:
+ if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.plane_info,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_SETUP_MEM:
+ if (copy_from_user(&p.mem_info, (void __user *)arg,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_mem(fbi, &p.mem_info);
+ break;
+ case OMAPFB_QUERY_MEM:
+ if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.mem_info,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_SET_COLOR_KEY:
+ if (copy_from_user(&p.color_key, (void __user *)arg,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ else
+ r = omapfb_set_color_key(fbdev, &p.color_key);
+ break;
+ case OMAPFB_GET_COLOR_KEY:
+ if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.color_key,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_GET_CAPS:
+ omapfb_get_caps(fbdev, plane->idx, &p.caps);
+ if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_LCD_TEST:
+ {
+ int test_num;
+
+ if (get_user(test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!fbdev->panel->run_test) {
+ r = -EINVAL;
+ break;
+ }
+ r = fbdev->panel->run_test(fbdev->panel, test_num);
+ break;
+ }
+ case OMAPFB_CTRL_TEST:
+ {
+ int test_num;
+
+ if (get_user(test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!fbdev->ctrl->run_test) {
+ r = -EINVAL;
+ break;
+ }
+ r = fbdev->ctrl->run_test(test_num);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->mmap(info, vma);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Callback table for the frame buffer framework. Some of these pointers
+ * will be changed according to the current setting of fb_info->accel_flags.
+ */
+static struct fb_ops omapfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = omapfb_open,
+ .fb_release = omapfb_release,
+ .fb_setcolreg = omapfb_setcolreg,
+ .fb_setcmap = omapfb_setcmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_blank = omapfb_blank,
+ .fb_ioctl = omapfb_ioctl,
+ .fb_check_var = omapfb_check_var,
+ .fb_set_par = omapfb_set_par,
+ .fb_rotate = omapfb_rotate,
+ .fb_pan_display = omapfb_pan_display,
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * Sysfs interface
+ * ---------------------------------------------------------------------------
+ */
+/* omapfbX sysfs entries */
+static ssize_t omapfb_show_caps_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int plane;
+ size_t size;
+ struct omapfb_caps caps;
+
+ plane = 0;
+ size = 0;
+ while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ omapfb_get_caps(fbdev, plane, &caps);
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ "plane#%d %#010x %#010x %#010x\n",
+ plane, caps.ctrl, caps.plane_color, caps.wnd_color);
+ plane++;
+ }
+ return size;
+}
+
+static ssize_t omapfb_show_caps_text(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int i;
+ struct omapfb_caps caps;
+ int plane;
+ size_t size;
+
+ plane = 0;
+ size = 0;
+ while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ omapfb_get_caps(fbdev, plane, &caps);
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ "plane#%d:\n", plane);
+ for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (ctrl_caps[i].flag & caps.ctrl)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", ctrl_caps[i].name);
+ }
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " plane colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (color_caps[i].flag & caps.plane_color)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", color_caps[i].name);
+ }
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " window colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (color_caps[i].flag & caps.wnd_color)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", color_caps[i].name);
+ }
+
+ plane++;
+ }
+ return size;
+}
+
+static DEVICE_ATTR(caps_num, 0444, omapfb_show_caps_num, NULL);
+static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
+
+/* panel sysfs entries */
+static ssize_t omapfb_show_panel_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
+}
+
+static ssize_t omapfb_show_bklight_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+ if (fbdev->panel->get_bklight_level) {
+ r = snprintf(buf, PAGE_SIZE, "%d\n",
+ fbdev->panel->get_bklight_level(fbdev->panel));
+ } else
+ r = -ENODEV;
+ return r;
+}
+
+static ssize_t omapfb_store_bklight_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+ if (fbdev->panel->set_bklight_level) {
+ unsigned int level;
+
+ if (sscanf(buf, "%10d", &level) == 1) {
+ r = fbdev->panel->set_bklight_level(fbdev->panel,
+ level);
+ } else
+ r = -EINVAL;
+ } else
+ r = -ENODEV;
+ return r ? r : size;
+}
+
+static ssize_t omapfb_show_bklight_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+ if (fbdev->panel->get_bklight_max) {
+ r = snprintf(buf, PAGE_SIZE, "%d\n",
+ fbdev->panel->get_bklight_max(fbdev->panel));
+ } else
+ r = -ENODEV;
+ return r;
+}
+
+static struct device_attribute dev_attr_panel_name =
+ __ATTR(name, 0444, omapfb_show_panel_name, NULL);
+static DEVICE_ATTR(backlight_level, 0664,
+ omapfb_show_bklight_level, omapfb_store_bklight_level);
+static DEVICE_ATTR(backlight_max, 0444, omapfb_show_bklight_max, NULL);
+
+static struct attribute *panel_attrs[] = {
+ &dev_attr_panel_name.attr,
+ &dev_attr_backlight_level.attr,
+ &dev_attr_backlight_max.attr,
+ NULL,
+};
+
+static struct attribute_group panel_attr_grp = {
+ .name = "panel",
+ .attrs = panel_attrs,
+};
+
+/* ctrl sysfs entries */
+static ssize_t omapfb_show_ctrl_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
+}
+
+static struct device_attribute dev_attr_ctrl_name =
+ __ATTR(name, 0444, omapfb_show_ctrl_name, NULL);
+
+static struct attribute *ctrl_attrs[] = {
+ &dev_attr_ctrl_name.attr,
+ NULL,
+};
+
+static struct attribute_group ctrl_attr_grp = {
+ .name = "ctrl",
+ .attrs = ctrl_attrs,
+};
+
+static int omapfb_register_sysfs(struct omapfb_device *fbdev)
+{
+ int r;
+
+ if ((r = device_create_file(fbdev->dev, &dev_attr_caps_num)))
+ goto fail0;
+
+ if ((r = device_create_file(fbdev->dev, &dev_attr_caps_text)))
+ goto fail1;
+
+ if ((r = sysfs_create_group(&fbdev->dev->kobj, &panel_attr_grp)))
+ goto fail2;
+
+ if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp)))
+ goto fail3;
+
+ return 0;
+fail3:
+ sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+fail2:
+ device_remove_file(fbdev->dev, &dev_attr_caps_text);
+fail1:
+ device_remove_file(fbdev->dev, &dev_attr_caps_num);
+fail0:
+ dev_err(fbdev->dev, "unable to register sysfs interface\n");
+ return r;
+}
+
+static void omapfb_unregister_sysfs(struct omapfb_device *fbdev)
+{
+ sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp);
+ sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+ device_remove_file(fbdev->dev, &dev_attr_caps_num);
+ device_remove_file(fbdev->dev, &dev_attr_caps_text);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LDM callbacks
+ * ---------------------------------------------------------------------------
+ */
+/* Initialize the system fb_info object and set the default video mode.
+ * The frame buffer memory was already allocated by lcddma_init.
+ */
+static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_fix_screeninfo *fix = &info->fix;
+ int r = 0;
+
+ info->fbops = &omapfb_ops;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+
+ info->pseudo_palette = fbdev->pseudo_palette;
+
+ var->accel_flags = def_accel ? FB_ACCELF_TEXT : 0;
+ var->xres = def_vxres;
+ var->yres = def_vyres;
+ var->xres_virtual = def_vxres;
+ var->yres_virtual = def_vyres;
+ var->rotate = def_rotate;
+ var->bits_per_pixel = fbdev->panel->bpp;
+
+ set_fb_var(info, var);
+ set_fb_fix(info);
+
+ r = fb_alloc_cmap(&info->cmap, 16, 0);
+ if (r != 0)
+ dev_err(fbdev->dev, "unable to allocate color map memory\n");
+
+ return r;
+}
+
+/* Release the fb_info object */
+static void fbinfo_cleanup(struct omapfb_device *fbdev, struct fb_info *fbi)
+{
+ fb_dealloc_cmap(&fbi->cmap);
+}
+
+static void planes_cleanup(struct omapfb_device *fbdev)
+{
+ int i;
+
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ if (fbdev->fb_info[i] == NULL)
+ break;
+ fbinfo_cleanup(fbdev, fbdev->fb_info[i]);
+ framebuffer_release(fbdev->fb_info[i]);
+ }
+}
+
+static int planes_init(struct omapfb_device *fbdev)
+{
+ struct fb_info *fbi;
+ int i;
+ int r;
+
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ struct omapfb_plane_struct *plane;
+ fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
+ fbdev->dev);
+ if (fbi == NULL) {
+ dev_err(fbdev->dev,
+ "unable to allocate memory for plane info\n");
+ planes_cleanup(fbdev);
+ return -ENOMEM;
+ }
+ plane = fbi->par;
+ plane->idx = i;
+ plane->fbdev = fbdev;
+ plane->info.mirror = def_mirror;
+ fbdev->fb_info[i] = fbi;
+
+ if ((r = fbinfo_init(fbdev, fbi)) < 0) {
+ framebuffer_release(fbi);
+ planes_cleanup(fbdev);
+ return r;
+ }
+ plane->info.out_width = fbi->var.xres;
+ plane->info.out_height = fbi->var.yres;
+ }
+ return 0;
+}
+
+/*
+ * Free driver resources. Can be called to roll back an aborted initialization
+ * sequence.
+ */
+static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
+{
+ int i;
+
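+ /*
+ * Each case falls through to the one below, undoing the init steps
+ * in reverse order starting from the given state.
+ */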
+ switch (state) {
+ case OMAPFB_ACTIVE:
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
+ unregister_framebuffer(fbdev->fb_info[i]);
+ case 7:
+ omapfb_unregister_sysfs(fbdev);
+ case 6:
+ fbdev->panel->disable(fbdev->panel);
+ case 5:
+ omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
+ case 4:
+ planes_cleanup(fbdev);
+ case 3:
+ ctrl_cleanup(fbdev);
+ case 2:
+ fbdev->panel->cleanup(fbdev->panel);
+ case 1:
+ dev_set_drvdata(fbdev->dev, NULL);
+ kfree(fbdev);
+ case 0:
+ /* nothing to free */
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int omapfb_find_ctrl(struct omapfb_device *fbdev)
+{
+ struct omapfb_platform_data *conf;
+ char name[17];
+ int i;
+
+ conf = fbdev->dev->platform_data;
+
+ fbdev->ctrl = NULL;
+
+ strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+
+ if (strcmp(name, "internal") == 0) {
+ fbdev->ctrl = fbdev->int_ctrl;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctrls); i++) {
+ dev_dbg(fbdev->dev, "ctrl %s\n", ctrls[i]->name);
+ if (strcmp(ctrls[i]->name, name) == 0) {
+ fbdev->ctrl = ctrls[i];
+ break;
+ }
+ }
+
+ if (fbdev->ctrl == NULL) {
+ dev_dbg(fbdev->dev, "ctrl %s not supported\n", name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void check_required_callbacks(struct omapfb_device *fbdev)
+{
+#define _C(x) (fbdev->ctrl->x != NULL)
+#define _P(x) (fbdev->panel->x != NULL)
+ BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
+ BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
+ _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
+ _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
+ _P(get_caps)));
+#undef _P
+#undef _C
+}
+
+/*
+ * Called by LDM binding to probe and attach a new device.
+ * Initialization sequence:
+ * 1. allocate system omapfb_device structure
+ * 2. select controller type according to platform configuration
+ * init LCD panel
+ * 3. init LCD controller and LCD DMA
+ * 4. init system fb_info structure for all planes
+ * 5. setup video mode for first plane and enable it
+ * 6. enable LCD panel
+ * 7. register sysfs attributes
+ * OMAPFB_ACTIVE: register system fb_info structure for all planes
+ */
+static int omapfb_do_probe(struct platform_device *pdev,
+ struct lcd_panel *panel)
+{
+ struct omapfb_device *fbdev = NULL;
+ int init_state;
+ unsigned long phz, hhz, vhz;
+ unsigned long vram;
+ int i;
+ int r = 0;
+
+ init_state = 0;
+
+ if (pdev->num_resources != 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ r = -ENODEV;
+ goto cleanup;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ r = -ENOENT;
+ goto cleanup;
+ }
+
+ fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
+ if (fbdev == NULL) {
+ dev_err(&pdev->dev,
+ "unable to allocate memory for device info\n");
+ r = -ENOMEM;
+ goto cleanup;
+ }
+ init_state++;
+
+ fbdev->dev = &pdev->dev;
+ fbdev->panel = panel;
+ platform_set_drvdata(pdev, fbdev);
+
+ mutex_init(&fbdev->rqueue_mutex);
+
+#ifdef CONFIG_ARCH_OMAP1
+ fbdev->int_ctrl = &omap1_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+ fbdev->ext_if = &omap1_ext_if;
+#endif
+#else /* OMAP2 */
+ fbdev->int_ctrl = &omap2_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+ fbdev->ext_if = &omap2_ext_if;
+#endif
+#endif
+ if (omapfb_find_ctrl(fbdev) < 0) {
+ dev_err(fbdev->dev,
+ "LCD controller not found, board not supported\n");
+ r = -ENODEV;
+ goto cleanup;
+ }
+
+ r = fbdev->panel->init(fbdev->panel, fbdev);
+ if (r)
+ goto cleanup;
+
+ pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);
+
+ def_vxres = def_vxres ? : fbdev->panel->x_res;
+ def_vyres = def_vyres ? : fbdev->panel->y_res;
+
+ init_state++;
+
+ r = ctrl_init(fbdev);
+ if (r)
+ goto cleanup;
+ if (fbdev->ctrl->mmap != NULL)
+ omapfb_ops.fb_mmap = omapfb_mmap;
+ init_state++;
+
+ check_required_callbacks(fbdev);
+
+ r = planes_init(fbdev);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+#ifdef CONFIG_FB_OMAP_DMA_TUNE
+ /* Set DMA priority for EMIFF access to highest */
+ if (cpu_class_is_omap1())
+ omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15);
+#endif
+
+ r = ctrl_change_mode(fbdev->fb_info[0]);
+ if (r) {
+ dev_err(fbdev->dev, "mode setting failed\n");
+ goto cleanup;
+ }
+
+ /* GFX plane is enabled by default */
+ r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+ if (r)
+ goto cleanup;
+
+ omapfb_set_update_mode(fbdev, manual_update ?
+ OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE);
+ init_state++;
+
+ r = fbdev->panel->enable(fbdev->panel);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+ r = omapfb_register_sysfs(fbdev);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+ vram = 0;
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ r = register_framebuffer(fbdev->fb_info[i]);
+ if (r != 0) {
+ dev_err(fbdev->dev,
+ "registering framebuffer %d failed\n", i);
+ goto cleanup;
+ }
+ vram += fbdev->mem_desc.region[i].size;
+ }
+
+ fbdev->state = OMAPFB_ACTIVE;
+
+ panel = fbdev->panel;
+ phz = panel->pixel_clock * 1000;
+ hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw);
+ vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw);
+
+ omapfb_dev = fbdev;
+
+ pr_info("omapfb: Framebuffer initialized. Total vram %lu planes %d\n",
+ vram, fbdev->mem_desc.region_cnt);
+ pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
+ "vfreq %lu.%lu Hz\n",
+ phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
+
+ return 0;
+
+cleanup:
+ omapfb_free_resources(fbdev, init_state);
+
+ return r;
+}
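+
+/*
+ * Illustrative note (not part of the driver logic above): the refresh
+ * figures printed at the end of omapfb_do_probe() are kept in units of
+ * 0.1 Hz so one decimal survives the integer math. Assuming a hypothetical
+ * panel with pixel_clock = 6000 kHz, x_res = 240, y_res = 320,
+ * hfp + hbp + hsw = 28 and vfp + vbp + vsw = 6:
+ *
+ *   phz = 6000 * 1000            = 6000000 Hz
+ *   hhz = phz * 10 / (240 + 28)  =  223880 (~22.39 kHz)
+ *   vhz = hhz / (320 + 6)        =     686 (~68.6 Hz)
+ */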
+
+static int omapfb_probe(struct platform_device *pdev)
+{
+ BUG_ON(fbdev_pdev != NULL);
+
+ /* Delay actual initialization until the LCD is registered */
+ fbdev_pdev = pdev;
+ if (fbdev_panel != NULL)
+ omapfb_do_probe(fbdev_pdev, fbdev_panel);
+ return 0;
+}
+
+void omapfb_register_panel(struct lcd_panel *panel)
+{
+ BUG_ON(fbdev_panel != NULL);
+
+ fbdev_panel = panel;
+ if (fbdev_pdev != NULL)
+ omapfb_do_probe(fbdev_pdev, fbdev_panel);
+}
+
+/* Called when the device is being detached from the driver */
+static int omapfb_remove(struct platform_device *pdev)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+ enum omapfb_state saved_state = fbdev->state;
+
+ /* FIXME: wait till completion of pending events */
+
+ fbdev->state = OMAPFB_DISABLED;
+ omapfb_free_resources(fbdev, saved_state);
+
+ return 0;
+}
+
+/* PM suspend */
+static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+ omapfb_blank(VESA_POWERDOWN, fbdev->fb_info[0]);
+
+ return 0;
+}
+
+/* PM resume */
+static int omapfb_resume(struct platform_device *pdev)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+ omapfb_blank(VESA_NO_BLANKING, fbdev->fb_info[0]);
+ return 0;
+}
+
+static struct platform_driver omapfb_driver = {
+ .probe = omapfb_probe,
+ .remove = omapfb_remove,
+ .suspend = omapfb_suspend,
+ .resume = omapfb_resume,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifndef MODULE
+
+/* Process kernel command line parameters */
+static int __init omapfb_setup(char *options)
+{
+ char *this_opt = NULL;
+ int r = 0;
+
+ pr_debug("omapfb: options %s\n", options);
+
+ if (!options || !*options)
+ return 0;
+
+ while (!r && (this_opt = strsep(&options, ",")) != NULL) {
+ if (!strncmp(this_opt, "accel", 5))
+ def_accel = 1;
+ else if (!strncmp(this_opt, "vram:", 5)) {
+ char *suffix;
+ unsigned long vram;
+ vram = (simple_strtoul(this_opt + 5, &suffix, 0));
+ switch (suffix[0]) {
+ case '\0':
+ break;
+ case 'm':
+ case 'M':
+ vram *= 1024;
+ /* Fall through */
+ case 'k':
+ case 'K':
+ vram *= 1024;
+ break;
+ default:
+ pr_debug("omapfb: invalid vram suffix %c\n",
+ suffix[0]);
+ r = -1;
+ }
+ def_vram[def_vram_cnt++] = vram;
+ }
+ else if (!strncmp(this_opt, "vxres:", 6))
+ def_vxres = simple_strtoul(this_opt + 6, NULL, 0);
+ else if (!strncmp(this_opt, "vyres:", 6))
+ def_vyres = simple_strtoul(this_opt + 6, NULL, 0);
+ else if (!strncmp(this_opt, "rotate:", 7))
+ def_rotate = (simple_strtoul(this_opt + 7, NULL, 0));
+ else if (!strncmp(this_opt, "mirror:", 7))
+ def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
+ else if (!strncmp(this_opt, "manual_update", 13))
+ manual_update = 1;
+ else {
+ pr_debug("omapfb: invalid option\n");
+ r = -1;
+ }
+ }
+
+ return r;
+}
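+
+/*
+ * Usage sketch (illustrative only, the values are made up): with the
+ * options parsed above, the driver can be configured from the kernel
+ * command line, e.g.
+ *
+ *   video=omapfb:accel,vram:2M,vxres:240,vyres:320,rotate:90,manual_update
+ *
+ * fb_get_options("omapfb", ...) in omapfb_init() hands the part after the
+ * first colon to omapfb_setup() for parsing.
+ */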
+
+#endif
+
+/* Register both the driver and the device */
+static int __init omapfb_init(void)
+{
+#ifndef MODULE
+ char *option;
+
+ if (fb_get_options("omapfb", &option))
+ return -ENODEV;
+ omapfb_setup(option);
+#endif
+ /* Register the driver with LDM */
+ if (platform_driver_register(&omapfb_driver)) {
+ pr_debug("failed to register omapfb driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit omapfb_cleanup(void)
+{
+ platform_driver_unregister(&omapfb_driver);
+}
+
+module_param_named(accel, def_accel, uint, 0664);
+module_param_array_named(vram, def_vram, ulong, &def_vram_cnt, 0664);
+module_param_named(vxres, def_vxres, long, 0664);
+module_param_named(vyres, def_vyres, long, 0664);
+module_param_named(rotate, def_rotate, uint, 0664);
+module_param_named(mirror, def_mirror, uint, 0664);
+module_param_named(manual_update, manual_update, bool, 0664);
+
+module_init(omapfb_init);
+module_exit(omapfb_cleanup);
+
+MODULE_DESCRIPTION("TI OMAP framebuffer driver");
+MODULE_AUTHOR("Imre Deak <imre.deak@nokia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
new file mode 100644
index 00000000000..2b4269813b2
--- /dev/null
+++ b/drivers/video/omap/rfbi.c
@@ -0,0 +1,588 @@
+/*
+ * OMAP2 Remote Frame Buffer Interface support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+#include "dispc.h"
+
+/* To work around an RFBI transfer rate limitation */
+#define OMAP_RFBI_RATE_LIMIT 1
+
+#define RFBI_BASE 0x48050800
+#define RFBI_REVISION 0x0000
+#define RFBI_SYSCONFIG 0x0010
+#define RFBI_SYSSTATUS 0x0014
+#define RFBI_CONTROL 0x0040
+#define RFBI_PIXEL_CNT 0x0044
+#define RFBI_LINE_NUMBER 0x0048
+#define RFBI_CMD 0x004c
+#define RFBI_PARAM 0x0050
+#define RFBI_DATA 0x0054
+#define RFBI_READ 0x0058
+#define RFBI_STATUS 0x005c
+#define RFBI_CONFIG0 0x0060
+#define RFBI_ONOFF_TIME0 0x0064
+#define RFBI_CYCLE_TIME0 0x0068
+#define RFBI_DATA_CYCLE1_0 0x006c
+#define RFBI_DATA_CYCLE2_0 0x0070
+#define RFBI_DATA_CYCLE3_0 0x0074
+#define RFBI_VSYNC_WIDTH 0x0090
+#define RFBI_HSYNC_WIDTH 0x0094
+
+#define DISPC_BASE 0x48050400
+#define DISPC_CONTROL 0x0040
+
+static struct {
+ u32 base;
+ void (*lcdc_callback)(void *data);
+ void *lcdc_callback_data;
+ unsigned long l4_khz;
+ int bits_per_cycle;
+ struct omapfb_device *fbdev;
+ struct clk *dss_ick;
+ struct clk *dss1_fck;
+ unsigned tearsync_pin_cnt;
+ unsigned tearsync_mode;
+} rfbi;
+
+static inline void rfbi_write_reg(int idx, u32 val)
+{
+ __raw_writel(val, rfbi.base + idx);
+}
+
+static inline u32 rfbi_read_reg(int idx)
+{
+ return __raw_readl(rfbi.base + idx);
+}
+
+static int rfbi_get_clocks(void)
+{
+ if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
+ dev_err(rfbi.fbdev->dev, "can't get dss_ick");
+ return PTR_ERR(rfbi.dss_ick);
+ }
+
+ if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+ dev_err(rfbi.fbdev->dev, "can't get dss1_fck");
+ clk_put(rfbi.dss_ick);
+ return PTR_ERR(rfbi.dss1_fck);
+ }
+
+ return 0;
+}
+
+static void rfbi_put_clocks(void)
+{
+ clk_put(rfbi.dss1_fck);
+ clk_put(rfbi.dss_ick);
+}
+
+static void rfbi_enable_clocks(int enable)
+{
+ if (enable) {
+ clk_enable(rfbi.dss_ick);
+ clk_enable(rfbi.dss1_fck);
+ } else {
+ clk_disable(rfbi.dss1_fck);
+ clk_disable(rfbi.dss_ick);
+ }
+}
+
+
+#ifdef VERBOSE
+static void rfbi_print_timings(void)
+{
+ u32 l;
+ u32 time;
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ time = 1000000000 / rfbi.l4_khz;
+ if (l & (1 << 4))
+ time *= 2;
+
+ dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
+ l = rfbi_read_reg(RFBI_ONOFF_TIME0);
+ dev_dbg(rfbi.fbdev->dev,
+ "CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
+ "REONTIME %d, REOFFTIME %d\n",
+ l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
+ (l >> 20) & 0x0f, (l >> 24) & 0x3f);
+
+ l = rfbi_read_reg(RFBI_CYCLE_TIME0);
+ dev_dbg(rfbi.fbdev->dev,
+ "WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
+ "ACCESSTIME %d\n",
+ (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
+ (l >> 22) & 0x3f);
+}
+#else
+static void rfbi_print_timings(void) {}
+#endif
+
+static void rfbi_set_timings(const struct extif_timings *t)
+{
+ u32 l;
+
+ BUG_ON(!t->converted);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]);
+ rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]);
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(1 << 4);
+ l |= (t->tim[2] ? 1 : 0) << 4;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+
+ rfbi_print_timings();
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+ *clk_period = 1000000000 / rfbi.l4_khz;
+ *max_clk_div = 2;
+}
+
+static int ps_to_rfbi_ticks(int time, int div)
+{
+ unsigned long tick_ps;
+ int ret;
+
+ /* Calculate in picosecs to yield more exact results */
+ tick_ps = 1000000000 / (rfbi.l4_khz) * div;
+
+ ret = (time + tick_ps - 1) / tick_ps;
+
+ return ret;
+}
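+
+/*
+ * Illustrative example (hypothetical clock rate): with l4_khz = 110000
+ * (a 110 MHz L4 clock) and div = 1, one tick is 1000000000 / 110000 = 9090 ps,
+ * so a 25000 ps requirement rounds up to (25000 + 9090 - 1) / 9090 = 3 ticks.
+ * Rounding up guarantees the programmed time is never shorter than the
+ * requested one.
+ */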
+
+#ifdef OMAP_RFBI_RATE_LIMIT
+static unsigned long rfbi_get_max_tx_rate(void)
+{
+ unsigned long l4_rate, dss1_rate;
+ int min_l4_ticks = 0;
+ int i;
+
+ /* According to TI this can't be calculated so make the
+ * adjustments for a couple of known frequencies and warn for
+ * others.
+ */
+ static const struct {
+ unsigned long l4_clk; /* HZ */
+ unsigned long dss1_clk; /* HZ */
+ unsigned long min_l4_ticks;
+ } ftab[] = {
+ { 55, 132, 7, }, /* 7.86 MPix/s */
+ { 110, 110, 12, }, /* 9.16 MPix/s */
+ { 110, 132, 10, }, /* 11 Mpix/s */
+ { 120, 120, 10, }, /* 12 Mpix/s */
+ { 133, 133, 10, }, /* 13.3 Mpix/s */
+ };
+
+ l4_rate = rfbi.l4_khz / 1000;
+ dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
+
+ for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+ /* Use a window instead of an exact match, to account
+ * for different DPLL multiplier / divider pairs.
+ */
+ if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
+ abs(ftab[i].dss1_clk - dss1_rate) < 3) {
+ min_l4_ticks = ftab[i].min_l4_ticks;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ftab)) {
+ /* Can't be sure, so return the maximum, non-rate-limited
+ * value anyway. This should only be a problem for tearing
+ * synchronisation.
+ */
+ dev_err(rfbi.fbdev->dev,
+ "can't determine maximum RFBI transfer rate\n");
+ return rfbi.l4_khz * 1000;
+ }
+ return rfbi.l4_khz * 1000 / min_l4_ticks;
+}
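+
+/*
+ * For illustration: with the 110 MHz / 132 MHz entry of the table above,
+ * min_l4_ticks is 10, so the maximum rate reported is
+ * 110000 * 1000 / 10 = 11000000 pixels/s, matching the 11 Mpix/s noted in
+ * the table.
+ */
+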
+#else
+static unsigned long rfbi_get_max_tx_rate(void)
+{
+ return rfbi.l4_khz * 1000;
+}
+#endif
+
+
+static int rfbi_convert_timings(struct extif_timings *t)
+{
+ u32 l;
+ int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
+ int actim, recyc, wecyc;
+ int div = t->clk_div;
+
+ if (div <= 0 || div > 2)
+ return -1;
+
+ /* Make sure that after conversion it still holds that:
+ * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
+ * csoff > cson, csoff >= max(weoff, reoff), actim > reon
+ */
+ weon = ps_to_rfbi_ticks(t->we_on_time, div);
+ weoff = ps_to_rfbi_ticks(t->we_off_time, div);
+ if (weoff <= weon)
+ weoff = weon + 1;
+ if (weon > 0x0f)
+ return -1;
+ if (weoff > 0x3f)
+ return -1;
+
+ reon = ps_to_rfbi_ticks(t->re_on_time, div);
+ reoff = ps_to_rfbi_ticks(t->re_off_time, div);
+ if (reoff <= reon)
+ reoff = reon + 1;
+ if (reon > 0x0f)
+ return -1;
+ if (reoff > 0x3f)
+ return -1;
+
+ cson = ps_to_rfbi_ticks(t->cs_on_time, div);
+ csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
+ if (csoff <= cson)
+ csoff = cson + 1;
+ if (csoff < max(weoff, reoff))
+ csoff = max(weoff, reoff);
+ if (cson > 0x0f)
+ return -1;
+ if (csoff > 0x3f)
+ return -1;
+
+ l = cson;
+ l |= csoff << 4;
+ l |= weon << 10;
+ l |= weoff << 14;
+ l |= reon << 20;
+ l |= reoff << 24;
+
+ t->tim[0] = l;
+
+ actim = ps_to_rfbi_ticks(t->access_time, div);
+ if (actim <= reon)
+ actim = reon + 1;
+ if (actim > 0x3f)
+ return -1;
+
+ wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
+ if (wecyc < weoff)
+ wecyc = weoff;
+ if (wecyc > 0x3f)
+ return -1;
+
+ recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
+ if (recyc < reoff)
+ recyc = reoff;
+ if (recyc > 0x3f)
+ return -1;
+
+ cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
+ if (cs_pulse > 0x3f)
+ return -1;
+
+ l = wecyc;
+ l |= recyc << 6;
+ l |= cs_pulse << 12;
+ l |= actim << 22;
+
+ t->tim[1] = l;
+
+ t->tim[2] = div - 1;
+
+ t->converted = 1;
+
+ return 0;
+}
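+
+/*
+ * Calling-order sketch (as implied by the converted flag and the BUG_ON()
+ * in rfbi_set_timings()): a display driver would first query get_clk_info(),
+ * choose a clk_div no larger than max_clk_div, fill in the *_time fields of
+ * struct extif_timings in picoseconds, call convert_timings() and, only if
+ * that succeeds, set_timings(). The tim[] words hold the already-converted
+ * register values and are not meant to be filled in directly.
+ */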
+
+static int rfbi_setup_tearsync(unsigned pin_cnt,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int extif_div)
+{
+ int hs, vs;
+ int min;
+ u32 l;
+
+ if (pin_cnt != 1 && pin_cnt != 2)
+ return -EINVAL;
+
+ hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
+ vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
+ if (hs < 2)
+ return -EDOM;
+ if (pin_cnt == 2)
+ min = 2;
+ else
+ min = 4;
+ if (vs < min)
+ return -EDOM;
+ if (vs == hs)
+ return -EINVAL;
+ rfbi.tearsync_pin_cnt = pin_cnt;
+ dev_dbg(rfbi.fbdev->dev,
+ "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n",
+ pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
+ rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ if (hs_pol_inv)
+ l &= ~(1 << 21);
+ else
+ l |= 1 << 21;
+ if (vs_pol_inv)
+ l &= ~(1 << 20);
+ else
+ l |= 1 << 20;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static int rfbi_enable_tearsync(int enable, unsigned line)
+{
+ u32 l;
+
+ dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n",
+ enable, line, rfbi.tearsync_mode);
+ if (line > (1 << 11) - 1)
+ return -EINVAL;
+
+ rfbi_enable_clocks(1);
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(0x3 << 2);
+ if (enable) {
+ rfbi.tearsync_mode = rfbi.tearsync_pin_cnt;
+ l |= rfbi.tearsync_mode << 2;
+ } else
+ rfbi.tearsync_mode = 0;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi_write_reg(RFBI_LINE_NUMBER, line);
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static void rfbi_write_command(const void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_CMD, *w++);
+ } else {
+ const u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--)
+ rfbi_write_reg(RFBI_CMD, *b++);
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_read_data(void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *w++ = rfbi_read_reg(RFBI_READ);
+ }
+ } else {
+ u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *b++ = rfbi_read_reg(RFBI_READ);
+ }
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_write_data(const void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_PARAM, *w++);
+ } else {
+ const u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--)
+ rfbi_write_reg(RFBI_PARAM, *b++);
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_transfer_area(int width, int height,
+ void (callback)(void * data), void *data)
+{
+ u32 w;
+
+ BUG_ON(callback == NULL);
+
+ rfbi_enable_clocks(1);
+ omap_dispc_set_lcd_size(width, height);
+
+ rfbi.lcdc_callback = callback;
+ rfbi.lcdc_callback_data = data;
+
+ rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
+
+ w = rfbi_read_reg(RFBI_CONTROL);
+ w |= 1; /* enable */
+ if (!rfbi.tearsync_mode)
+ w |= 1 << 4; /* internal trigger, reset by HW */
+ rfbi_write_reg(RFBI_CONTROL, w);
+
+ omap_dispc_enable_lcd_out(1);
+}
+
+static inline void _stop_transfer(void)
+{
+ u32 w;
+
+ w = rfbi_read_reg(RFBI_CONTROL);
+ rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0));
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_dma_callback(void *data)
+{
+ _stop_transfer();
+ rfbi.lcdc_callback(rfbi.lcdc_callback_data);
+}
+
+static void rfbi_set_bits_per_cycle(int bpc)
+{
+ u32 l;
+
+ rfbi_enable_clocks(1);
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(0x03 << 0);
+
+ switch (bpc) {
+ case 8:
+ break;
+ case 16:
+ l |= 3;
+ break;
+ default:
+ BUG();
+ }
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi.bits_per_cycle = bpc;
+ rfbi_enable_clocks(0);
+}
+
+static int rfbi_init(struct omapfb_device *fbdev)
+{
+ u32 l;
+ int r;
+
+ rfbi.fbdev = fbdev;
+ rfbi.base = io_p2v(RFBI_BASE);
+
+ if ((r = rfbi_get_clocks()) < 0)
+ return r;
+ rfbi_enable_clocks(1);
+
+ rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
+
+ /* Reset */
+ rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1);
+ while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0)));
+
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ /* Enable autoidle and smart-idle */
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ /* 16-bit interface, ITE trigger mode, 16-bit data */
+ l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7);
+ l |= (0 << 9) | (1 << 20) | (1 << 21);
+ rfbi_write_reg(RFBI_CONFIG0, l);
+
+ rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010);
+
+ l = rfbi_read_reg(RFBI_CONTROL);
+ /* Select CS0, clear bypass mode */
+ l = (0x01 << 2);
+ rfbi_write_reg(RFBI_CONTROL, l);
+
+ if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
+ dev_err(fbdev->dev, "can't get DISPC irq\n");
+ rfbi_enable_clocks(0);
+ return r;
+ }
+
+ l = rfbi_read_reg(RFBI_REVISION);
+ pr_info("omapfb: RFBI version %d.%d initialized\n",
+ (l >> 4) & 0x0f, l & 0x0f);
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static void rfbi_cleanup(void)
+{
+ omap_dispc_free_irq();
+ rfbi_put_clocks();
+}
+
+const struct lcd_ctrl_extif omap2_ext_if = {
+ .init = rfbi_init,
+ .cleanup = rfbi_cleanup,
+ .get_clk_info = rfbi_get_clk_info,
+ .get_max_tx_rate = rfbi_get_max_tx_rate,
+ .set_bits_per_cycle = rfbi_set_bits_per_cycle,
+ .convert_timings = rfbi_convert_timings,
+ .set_timings = rfbi_set_timings,
+ .write_command = rfbi_write_command,
+ .read_data = rfbi_read_data,
+ .write_data = rfbi_write_data,
+ .transfer_area = rfbi_transfer_area,
+ .setup_tearsync = rfbi_setup_tearsync,
+ .enable_tearsync = rfbi_enable_tearsync,
+
+ .max_transmit_size = (u32) ~0,
+};
+
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
new file mode 100644
index 00000000000..81dbcf53cf0
--- /dev/null
+++ b/drivers/video/omap/sossi.c
@@ -0,0 +1,686 @@
+/*
+ * OMAP1 Special OptimiSed Screen Interface support
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include "lcdc.h"
+
+#define MODULE_NAME "omapfb-sossi"
+
+#define OMAP_SOSSI_BASE 0xfffbac00
+#define SOSSI_ID_REG 0x00
+#define SOSSI_INIT1_REG 0x04
+#define SOSSI_INIT2_REG 0x08
+#define SOSSI_INIT3_REG 0x0c
+#define SOSSI_FIFO_REG 0x10
+#define SOSSI_REOTABLE_REG 0x14
+#define SOSSI_TEARING_REG 0x18
+#define SOSSI_INIT1B_REG 0x1c
+#define SOSSI_FIFOB_REG 0x20
+
+#define DMA_GSCR 0xfffedc04
+#define DMA_LCD_CCR 0xfffee3c2
+#define DMA_LCD_CTRL 0xfffee3c4
+#define DMA_LCD_LCH_CTRL 0xfffee3ea
+
+#define CONF_SOSSI_RESET_R (1 << 23)
+
+#define RD_ACCESS 0
+#define WR_ACCESS 1
+
+#define SOSSI_MAX_XMIT_BYTES (512 * 1024)
+
+static struct {
+ void __iomem *base;
+ struct clk *fck;
+ unsigned long fck_hz;
+ spinlock_t lock;
+ int bus_pick_count;
+ int bus_pick_width;
+ int tearsync_mode;
+ int tearsync_line;
+ void (*lcdc_callback)(void *data);
+ void *lcdc_callback_data;
+ int vsync_dma_pending;
+ /* timing for read and write access */
+ int clk_div;
+ u8 clk_tw0[2];
+ u8 clk_tw1[2];
+ /*
+ * if last_access is the same as current we don't have to change
+ * the timings
+ */
+ int last_access;
+
+ struct omapfb_device *fbdev;
+} sossi;
+
+static inline u32 sossi_read_reg(int reg)
+{
+ return readl(sossi.base + reg);
+}
+
+static inline u16 sossi_read_reg16(int reg)
+{
+ return readw(sossi.base + reg);
+}
+
+static inline u8 sossi_read_reg8(int reg)
+{
+ return readb(sossi.base + reg);
+}
+
+static inline void sossi_write_reg(int reg, u32 value)
+{
+ writel(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg16(int reg, u16 value)
+{
+ writew(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg8(int reg, u8 value)
+{
+ writeb(value, sossi.base + reg);
+}
+
+static void sossi_set_bits(int reg, u32 bits)
+{
+ sossi_write_reg(reg, sossi_read_reg(reg) | bits);
+}
+
+static void sossi_clear_bits(int reg, u32 bits)
+{
+ sossi_write_reg(reg, sossi_read_reg(reg) & ~bits);
+}
+
+#define HZ_TO_PS(x) (1000000000 / (x / 1000))
+
+static u32 ps_to_sossi_ticks(u32 ps, int div)
+{
+ u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div;
+ return (clk_period + ps - 1) / clk_period;
+}
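+
+/*
+ * Illustrative example (hypothetical DPLL1OUT rate): with fck_hz = 96000000
+ * and div = 2, HZ_TO_PS() gives 1000000000 / 96000 = 10416 ps per undivided
+ * cycle, one SoSSI tick is therefore 10416 * 2 = 20832 ps, and a 50000 ps
+ * requirement rounds up to (20832 + 50000 - 1) / 20832 = 3 ticks.
+ */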
+
+static int calc_rd_timings(struct extif_timings *t)
+{
+ u32 tw0, tw1;
+ int reon, reoff, recyc, actim;
+ int div = t->clk_div;
+
+ /*
+ * Make sure that after conversion it still holds that:
+ * reoff > reon, recyc >= reoff, actim > reon
+ */
+ reon = ps_to_sossi_ticks(t->re_on_time, div);
+ /* reon will be exactly one sossi tick */
+ if (reon > 1)
+ return -1;
+
+ reoff = ps_to_sossi_ticks(t->re_off_time, div);
+
+ if (reoff <= reon)
+ reoff = reon + 1;
+
+ tw0 = reoff - reon;
+ if (tw0 > 0x10)
+ return -1;
+
+ recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
+ if (recyc <= reoff)
+ recyc = reoff + 1;
+
+ tw1 = recyc - tw0;
+ /* values less than 3 result in the SOSSI block resetting itself */
+ if (tw1 < 3)
+ tw1 = 3;
+ if (tw1 > 0x40)
+ return -1;
+
+ actim = ps_to_sossi_ticks(t->access_time, div);
+ if (actim < reoff)
+ actim++;
+ /*
+ * access time (data hold time) will be exactly one sossi
+ * tick
+ */
+ if (actim - reoff > 1)
+ return -1;
+
+ t->tim[0] = tw0 - 1;
+ t->tim[1] = tw1 - 1;
+
+ return 0;
+}
+
+static int calc_wr_timings(struct extif_timings *t)
+{
+ u32 tw0, tw1;
+ int weon, weoff, wecyc;
+ int div = t->clk_div;
+
+ /*
+ * Make sure that after conversion it still holds that:
+ * weoff > weon, wecyc >= weoff
+ */
+ weon = ps_to_sossi_ticks(t->we_on_time, div);
+ /* weon will be exactly one sossi tick */
+ if (weon > 1)
+ return -1;
+
+ weoff = ps_to_sossi_ticks(t->we_off_time, div);
+ if (weoff <= weon)
+ weoff = weon + 1;
+ tw0 = weoff - weon;
+ if (tw0 > 0x10)
+ return -1;
+
+ wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
+ if (wecyc <= weoff)
+ wecyc = weoff + 1;
+
+ tw1 = wecyc - tw0;
+ /* values less than 3 result in the SOSSI block resetting itself */
+ if (tw1 < 3)
+ tw1 = 3;
+ if (tw1 > 0x40)
+ return -1;
+
+ t->tim[2] = tw0 - 1;
+ t->tim[3] = tw1 - 1;
+
+ return 0;
+}
+
+static void _set_timing(int div, int tw0, int tw1)
+{
+ u32 l;
+
+#ifdef VERBOSE
+ dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n",
+ tw0 + 1, tw1 + 1, div);
+#endif
+
+ clk_set_rate(sossi.fck, sossi.fck_hz / div);
+ clk_enable(sossi.fck);
+ l = sossi_read_reg(SOSSI_INIT1_REG);
+ l &= ~((0x0f << 20) | (0x3f << 24));
+ l |= (tw0 << 20) | (tw1 << 24);
+ sossi_write_reg(SOSSI_INIT1_REG, l);
+ clk_disable(sossi.fck);
+}
+
+static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width)
+{
+ u32 l;
+
+ l = sossi_read_reg(SOSSI_INIT3_REG);
+ l &= ~0x3ff;
+ l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f);
+ sossi_write_reg(SOSSI_INIT3_REG, l);
+}
+
+static void _set_tearsync_mode(int mode, unsigned line)
+{
+ u32 l;
+
+ l = sossi_read_reg(SOSSI_TEARING_REG);
+ l &= ~(((1 << 11) - 1) << 15);
+ l |= line << 15;
+ l &= ~(0x3 << 26);
+ l |= mode << 26;
+ sossi_write_reg(SOSSI_TEARING_REG, l);
+ if (mode)
+ sossi_set_bits(SOSSI_INIT2_REG, 1 << 6); /* TE logic */
+ else
+ sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6);
+}
+
+static inline void set_timing(int access)
+{
+ if (access != sossi.last_access) {
+ sossi.last_access = access;
+ _set_timing(sossi.clk_div,
+ sossi.clk_tw0[access], sossi.clk_tw1[access]);
+ }
+}
+
+static void sossi_start_transfer(void)
+{
+ /* WE */
+ sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4);
+ /* CS active low */
+ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void sossi_stop_transfer(void)
+{
+ /* WE */
+ sossi_set_bits(SOSSI_INIT2_REG, 1 << 4);
+ /* CS active low */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void wait_end_of_write(void)
+{
+ /* Before reading, wait until any pending write has completed */
+ while (!(sossi_read_reg(SOSSI_INIT2_REG) & (1 << 3)));
+}
+
+static void send_data(const void *data, unsigned int len)
+{
+ while (len >= 4) {
+ sossi_write_reg(SOSSI_FIFO_REG, *(const u32 *) data);
+ len -= 4;
+ data += 4;
+ }
+ while (len >= 2) {
+ sossi_write_reg16(SOSSI_FIFO_REG, *(const u16 *) data);
+ len -= 2;
+ data += 2;
+ }
+ while (len) {
+ sossi_write_reg8(SOSSI_FIFO_REG, *(const u8 *) data);
+ len--;
+ data++;
+ }
+}
+
+static void set_cycles(unsigned int len)
+{
+ unsigned long nr_cycles = len / (sossi.bus_pick_width / 8);
+
+ BUG_ON((nr_cycles - 1) & ~0x3ffff);
+
+ sossi_clear_bits(SOSSI_INIT1_REG, 0x3ffff);
+ sossi_set_bits(SOSSI_INIT1_REG, (nr_cycles - 1) & 0x3ffff);
+}
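+
+/*
+ * For example (hypothetical transfer): with bus_pick_width = 16, a 480 byte
+ * transfer takes 480 / (16 / 8) = 240 bus cycles, so 239 is programmed into
+ * the low 18 bits of SOSSI_INIT1_REG; the BUG_ON() above only checks that
+ * the value fits in that field.
+ */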
+
+static int sossi_convert_timings(struct extif_timings *t)
+{
+ int r = 0;
+ int div = t->clk_div;
+
+ t->converted = 0;
+
+ if (div <= 0 || div > 8)
+ return -1;
+
+ /* no CS on SOSSI, so ignore cson, csoff, cs_pulsewidth */
+ if ((r = calc_rd_timings(t)) < 0)
+ return r;
+
+ if ((r = calc_wr_timings(t)) < 0)
+ return r;
+
+ t->tim[4] = div;
+
+ t->converted = 1;
+
+ return 0;
+}
+
+static void sossi_set_timings(const struct extif_timings *t)
+{
+ BUG_ON(!t->converted);
+
+ sossi.clk_tw0[RD_ACCESS] = t->tim[0];
+ sossi.clk_tw1[RD_ACCESS] = t->tim[1];
+
+ sossi.clk_tw0[WR_ACCESS] = t->tim[2];
+ sossi.clk_tw1[WR_ACCESS] = t->tim[3];
+
+ sossi.clk_div = t->tim[4];
+}
+
+static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+ *clk_period = HZ_TO_PS(sossi.fck_hz);
+ *max_clk_div = 8;
+}
+
+static void sossi_set_bits_per_cycle(int bpc)
+{
+ int bus_pick_count, bus_pick_width;
+
+ /*
+ * We explicitly set the bus_pick_count as well, although
+ * with remapping/reordering disabled it will be calculated by HW
+ * as (32 / bus_pick_width).
+ */
+ switch (bpc) {
+ case 8:
+ bus_pick_count = 4;
+ bus_pick_width = 8;
+ break;
+ case 16:
+ bus_pick_count = 2;
+ bus_pick_width = 16;
+ break;
+ default:
+ BUG();
+ return;
+ }
+ sossi.bus_pick_width = bus_pick_width;
+ sossi.bus_pick_count = bus_pick_count;
+}
+
+static int sossi_setup_tearsync(unsigned pin_cnt,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int div)
+{
+ int hs, vs;
+ u32 l;
+
+ if (pin_cnt != 1 || div < 1 || div > 8)
+ return -EINVAL;
+
+ hs = ps_to_sossi_ticks(hs_pulse_time, div);
+ vs = ps_to_sossi_ticks(vs_pulse_time, div);
+ if (vs < 8 || vs <= hs || vs >= (1 << 12))
+ return -EDOM;
+ vs /= 8;
+ vs--;
+ if (hs > 8)
+ hs = 8;
+ if (hs)
+ hs--;
+
+ dev_dbg(sossi.fbdev->dev,
+ "setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n",
+ hs, vs, hs_pol_inv, vs_pol_inv);
+
+ clk_enable(sossi.fck);
+ l = sossi_read_reg(SOSSI_TEARING_REG);
+ l &= ~((1 << 15) - 1);
+ l |= vs << 3;
+ l |= hs;
+ if (hs_pol_inv)
+ l |= 1 << 29;
+ else
+ l &= ~(1 << 29);
+ if (vs_pol_inv)
+ l |= 1 << 28;
+ else
+ l &= ~(1 << 28);
+ sossi_write_reg(SOSSI_TEARING_REG, l);
+ clk_disable(sossi.fck);
+
+ return 0;
+}
+
+static int sossi_enable_tearsync(int enable, unsigned line)
+{
+ int mode;
+
+ dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line);
+ if (line >= 1 << 11)
+ return -EINVAL;
+ if (enable) {
+ if (line)
+ mode = 2; /* HS or VS */
+ else
+ mode = 3; /* VS only */
+ } else
+ mode = 0;
+ sossi.tearsync_line = line;
+ sossi.tearsync_mode = mode;
+
+ return 0;
+}
+
+static void sossi_write_command(const void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ send_data(data, len);
+ sossi_stop_transfer();
+ wait_end_of_write();
+ clk_disable(sossi.fck);
+}
+
+static void sossi_write_data(const void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ send_data(data, len);
+ sossi_stop_transfer();
+ wait_end_of_write();
+ clk_disable(sossi.fck);
+}
+
+static void sossi_transfer_area(int width, int height,
+ void (callback)(void *data), void *data)
+{
+ BUG_ON(callback == NULL);
+
+ sossi.lcdc_callback = callback;
+ sossi.lcdc_callback_data = data;
+
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ _set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(width * height * sossi.bus_pick_width / 8);
+
+ sossi_start_transfer();
+ if (sossi.tearsync_mode) {
+ /*
+ * Wait for the sync signal and start the transfer only
+ * then. We can't seem to be able to use HW sync DMA for
+ * this since LCD DMA shows huge latencies, as if it
+ * would ignore some of the DMA requests from SoSSI.
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sossi.lock, flags);
+ sossi.vsync_dma_pending++;
+ spin_unlock_irqrestore(&sossi.lock, flags);
+ } else
+ /* Just start the transfer right away. */
+ omap_enable_lcd_dma();
+}
+
+static void sossi_dma_callback(void *data)
+{
+ omap_stop_lcd_dma();
+ sossi_stop_transfer();
+ clk_disable(sossi.fck);
+ sossi.lcdc_callback(sossi.lcdc_callback_data);
+}
+
+static void sossi_read_data(void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(RD_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ while (len >= 4) {
+ *(u32 *) data = sossi_read_reg(SOSSI_FIFO_REG);
+ len -= 4;
+ data += 4;
+ }
+ while (len >= 2) {
+ *(u16 *) data = sossi_read_reg16(SOSSI_FIFO_REG);
+ len -= 2;
+ data += 2;
+ }
+ while (len) {
+ *(u8 *) data = sossi_read_reg8(SOSSI_FIFO_REG);
+ len--;
+ data++;
+ }
+ sossi_stop_transfer();
+ clk_disable(sossi.fck);
+}
+
+static irqreturn_t sossi_match_irq(int irq, void *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sossi.lock, flags);
+ if (sossi.vsync_dma_pending) {
+ sossi.vsync_dma_pending--;
+ omap_enable_lcd_dma();
+ }
+ spin_unlock_irqrestore(&sossi.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int sossi_init(struct omapfb_device *fbdev)
+{
+ u32 l, k;
+ struct clk *fck;
+ struct clk *dpll1out_ck;
+ int r;
+
+ sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE);
+ sossi.fbdev = fbdev;
+ spin_lock_init(&sossi.lock);
+
+ dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
+ if (IS_ERR(dpll1out_ck)) {
+ dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
+ return PTR_ERR(dpll1out_ck);
+ }
+ /*
+ * We need the parent clock rate, which we might divide further
+ * depending on the timing requirements of the controller. See
+ * _set_timings.
+ */
+ sossi.fck_hz = clk_get_rate(dpll1out_ck);
+ clk_put(dpll1out_ck);
+
+ fck = clk_get(fbdev->dev, "ck_sossi");
+ if (IS_ERR(fck)) {
+ dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
+ return PTR_ERR(fck);
+ }
+ sossi.fck = fck;
+
+ /* Reset and enable the SoSSI module */
+ l = omap_readl(MOD_CONF_CTRL_1);
+ l |= CONF_SOSSI_RESET_R;
+ omap_writel(l, MOD_CONF_CTRL_1);
+ l &= ~CONF_SOSSI_RESET_R;
+ omap_writel(l, MOD_CONF_CTRL_1);
+
+ clk_enable(sossi.fck);
+ l = omap_readl(ARM_IDLECT2);
+ l &= ~(1 << 8); /* DMACK_REQ */
+ omap_writel(l, ARM_IDLECT2);
+
+ l = sossi_read_reg(SOSSI_INIT2_REG);
+ /* Enable and reset the SoSSI block */
+ l |= (1 << 0) | (1 << 1);
+ sossi_write_reg(SOSSI_INIT2_REG, l);
+ /* Take SoSSI out of reset */
+ l &= ~(1 << 1);
+ sossi_write_reg(SOSSI_INIT2_REG, l);
+
+ sossi_write_reg(SOSSI_ID_REG, 0);
+ l = sossi_read_reg(SOSSI_ID_REG);
+ k = sossi_read_reg(SOSSI_ID_REG);
+
+ if (l != 0x55555555 || k != 0xaaaaaaaa) {
+ dev_err(fbdev->dev,
+ "invalid SoSSI sync pattern: %08x, %08x\n", l, k);
+ r = -ENODEV;
+ goto err;
+ }
+
+ if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
+ dev_err(fbdev->dev, "can't get LCDC IRQ\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
+ l = sossi_read_reg(SOSSI_ID_REG);
+ dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
+ l >> 16, l & 0xffff);
+
+ l = sossi_read_reg(SOSSI_INIT1_REG);
+ l |= (1 << 19); /* DMA_MODE */
+ l &= ~(1 << 31); /* REORDERING */
+ sossi_write_reg(SOSSI_INIT1_REG, l);
+
+ if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
+ IRQT_FALLING,
+ "sossi_match", sossi.fbdev->dev)) < 0) {
+ dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
+ goto err;
+ }
+
+ clk_disable(sossi.fck);
+ return 0;
+
+err:
+ clk_disable(sossi.fck);
+ clk_put(sossi.fck);
+ return r;
+}
+
+static void sossi_cleanup(void)
+{
+ omap_lcdc_free_dma_callback();
+ clk_put(sossi.fck);
+}
+
+struct lcd_ctrl_extif omap1_ext_if = {
+ .init = sossi_init,
+ .cleanup = sossi_cleanup,
+ .get_clk_info = sossi_get_clk_info,
+ .convert_timings = sossi_convert_timings,
+ .set_timings = sossi_set_timings,
+ .set_bits_per_cycle = sossi_set_bits_per_cycle,
+ .setup_tearsync = sossi_setup_tearsync,
+ .enable_tearsync = sossi_enable_tearsync,
+ .write_command = sossi_write_command,
+ .read_data = sossi_read_data,
+ .write_data = sossi_write_data,
+ .transfer_area = sossi_transfer_area,
+
+ .max_transmit_size = SOSSI_MAX_XMIT_BYTES,
+};
+
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index e64f8b5d005..8503e733a17 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -52,7 +52,7 @@ struct fb_info_platinum {
struct {
__u8 red, green, blue;
} palette[256];
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0a04483aa3e..10c0cc6e93f 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -24,7 +24,7 @@
* License. See the file COPYING in the main directory of this archive for
* more details.
*
- *
+ *
*/
#include <linux/module.h>
@@ -58,7 +58,7 @@
#endif
/*
- * Driver data
+ * Driver data
*/
static char *mode __devinitdata = NULL;
@@ -82,12 +82,12 @@ struct pm2fb_par
{
pm2type_t type; /* Board type */
unsigned char __iomem *v_regs;/* virtual address of p_regs */
- u32 memclock; /* memclock */
+ u32 memclock; /* memclock */
u32 video; /* video flags before blanking */
u32 mem_config; /* MemConfig reg at probe */
u32 mem_control; /* MemControl reg at probe */
u32 boot_address; /* BootAddress reg at probe */
- u32 palette[16];
+ u32 palette[16];
};
/*
@@ -95,12 +95,12 @@ struct pm2fb_par
* if we don't use modedb.
*/
static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
- .id = "",
+ .id = "",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
- .ywrapstep = 0,
+ .ywrapstep = 0,
.accel = FB_ACCEL_3DLABS_PERMEDIA2,
};
@@ -109,26 +109,26 @@ static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
*/
static struct fb_var_screeninfo pm2fb_var __devinitdata = {
/* "640x480, 8 bpp @ 60 Hz */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel =8,
- .red = {0, 8, 0},
- .blue = {0, 8, 0},
- .green = {0, 8, 0},
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 8,
+ .red = {0, 8, 0},
+ .blue = {0, 8, 0},
+ .green = {0, 8, 0},
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .accel_flags = 0,
+ .pixclock = 39721,
+ .left_margin = 40,
+ .right_margin = 24,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED
};
/*
@@ -166,7 +166,7 @@ static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
index = PM2VR_RD_INDEXED_DATA;
break;
- }
+ }
mb();
return pm2_RD(p, index);
}
@@ -182,7 +182,7 @@ static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
index = PM2VR_RD_INDEXED_DATA;
break;
- }
+ }
wmb();
pm2_WR(p, index, v);
wmb();
@@ -197,7 +197,7 @@ static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
}
#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
-#define WAIT_FIFO(p,a)
+#define WAIT_FIFO(p, a)
#else
static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
{
@@ -209,7 +209,7 @@ static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
/*
* partial products for the supported horizontal resolutions.
*/
-#define PACKPP(p0,p1,p2) (((p2) << 6) | ((p1) << 3) | (p0))
+#define PACKPP(p0, p1, p2) (((p2) << 6) | ((p1) << 3) | (p0))
static const struct {
u16 width;
u16 pp;
@@ -357,7 +357,7 @@ static void reset_card(struct pm2fb_par* p)
static void reset_config(struct pm2fb_par* p)
{
WAIT_FIFO(p, 52);
- pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG)&
+ pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED));
pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
@@ -367,7 +367,7 @@ static void reset_config(struct pm2fb_par* p)
pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
- pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
+ pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
pm2_WR(p, PM2R_LB_READ_MODE, 0);
pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
@@ -535,7 +535,7 @@ static void set_video(struct pm2fb_par* p, u32 video) {
vsync = video;
DPRINTK("video = 0x%x\n", video);
-
+
/*
* The hardware cursor needs +vsync to recognise vert retrace.
* We may not be using the hardware cursor, but the X Glint
@@ -574,9 +574,9 @@ static void set_video(struct pm2fb_par* p, u32 video) {
*/
/**
- * pm2fb_check_var - Optional function. Validates a var passed in.
- * @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_check_var - Optional function. Validates a var passed in.
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
*
* Checks to see if the hardware supports the state requested by
* var passed in.
@@ -615,23 +615,23 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */
lpitch = var->xres * ((var->bits_per_pixel + 7)>>3);
-
+
if (var->xres < 320 || var->xres > 1600) {
DPRINTK("width not supported: %u\n", var->xres);
return -EINVAL;
}
-
+
if (var->yres < 200 || var->yres > 1200) {
DPRINTK("height not supported: %u\n", var->yres);
return -EINVAL;
}
-
+
if (lpitch * var->yres_virtual > info->fix.smem_len) {
DPRINTK("no memory for screen (%ux%ux%u)\n",
var->xres, var->yres_virtual, var->bits_per_pixel);
return -EINVAL;
}
-
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock));
return -EINVAL;
@@ -672,17 +672,17 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
var->height = var->width = -1;
-
+
var->accel_flags = 0; /* Can't mmap if this is on */
-
+
DPRINTK("Checking graphics mode at %dx%d depth %d\n",
var->xres, var->yres, var->bits_per_pixel);
return 0;
}
/**
- * pm2fb_set_par - Alters the hardware state.
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_set_par - Alters the hardware state.
+ * @info: frame buffer structure that represents a single frame buffer
*
* Using the fb_var_screeninfo in fb_info we set the resolution of the
* this particular framebuffer.
@@ -709,7 +709,7 @@ static int pm2fb_set_par(struct fb_info *info)
clear_palette(par);
if ( par->memclock )
set_memclock(par, par->memclock);
-
+
width = (info->var.xres_virtual + 7) & ~7;
height = info->var.yres_virtual;
depth = (info->var.bits_per_pixel + 7) & ~7;
@@ -722,7 +722,7 @@ static int pm2fb_set_par(struct fb_info *info)
DPRINTK("pixclock too high (%uKHz)\n", pixclock);
return -EINVAL;
}
-
+
hsstart = to3264(info->var.right_margin, depth, data64);
hsend = hsstart + to3264(info->var.hsync_len, depth, data64);
hbend = hsend + to3264(info->var.left_margin, depth, data64);
@@ -737,7 +737,7 @@ static int pm2fb_set_par(struct fb_info *info)
base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1);
if (data64)
video |= PM2F_DATA_64_ENABLE;
-
+
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) {
if (lowhsync) {
DPRINTK("ignoring +hsync, using -hsync.\n");
@@ -778,9 +778,9 @@ static int pm2fb_set_par(struct fb_info *info)
WAIT_FIFO(par, 1);
pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0);
}
-
+
set_aperture(par, depth);
-
+
mb();
WAIT_FIFO(par, 19);
pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL,
@@ -847,22 +847,22 @@ static int pm2fb_set_par(struct fb_info *info)
set_pixclock(par, pixclock);
DPRINTK("Setting graphics mode at %dx%d depth %d\n",
info->var.xres, info->var.yres, info->var.bits_per_pixel);
- return 0;
+ return 0;
}
/**
- * pm2fb_setcolreg - Sets a color register.
- * @regno: boolean, 0 copy local, 1 get_user() function
- * @red: frame buffer colormap structure
- * @green: The green value which can be up to 16 bits wide
+ * pm2fb_setcolreg - Sets a color register.
+ * @regno: boolean, 0 copy local, 1 get_user() function
+ * @red: frame buffer colormap structure
+ * @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
- * @transp: If supported the alpha value which can be up to 16 bits wide.
- * @info: frame buffer info structure
- *
- * Set a single color register. The values supplied have a 16 bit
- * magnitude which needs to be scaled in this function for the hardware.
+ * @transp: If supported the alpha value which can be up to 16 bits wide.
+ * @info: frame buffer info structure
+ *
+ * Set a single color register. The values supplied have a 16 bit
+ * magnitude which needs to be scaled in this function for the hardware.
* Pretty much a direct lift from tdfxfb.c.
- *
+ *
* Returns negative errno on error, or zero on success.
*/
static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -906,7 +906,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (blue << blue.offset) | (transp << transp.offset)
* RAMDAC does not exist
*/
-#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
+#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)
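+/*
+ * For illustration (not in the original source): CNVT_TOHW(0xffff, 8)
+ * evaluates to ((0xffff << 8) + 0x7fff - 0xffff) >> 16 = 255, i.e. a
+ * full-scale 16-bit colour component is scaled down, with rounding, to the
+ * top of an 8-bit wide hardware field.
+ */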
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
case FB_VISUAL_PSEUDOCOLOR:
@@ -916,9 +916,9 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
transp = CNVT_TOHW(transp, info->var.transp.length);
break;
case FB_VISUAL_DIRECTCOLOR:
- /* example here assumes 8 bit DAC. Might be different
- * for your hardware */
- red = CNVT_TOHW(red, 8);
+ /* example here assumes 8 bit DAC. Might be different
+ * for your hardware */
+ red = CNVT_TOHW(red, 8);
green = CNVT_TOHW(green, 8);
blue = CNVT_TOHW(blue, 8);
/* hey, there is bug in transp handling... */
@@ -940,11 +940,11 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
switch (info->var.bits_per_pixel) {
case 8:
- break;
- case 16:
+ break;
+ case 16:
case 24:
- case 32:
- par->palette[regno] = v;
+ case 32:
+ par->palette[regno] = v;
break;
}
return 0;
@@ -956,15 +956,15 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
/**
- * pm2fb_pan_display - Pans the display.
- * @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_pan_display - Pans the display.
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
*
* Pan (or wrap, depending on the `vmode' field) the display using the
- * `xoffset' and `yoffset' fields of the `var' structure.
- * If the values don't fit, return -EINVAL.
+ * `xoffset' and `yoffset' fields of the `var' structure.
+ * If the values don't fit, return -EINVAL.
*
- * Returns negative errno on error, or zero on success.
+ * Returns negative errno on error, or zero on success.
*
*/
static int pm2fb_pan_display(struct fb_var_screeninfo *var,
@@ -980,24 +980,24 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
depth = (depth > 32) ? 32 : depth;
base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
WAIT_FIFO(p, 1);
- pm2_WR(p, PM2R_SCREEN_BASE, base);
+ pm2_WR(p, PM2R_SCREEN_BASE, base);
return 0;
}
/**
- * pm2fb_blank - Blanks the display.
- * @blank_mode: the blank mode we want.
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_blank - Blanks the display.
+ * @blank_mode: the blank mode we want.
+ * @info: frame buffer structure that represents a single frame buffer
*
- * Blank the screen if blank_mode != 0, else unblank. Return 0 if
- * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
- * video mode which doesn't support it. Implements VESA suspend
- * and powerdown modes on hardware that supports disabling hsync/vsync:
- * blank_mode == 2: suspend vsync
- * blank_mode == 3: suspend hsync
- * blank_mode == 4: powerdown
+ * Blank the screen if blank_mode != 0, else unblank. Return 0 if
+ * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
+ * video mode which doesn't support it. Implements VESA suspend
+ * and powerdown modes on hardware that supports disabling hsync/vsync:
+ * blank_mode == 2: suspend vsync
+ * blank_mode == 3: suspend hsync
+ * blank_mode == 4: powerdown
*
- * Returns negative errno on error, or zero on success.
+ * Returns negative errno on error, or zero on success.
*
*/
static int pm2fb_blank(int blank_mode, struct fb_info *info)
@@ -1071,7 +1071,7 @@ static void pm2fb_block_op(struct fb_info* info, int copy,
pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x);
pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w);
wmb();
- pm2_WR(par, PM2R_RENDER,PM2F_RENDER_RECTANGLE |
+ pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE |
(x<xsrc ? PM2F_INCREASE_X : 0) |
(y<ysrc ? PM2F_INCREASE_Y : 0) |
(copy ? 0 : PM2F_RENDER_FASTFILL));
@@ -1234,7 +1234,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
DPRINTK("Adjusting register base for big-endian.\n");
#endif
DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start);
-
+
/* Registers - request region and map it. */
if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len,
"pm2fb regbase") ) {
@@ -1317,17 +1317,17 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
}
info->fbops = &pm2fb_ops;
- info->fix = pm2fb_fix;
+ info->fix = pm2fb_fix;
info->pseudo_palette = default_par->palette;
info->flags = FBINFO_DEFAULT |
- FBINFO_HWACCEL_YPAN |
- FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT;
+ FBINFO_HWACCEL_YPAN |
+ FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT;
if (!mode)
mode = "640x480@60";
-
- err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
+
+ err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
if (!err || err == 4)
info->var = pm2fb_var;
@@ -1348,8 +1348,8 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
return 0;
err_exit_all:
- fb_dealloc_cmap(&info->cmap);
- err_exit_both:
+ fb_dealloc_cmap(&info->cmap);
+ err_exit_both:
iounmap(info->screen_base);
release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len);
err_exit_mmio:
@@ -1374,7 +1374,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
struct pm2fb_par *par = info->par;
unregister_framebuffer(info);
-
+
iounmap(info->screen_base);
release_mem_region(fix->smem_start, fix->smem_len);
iounmap(par->v_regs);
@@ -1402,9 +1402,9 @@ static struct pci_device_id pm2fb_id_table[] = {
static struct pci_driver pm2fb_driver = {
.name = "pm2fb",
- .id_table = pm2fb_id_table,
- .probe = pm2fb_probe,
- .remove = __devexit_p(pm2fb_remove),
+ .id_table = pm2fb_id_table,
+ .probe = pm2fb_probe,
+ .remove = __devexit_p(pm2fb_remove),
};
MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
@@ -1423,7 +1423,7 @@ static int __init pm2fb_setup(char *options)
if (!options || !*options)
return 0;
- while ((this_opt = strsep(&options, ",")) != NULL) {
+ while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
if(!strcmp(this_opt, "lowhsync")) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index b52e883f0a5..5b3f54c0918 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -77,7 +77,7 @@ static struct fb_fix_screeninfo pm3fb_fix __devinitdata = {
.xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 0,
- .accel = FB_ACCEL_NONE,
+ .accel = FB_ACCEL_3DLABS_PERMEDIA3,
};
/*
@@ -185,6 +185,238 @@ static inline int pm3fb_shift_bpp(unsigned bpp, int v)
return 0;
}
+/* acceleration */
+static int pm3fb_sync(struct fb_info *info)
+{
+ struct pm3_par *par = info->par;
+
+ PM3_WAIT(par, 2);
+ PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+ PM3_WRITE_REG(par, PM3Sync, 0);
+ mb();
+ do {
+ while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0);
+ rmb();
+ } while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
+
+ return 0;
+}
+
+static void pm3fb_init_engine(struct fb_info *info)
+{
+ struct pm3_par *par = info->par;
+ const u32 width = (info->var.xres_virtual + 7) & ~7;
+
+ PM3_WAIT(par, 50);
+ PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+ PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
+ PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
+ PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
+ PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
+ PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
+ PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
+ PM3_WRITE_REG(par, PM3GIDMode, 0x0);
+ PM3_WRITE_REG(par, PM3DepthMode, 0x0);
+ PM3_WRITE_REG(par, PM3StencilMode, 0x0);
+ PM3_WRITE_REG(par, PM3StencilData, 0x0);
+ PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
+ PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LUTMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
+ PM3_WRITE_REG(par, PM3FogMode, 0x0);
+ PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
+ PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
+ PM3_WRITE_REG(par, PM3YUVMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
+ PM3_WRITE_REG(par, PM3DitherMode, 0x0);
+ PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
+ PM3_WRITE_REG(par, PM3RouterMode, 0x0);
+ PM3_WRITE_REG(par, PM3Window, 0x0);
+
+ PM3_WRITE_REG(par, PM3Config2D, 0x0);
+
+ PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);
+
+ PM3_WRITE_REG(par, PM3XBias, 0x0);
+ PM3_WRITE_REG(par, PM3YBias, 0x0);
+ PM3_WRITE_REG(par, PM3DeltaControl, 0x0);
+
+ PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);
+
+ PM3_WRITE_REG(par, PM3FBDestReadEnables,
+ PM3FBDestReadEnables_E(0xff) |
+ PM3FBDestReadEnables_R(0xff) |
+ PM3FBDestReadEnables_ReferenceAlpha(0xff));
+ PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
+ PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
+ PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
+ PM3FBDestReadBufferWidth_Width(width));
+
+ PM3_WRITE_REG(par, PM3FBDestReadMode,
+ PM3FBDestReadMode_ReadEnable |
+ PM3FBDestReadMode_Enable0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
+ PM3FBSourceReadBufferWidth_Width(width));
+ PM3_WRITE_REG(par, PM3FBSourceReadMode,
+ PM3FBSourceReadMode_Blocking |
+ PM3FBSourceReadMode_ReadEnable);
+
+ PM3_WAIT(par, 2);
+ {
+ unsigned long rm = 1;
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_8BIT);
+ break;
+ case 16:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_16BIT);
+ break;
+ case 32:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_32BIT);
+ break;
+ default:
+ DPRINTK(1, "Unsupported depth %d\n",
+ info->var.bits_per_pixel);
+ break;
+ }
+ PM3_WRITE_REG(par, PM3RasterizerMode, rm);
+ }
+
+ PM3_WAIT(par, 20);
+ PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
+ PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
+ PM3_WRITE_REG(par, PM3FBWriteMode,
+ PM3FBWriteMode_WriteEnable |
+ PM3FBWriteMode_OpaqueSpan |
+ PM3FBWriteMode_Enable0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
+ PM3FBWriteBufferWidth_Width(width));
+
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
+ {
+ /* size in lines of FB */
+ unsigned long sofb = info->screen_size /
+ info->fix.line_length;
+ if (sofb > 4095)
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
+ else
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (2 << 3));
+ break;
+ case 16:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (1 << 3));
+ break;
+ case 32:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (0 << 3));
+ break;
+ default:
+ DPRINTK(1, "Unsupported depth %d\n",
+ info->current_par->depth);
+ break;
+ }
+ }
+
+ PM3_WRITE_REG(par, PM3dXDom, 0x0);
+ PM3_WRITE_REG(par, PM3dXSub, 0x0);
+ PM3_WRITE_REG(par, PM3dY, (1 << 16));
+ PM3_WRITE_REG(par, PM3StartXDom, 0x0);
+ PM3_WRITE_REG(par, PM3StartXSub, 0x0);
+ PM3_WRITE_REG(par, PM3StartY, 0x0);
+ PM3_WRITE_REG(par, PM3Count, 0x0);
+
+/* Disable LocalBuffer. better safe than sorry */
+ PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
+ PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);
+
+ pm3fb_sync(info);
+}
+
+static void pm3fb_fillrect (struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ struct pm3_par *par = info->par;
+ struct fb_fillrect modded;
+ int vxres, vyres;
+ u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
+ ((u32*)info->pseudo_palette)[region->color] : region->color;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+ if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
+ region->rop != ROP_COPY ) {
+ cfb_fillrect(info, region);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+ if(!modded.width || !modded.height ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if(modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if(modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ if(info->var.bits_per_pixel == 8)
+ color |= color << 8;
+ if(info->var.bits_per_pixel <= 16)
+ color |= color << 16;
+
+ PM3_WAIT(par, 4);
+
+ PM3_WRITE_REG(par, PM3Config2D,
+ PM3Config2D_UseConstantSource |
+ PM3Config2D_ForegroundROPEnable |
+ (PM3Config2D_ForegroundROP(0x3)) | /* 0x3 is GXcopy */
+ PM3Config2D_FBWriteEnable);
+
+ PM3_WRITE_REG(par, PM3ForegroundColor, color);
+
+ PM3_WRITE_REG(par, PM3RectanglePosition,
+ (PM3RectanglePosition_XOffset(modded.dx)) |
+ (PM3RectanglePosition_YOffset(modded.dy)));
+
+ PM3_WRITE_REG(par, PM3Render2D,
+ PM3Render2D_XPositive |
+ PM3Render2D_YPositive |
+ PM3Render2D_Operation_Normal |
+ PM3Render2D_SpanOperation |
+ (PM3Render2D_Width(modded.width)) |
+ (PM3Render2D_Height(modded.height)));
+}
+/* end of acceleration functions */
+
/* write the mode to registers */
static void pm3fb_write_mode(struct fb_info *info)
{
@@ -380,8 +612,6 @@ static void pm3fb_write_mode(struct fb_info *info)
/*
* hardware independent functions
*/
-int pm3fb_init(void);
-
static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
u32 lpitch;
@@ -528,6 +758,7 @@ static int pm3fb_set_par(struct fb_info *info)
pm3fb_clear_colormap(par, 0, 0, 0);
PM3_WRITE_DAC_REG(par, PM3RD_CursorMode,
PM3RD_CursorMode_CURSOR_DISABLE);
+ pm3fb_init_engine(info);
pm3fb_write_mode(info);
return 0;
}
@@ -675,10 +906,11 @@ static struct fb_ops pm3fb_ops = {
.fb_set_par = pm3fb_set_par,
.fb_setcolreg = pm3fb_setcolreg,
.fb_pan_display = pm3fb_pan_display,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = pm3fb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = pm3fb_blank,
+ .fb_sync = pm3fb_sync,
};
/* ------------------------------------------------------------------------- */
@@ -847,7 +1079,8 @@ static int __devinit pm3fb_probe(struct pci_dev *dev,
info->fix = pm3fb_fix;
info->pseudo_palette = par->palette;
- info->flags = FBINFO_DEFAULT;/* | FBINFO_HWACCEL_YPAN;*/
+ info->flags = FBINFO_DEFAULT |
+ FBINFO_HWACCEL_FILLRECT;/* | FBINFO_HWACCEL_YPAN;*/
/*
* This should give a reasonable default video mode. The following is
@@ -935,35 +1168,12 @@ static struct pci_driver pm3fb_driver = {
MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
-#ifndef MODULE
- /*
- * Setup
- */
-
-/*
- * Only necessary if your driver takes special options,
- * otherwise we fall back on the generic fb_setup().
- */
-static int __init pm3fb_setup(char *options)
+static int __init pm3fb_init(void)
{
- /* Parse user speficied options (`video=pm3fb:') */
- return 0;
-}
-#endif /* MODULE */
-
-int __init pm3fb_init(void)
-{
- /*
- * For kernel boot options (in 'video=pm3fb:<options>' format)
- */
#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("pm3fb", &option))
+ if (fb_get_options("pm3fb", NULL))
return -ENODEV;
- pm3fb_setup(option);
#endif
-
return pci_register_driver(&pm3fb_driver);
}
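
Note: the pm3fb hunks above add a hardware fillrect while keeping cfb_fillrect as the software
fallback, and they replicate the fill colour so the 32-bit span engine writes whole words at
8 and 16 bpp. A minimal sketch of that replication step (just the arithmetic the driver relies
on, not pm3fb code):

	/* 8 bpp: palette index 0x5A must occupy every byte of the 32-bit word */
	u32 color = 0x5A;
	color |= color << 8;	/* 0x00005A5A */
	color |= color << 16;	/* 0x5A5A5A5A */
	/* 16 bpp starts from a 16-bit RGB565 value and needs only the last step */
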
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 08b7ffbbbbd..3972aa8cf85 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -812,6 +812,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
static int ps3fbd(void *arg)
{
+ set_freezable();
while (!kthread_should_stop()) {
try_to_freeze();
set_current_state(TASK_INTERRUPTIBLE);
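
Note: ps3fbd (like the w1, cifs, jffs2, lockd and nfs callback threads patched below) now calls
set_freezable() because kernel threads are no longer freezable by default; without the opt-in,
their try_to_freeze() calls are no-ops. A minimal sketch of the resulting loop, with hypothetical
names (kernel context: linux/kthread.h, linux/freezer.h):

	static int my_kthread(void *arg)
	{
		set_freezable();			/* opt in to the freezer */
		while (!kthread_should_stop()) {
			try_to_freeze();		/* park here across suspend/hibernate */
			set_current_state(TASK_INTERRUPTIBLE);
			/* ... wait for and handle work ... */
			schedule();
		}
		return 0;
	}
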
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 2ba959a83eb..0f88c30f94f 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -333,24 +333,25 @@ static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
((blue & 0xf800) >> 11);
pvr2fb_set_pal_entry(par, regno, tmp);
- ((u16*)(info->pseudo_palette))[regno] = tmp;
break;
case 24: /* RGB 888 */
red >>= 8; green >>= 8; blue >>= 8;
- ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
+ tmp = (red << 16) | (green << 8) | blue;
break;
case 32: /* ARGB 8888 */
red >>= 8; green >>= 8; blue >>= 8;
tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
pvr2fb_set_pal_entry(par, regno, tmp);
- ((u32*)(info->pseudo_palette))[regno] = tmp;
break;
default:
pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
return 1;
}
+ if (regno < 16)
+ ((u32*)(info->pseudo_palette))[regno] = tmp;
+
return 0;
}
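
Note: the pvr2fb change (and the q40fb, sgivwfb, sis, tgafb, tridentfb and tx3912fb hunks that
follow) enforce the same rule: the pseudo palette consulted by fbcon in truecolor/directcolor
modes has only 16 entries, so ->fb_setcolreg must not store past index 15 even though the cmap
ioctl may pass larger regno values. The guard reduces to:

	if (regno < 16)		/* fbcon only ever reads entries 0..15 */
		((u32 *)info->pseudo_palette)[regno] = tmp;
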
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 48536c3e58a..4beac1df617 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -95,7 +95,7 @@ static int __init q40fb_probe(struct platform_device *dev)
/* mapped in q40/config.c */
q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;
- info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 70bfd78eca8..13307703a9f 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -1223,6 +1223,8 @@ static int CalcVClock
}
}
}
+
+ /* non-zero: M/N/P/clock values assigned. zero: error (not set) */
return (DeltaOld != 0xFFFFFFFF);
}
/*
@@ -1240,7 +1242,10 @@ int CalcStateExt
int dotClock
)
{
- int pixelDepth, VClk, m, n, p;
+ int pixelDepth;
+ int uninitialized_var(VClk), uninitialized_var(m),
+ uninitialized_var(n), uninitialized_var(p);
+
/*
* Save mode parameters.
*/
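
Note: CalcVClock's return value (non-zero when the M/N/P values were assigned, per the new
comment) guarantees these variables are set before use, but gcc cannot prove that, so
CalcStateExt annotates them with uninitialized_var(). A sketch of what the annotation presumably
expands to (see include/linux/compiler*.h; treat the exact definition as an assumption):

	#define uninitialized_var(x) x = x	/* self-assignment silences the false warning */

	int uninitialized_var(VClk);		/* i.e. int VClk = VClk; */
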
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index ebb6756aea0..4fb16240c04 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -752,7 +752,7 @@ static int __init sgivwfb_probe(struct platform_device *dev)
struct fb_info *info;
char *monitor;
- info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
if (!info)
return -ENOMEM;
par = info->par;
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index d5e2d9c2784..d53bf6945f0 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -479,7 +479,7 @@ struct sis_video_info {
struct fb_var_screeninfo default_var;
struct fb_fix_screeninfo sisfb_fix;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct sisfb_monitor {
u16 hmin;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93d07ef8527..e8ccace0125 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1405,12 +1405,18 @@ sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
}
break;
case 16:
+ if (regno >= 16)
+ break;
+
((u32 *)(info->pseudo_palette))[regno] =
(red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
case 32:
+ if (regno >= 16)
+ break;
+
red >>= 8;
green >>= 8;
blue >>= 8;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 5c0dab62809..89facb73edf 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1634,7 +1634,7 @@ tgafb_register(struct device *dev)
FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
info->fbops = &tgafb_ops;
info->screen_base = par->tga_fb_base;
- info->pseudo_palette = (void *)(par + 1);
+ info->pseudo_palette = par->palette;
/* This should give a reasonable default video mode. */
if (tga_bus_pci) {
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 55e8aa450bf..c699864b6f4 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -976,7 +976,7 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
- if (bpp==8) {
+ if (bpp == 8) {
t_outb(0xFF,0x3C6);
t_outb(regno,0x3C8);
@@ -984,19 +984,21 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
t_outb(green>>10,0x3C9);
t_outb(blue>>10,0x3C9);
- } else if (bpp == 16) { /* RGB 565 */
- u32 col;
-
- col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
- ((blue & 0xF800) >> 11);
- col |= col << 16;
- ((u32 *)(info->pseudo_palette))[regno] = col;
- } else if (bpp == 32) /* ARGB 8888 */
- ((u32*)info->pseudo_palette)[regno] =
- ((transp & 0xFF00) <<16) |
- ((red & 0xFF00) << 8) |
- ((green & 0xFF00)) |
- ((blue & 0xFF00)>>8);
+ } else if (regno < 16) {
+ if (bpp == 16) { /* RGB 565 */
+ u32 col;
+
+ col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
+ ((blue & 0xF800) >> 11);
+ col |= col << 16;
+ ((u32 *)(info->pseudo_palette))[regno] = col;
+ } else if (bpp == 32) /* ARGB 8888 */
+ ((u32*)info->pseudo_palette)[regno] =
+ ((transp & 0xFF00) <<16) |
+ ((red & 0xFF00) << 8) |
+ ((green & 0xFF00)) |
+ ((blue & 0xFF00)>>8);
+ }
// debug("exit\n");
return 0;
diff --git a/drivers/video/tx3912fb.c b/drivers/video/tx3912fb.c
index 07389ba01ef..e6f7c78da68 100644
--- a/drivers/video/tx3912fb.c
+++ b/drivers/video/tx3912fb.c
@@ -291,7 +291,7 @@ int __init tx3912fb_init(void)
fb_info.fbops = &tx3912fb_ops;
fb_info.var = tx3912fb_var;
fb_info.fix = tx3912fb_fix;
- fb_info.pseudo_palette = pseudo_palette;
+ fb_info.pseudo_palette = cfb8;
fb_info.flags = FBINFO_DEFAULT;
/* Clear the framebuffer */
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 30c0b948852..4c3a63308df 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -68,26 +68,26 @@ static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
/* CRT timing register sets */
-struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
-
-struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
-struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
-
-struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
-
-struct svga_timing_regs vt8623_timing_regs = {
+static struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
+
+static struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
+
+static struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
+
+static struct svga_timing_regs vt8623_timing_regs = {
vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
@@ -903,7 +903,7 @@ static void __exit vt8623fb_cleanup(void)
/* Driver Initialisation */
-int __init vt8623fb_init(void)
+static int __init vt8623fb_init(void)
{
#ifndef MODULE
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index f5c5b760ed7..c6332108f1c 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -805,6 +805,7 @@ static int w1_control(void *data)
struct w1_master *dev, *n;
int have_to_wait = 0;
+ set_freezable();
while (!kthread_should_stop() || have_to_wait) {
have_to_wait = 0;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 45c35986d49..0a7068e30ec 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -131,7 +131,9 @@ static void v9fs_parse_options(char *options, struct v9fs_session_info *v9ses)
switch (token) {
case Opt_debug:
v9ses->debug = option;
+#ifdef CONFIG_NET_9P_DEBUG
p9_debug_level = option;
+#endif
break;
case Opt_port:
v9ses->port = option;
diff --git a/fs/Kconfig b/fs/Kconfig
index ee11f8d9408..613df554728 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1675,6 +1675,7 @@ config NFSD_V3_ACL
config NFSD_V4
bool "Provide NFSv4 server support (EXPERIMENTAL)"
depends on NFSD_V3 && EXPERIMENTAL
+ select RPCSEC_GSS_KRB5
help
If you would like to include the NFSv4 server as well as the NFSv2
and NFSv3 servers, say Y here. This feature is experimental, and
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index a260198306c..b4a75880f6f 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -139,6 +139,7 @@ err_put_filp:
put_filp(file);
return error;
}
+EXPORT_SYMBOL_GPL(anon_inode_getfd);
/*
* A single inode exists for all anon_inode files. Contrary to pipes,
diff --git a/fs/attr.c b/fs/attr.c
index a0a0c7b07ba..f8dfc2269d8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -42,7 +42,7 @@ int inode_change_ok(struct inode *inode, struct iattr *attr)
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
goto error;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
@@ -52,7 +52,7 @@ int inode_change_ok(struct inode *inode, struct iattr *attr)
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
- if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
goto error;
}
fine:
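
Note: every open-coded (current->fsuid != inode->i_uid) && !capable(CAP_FOWNER) test in this
patch collapses into is_owner_or_cap(). The helper itself is not shown in this diff; it is
presumably the obvious wrapper, roughly (assumed definition, linux/fs.h):

	#define is_owner_or_cap(inode) \
		((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
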
diff --git a/fs/buffer.c b/fs/buffer.c
index 424165b569f..0f900671423 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -356,7 +356,7 @@ static void free_more_memory(void)
for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS);
+ try_to_free_pages(zones, 0, GFP_NOFS);
}
}
@@ -676,6 +676,39 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ if (TestSetPageDirty(page))
+ return 0;
+
+ write_lock_irq(&mapping->tree_lock);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(warn && !PageUptodate(page));
+
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ task_io_account_write(PAGE_CACHE_SIZE);
+ }
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+ write_unlock_irq(&mapping->tree_lock);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+ return 1;
+}
+
+/*
* Add a page to the dirty page list.
*
* It is a sad fact of life that this function is called from several places
@@ -702,7 +735,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
*/
int __set_page_dirty_buffers(struct page *page)
{
- struct address_space * const mapping = page_mapping(page);
+ struct address_space *mapping = page_mapping(page);
if (unlikely(!mapping))
return !TestSetPageDirty(page);
@@ -719,21 +752,7 @@ int __set_page_dirty_buffers(struct page *page)
}
spin_unlock(&mapping->private_lock);
- if (TestSetPageDirty(page))
- return 0;
-
- write_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- if (mapping_cap_account_dirty(mapping)) {
- __inc_zone_page_state(page, NR_FILE_DIRTY);
- task_io_account_write(PAGE_CACHE_SIZE);
- }
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- }
- write_unlock_irq(&mapping->tree_lock);
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return 1;
+ return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
@@ -982,7 +1001,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
struct buffer_head *bh;
page = find_or_create_page(inode->i_mapping, index,
- mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
if (!page)
return NULL;
@@ -1132,8 +1151,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
*/
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
+ WARN_ON_ONCE(!buffer_uptodate(bh));
if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
- __set_page_dirty_nobuffers(bh->b_page);
+ __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
/*
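
Note: the buffer.c refactoring pulls the radix-tree and dirty-accounting part of
__set_page_dirty_buffers() into __set_page_dirty() so mark_buffer_dirty() can reuse it. The two
call sites differ only in the warn argument:

	__set_page_dirty(page, mapping, 1);				/* __set_page_dirty_buffers() */
	__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);	/* mark_buffer_dirty() */

mark_buffer_dirty() passes 0 and instead warns on the buffer itself, presumably because a page
can carry an uptodate buffer without the whole page being uptodate, so a page-level warning
would fire spuriously.
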
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b0cbf4a4ad..bd0f2f2353c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -849,6 +849,7 @@ static int cifs_oplock_thread(void * dummyarg)
__u16 netfid;
int rc;
+ set_freezable();
do {
if (try_to_freeze())
continue;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f4e92661b22..0a1b8bd1dfc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -363,6 +363,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
GFP_KERNEL);
}
+ set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
continue;
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 1d716392c3a..96df1d51fdc 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -29,6 +29,7 @@
*/
#include <linux/fs.h>
+#include <linux/exportfs.h>
#ifdef CONFIG_CIFS_EXPERIMENTAL
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e73aa0a0e8..cb9d05056b5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -883,6 +883,11 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker dcache_shrinker = {
+ .shrink = shrink_dcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
/**
* d_alloc - allocate a dcache entry
* @parent: parent of entry to allocate
@@ -2115,7 +2120,7 @@ static void __init dcache_init(unsigned long mempages)
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
- set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+ register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
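
Note: dcache is the first of several caches in this patch (dquot, inode, mbcache, nfs acl)
converted from set_shrinker()/remove_shrinker() to a caller-owned struct shrinker registered
with register_shrinker(). The minimal usage pattern, with hypothetical names (kernel context:
linux/mm.h):

	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		/* free up to nr_to_scan entries; return an estimate of what remains */
		return (my_cache_count / 100) * sysctl_vfs_cache_pressure;
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&my_shrinker) at init time,
	 * unregister_shrinker(&my_shrinker) at exit (see the mbcache and nfs hunks below). */
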
diff --git a/fs/dquot.c b/fs/dquot.c
index 8819d281500..7e273151f58 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
/*
* Put reference to dquot
* NOTE: If you change this function please check whether dqput_blocks() works right...
@@ -1870,7 +1875,7 @@ static int __init dquot_init(void)
printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
nr_hash, order, (PAGE_SIZE << order));
- set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+ register_shrinker(&dqcache_shrinker);
return 0;
}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 83e94fedd4e..e77a2ec71aa 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
char *encoded_name;
- unsigned int encoded_namelen;
+ int encoded_namelen;
struct ecryptfs_crypt_stat *crypt_stat = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
char *page_virt = NULL;
@@ -473,7 +473,7 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
struct dentry *lower_dir_dentry;
umode_t mode;
char *encoded_symname;
- unsigned int encoded_symlen;
+ int encoded_symlen;
struct ecryptfs_crypt_stat *crypt_stat = NULL;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index ed4a207fe22..5276b19423c 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -75,6 +75,38 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei
return NULL;
}
+struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp)
+{
+ __u32 *objp = vobjp;
+ unsigned long ino = objp[0];
+ __u32 generation = objp[1];
+ struct inode *inode;
+ struct dentry *result;
+
+ if (ino == 0)
+ return ERR_PTR(-ESTALE);
+ inode = iget(sb, ino);
+ if (inode == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_bad_inode(inode) ||
+ (generation && inode->i_generation != generation)) {
+ result = ERR_PTR(-ESTALE);
+ goto out_iput;
+ }
+
+ result = d_alloc_anon(inode);
+ if (!result) {
+ result = ERR_PTR(-ENOMEM);
+ goto out_iput;
+ }
+ return result;
+
+ out_iput:
+ iput(inode);
+ return result;
+}
+
struct dentry *efs_get_parent(struct dentry *child)
{
struct dentry *parent;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index e0a6839e68a..d360c81f3a7 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -11,6 +11,7 @@
#include <linux/efs_fs.h>
#include <linux/efs_vh.h>
#include <linux/efs_fs_sb.h>
+#include <linux/exportfs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
@@ -113,6 +114,7 @@ static const struct super_operations efs_superblock_operations = {
};
static struct export_operations efs_export_ops = {
+ .get_dentry = efs_get_dentry,
.get_parent = efs_get_parent,
};
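
Note: efs gains its own .get_dentry because the generic get_object()/export_iget() fallback is
deleted from exportfs/expfs.c later in this patch; jfs grows an identical jfs_get_dentry below,
and both follow the same iget -> generation check -> d_alloc_anon sequence that the removed
generic helper used.
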
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index e98f6cd7200..8adb32a9387 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -1,15 +1,45 @@
+#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/namei.h>
-struct export_operations export_op_default;
+#define dprintk(fmt, args...) do{}while(0)
-#define CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
-#define dprintk(fmt, args...) do{}while(0)
+static int get_name(struct dentry *dentry, char *name,
+ struct dentry *child);
+
+
+static struct dentry *exportfs_get_dentry(struct super_block *sb, void *obj)
+{
+ struct dentry *result = ERR_PTR(-ESTALE);
+
+ if (sb->s_export_op->get_dentry) {
+ result = sb->s_export_op->get_dentry(sb, obj);
+ if (!result)
+ result = ERR_PTR(-ESTALE);
+ }
+
+ return result;
+}
+
+static int exportfs_get_name(struct dentry *dir, char *name,
+ struct dentry *child)
+{
+ struct export_operations *nop = dir->d_sb->s_export_op;
+ if (nop->get_name)
+ return nop->get_name(dir, name, child);
+ else
+ return get_name(dir, name, child);
+}
+
+/*
+ * Check if the dentry or any of its aliases is acceptable.
+ */
static struct dentry *
find_acceptable_alias(struct dentry *result,
int (*acceptable)(void *context, struct dentry *dentry),
@@ -17,6 +47,9 @@ find_acceptable_alias(struct dentry *result,
{
struct dentry *dentry, *toput = NULL;
+ if (acceptable(context, result))
+ return result;
+
spin_lock(&dcache_lock);
list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
dget_locked(dentry);
@@ -37,130 +70,50 @@ find_acceptable_alias(struct dentry *result,
return NULL;
}
-/**
- * find_exported_dentry - helper routine to implement export_operations->decode_fh
- * @sb: The &super_block identifying the filesystem
- * @obj: An opaque identifier of the object to be found - passed to
- * get_inode
- * @parent: An optional opqaue identifier of the parent of the object.
- * @acceptable: A function used to test possible &dentries to see if they are
- * acceptable
- * @context: A parameter to @acceptable so that it knows on what basis to
- * judge.
- *
- * find_exported_dentry is the central helper routine to enable file systems
- * to provide the decode_fh() export_operation. It's main task is to take
- * an &inode, find or create an appropriate &dentry structure, and possibly
- * splice this into the dcache in the correct place.
- *
- * The decode_fh() operation provided by the filesystem should call
- * find_exported_dentry() with the same parameters that it received except
- * that instead of the file handle fragment, pointers to opaque identifiers
- * for the object and optionally its parent are passed. The default decode_fh
- * routine passes one pointer to the start of the filehandle fragment, and
- * one 8 bytes into the fragment. It is expected that most filesystems will
- * take this approach, though the offset to the parent identifier may well be
- * different.
- *
- * find_exported_dentry() will call get_dentry to get an dentry pointer from
- * the file system. If any &dentry in the d_alias list is acceptable, it will
- * be returned. Otherwise find_exported_dentry() will attempt to splice a new
- * &dentry into the dcache using get_name() and get_parent() to find the
- * appropriate place.
+/*
+ * Find root of a disconnected subtree and return a reference to it.
*/
-
-struct dentry *
-find_exported_dentry(struct super_block *sb, void *obj, void *parent,
- int (*acceptable)(void *context, struct dentry *de),
- void *context)
+static struct dentry *
+find_disconnected_root(struct dentry *dentry)
{
- struct dentry *result = NULL;
- struct dentry *target_dir;
- int err;
- struct export_operations *nops = sb->s_export_op;
- struct dentry *alias;
- int noprogress;
- char nbuf[NAME_MAX+1];
-
- /*
- * Attempt to find the inode.
- */
- result = CALL(sb->s_export_op,get_dentry)(sb,obj);
- err = -ESTALE;
- if (result == NULL)
- goto err_out;
- if (IS_ERR(result)) {
- err = PTR_ERR(result);
- goto err_out;
+ dget(dentry);
+ spin_lock(&dentry->d_lock);
+ while (!IS_ROOT(dentry) &&
+ (dentry->d_parent->d_flags & DCACHE_DISCONNECTED)) {
+ struct dentry *parent = dentry->d_parent;
+ dget(parent);
+ spin_unlock(&dentry->d_lock);
+ dput(dentry);
+ dentry = parent;
+ spin_lock(&dentry->d_lock);
}
- if (S_ISDIR(result->d_inode->i_mode) &&
- (result->d_flags & DCACHE_DISCONNECTED)) {
- /* it is an unconnected directory, we must connect it */
- ;
- } else {
- if (acceptable(context, result))
- return result;
- if (S_ISDIR(result->d_inode->i_mode)) {
- err = -EACCES;
- goto err_result;
- }
+ spin_unlock(&dentry->d_lock);
+ return dentry;
+}
- alias = find_acceptable_alias(result, acceptable, context);
- if (alias)
- return alias;
- }
-
- /* It's a directory, or we are required to confirm the file's
- * location in the tree based on the parent information
- */
- dprintk("find_exported_dentry: need to look harder for %s/%d\n",sb->s_id,*(int*)obj);
- if (S_ISDIR(result->d_inode->i_mode))
- target_dir = dget(result);
- else {
- if (parent == NULL)
- goto err_result;
- target_dir = CALL(sb->s_export_op,get_dentry)(sb,parent);
- if (IS_ERR(target_dir))
- err = PTR_ERR(target_dir);
- if (target_dir == NULL || IS_ERR(target_dir))
- goto err_result;
- }
- /*
- * Now we need to make sure that target_dir is properly connected.
- * It may already be, as the flag isn't always updated when connection
- * happens.
- * So, we walk up parent links until we find a connected directory,
- * or we run out of directories. Then we find the parent, find
- * the name of the child in that parent, and do a lookup.
- * This should connect the child into the parent
- * We then repeat.
- */
+/*
+ * Make sure target_dir is fully connected to the dentry tree.
+ *
+ * It may already be, as the flag isn't always updated when connection happens.
+ */
+static int
+reconnect_path(struct super_block *sb, struct dentry *target_dir)
+{
+ char nbuf[NAME_MAX+1];
+ int noprogress = 0;
+ int err = -ESTALE;
- /* it is possible that a confused file system might not let us complete
+ /*
+ * It is possible that a confused file system might not let us complete
* the path to the root. For example, if get_parent returns a directory
* in which we cannot find a name for the child. While this implies a
* very sick filesystem we don't want it to cause knfsd to spin. Hence
* the noprogress counter. If we go through the loop 10 times (2 is
* probably enough) without getting anywhere, we just give up
*/
- noprogress= 0;
while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
- struct dentry *pd = target_dir;
-
- dget(pd);
- spin_lock(&pd->d_lock);
- while (!IS_ROOT(pd) &&
- (pd->d_parent->d_flags&DCACHE_DISCONNECTED)) {
- struct dentry *parent = pd->d_parent;
-
- dget(parent);
- spin_unlock(&pd->d_lock);
- dput(pd);
- pd = parent;
- spin_lock(&pd->d_lock);
- }
- spin_unlock(&pd->d_lock);
+ struct dentry *pd = find_disconnected_root(target_dir);
if (!IS_ROOT(pd)) {
/* must have found a connected parent - great */
@@ -175,29 +128,40 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
spin_unlock(&pd->d_lock);
noprogress = 0;
} else {
- /* we have hit the top of a disconnected path. Try
- * to find parent and connect
- * note: racing with some other process renaming a
- * directory isn't much of a problem here. If someone
- * renames the directory, it will end up properly
- * connected, which is what we want
+ /*
+ * We have hit the top of a disconnected path, try to
+ * find parent and connect.
+ *
+ * Racing with some other process renaming a directory
+ * isn't much of a problem here. If someone renames
+ * the directory, it will end up properly connected,
+ * which is what we want
+ *
+ * Getting the parent can't be supported generically,
+ * the locking is too icky.
+ *
+ * Instead we just return EACCES. If server reboots
+ * or inodes get flushed, you lose
*/
- struct dentry *ppd;
+ struct dentry *ppd = ERR_PTR(-EACCES);
struct dentry *npd;
mutex_lock(&pd->d_inode->i_mutex);
- ppd = CALL(nops,get_parent)(pd);
+ if (sb->s_export_op->get_parent)
+ ppd = sb->s_export_op->get_parent(pd);
mutex_unlock(&pd->d_inode->i_mutex);
if (IS_ERR(ppd)) {
err = PTR_ERR(ppd);
- dprintk("find_exported_dentry: get_parent of %ld failed, err %d\n",
- pd->d_inode->i_ino, err);
+ dprintk("%s: get_parent of %ld failed, err %d\n",
+ __FUNCTION__, pd->d_inode->i_ino, err);
dput(pd);
break;
}
- dprintk("find_exported_dentry: find name of %lu in %lu\n", pd->d_inode->i_ino, ppd->d_inode->i_ino);
- err = CALL(nops,get_name)(ppd, nbuf, pd);
+
+ dprintk("%s: find name of %lu in %lu\n", __FUNCTION__,
+ pd->d_inode->i_ino, ppd->d_inode->i_ino);
+ err = exportfs_get_name(ppd, nbuf, pd);
if (err) {
dput(ppd);
dput(pd);
@@ -208,13 +172,14 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
continue;
break;
}
- dprintk("find_exported_dentry: found name: %s\n", nbuf);
+ dprintk("%s: found name: %s\n", __FUNCTION__, nbuf);
mutex_lock(&ppd->d_inode->i_mutex);
npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
mutex_unlock(&ppd->d_inode->i_mutex);
if (IS_ERR(npd)) {
err = PTR_ERR(npd);
- dprintk("find_exported_dentry: lookup failed: %d\n", err);
+ dprintk("%s: lookup failed: %d\n",
+ __FUNCTION__, err);
dput(ppd);
dput(pd);
break;
@@ -227,7 +192,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
if (npd == pd)
noprogress = 0;
else
- printk("find_exported_dentry: npd != pd\n");
+ printk("%s: npd != pd\n", __FUNCTION__);
dput(npd);
dput(ppd);
if (IS_ROOT(pd)) {
@@ -243,15 +208,101 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
/* something went wrong - oh-well */
if (!err)
err = -ESTALE;
- goto err_target;
+ return err;
}
- /* if we weren't after a directory, have one more step to go */
- if (result != target_dir) {
- struct dentry *nresult;
- err = CALL(nops,get_name)(target_dir, nbuf, result);
+
+ return 0;
+}
+
+/**
+ * find_exported_dentry - helper routine to implement export_operations->decode_fh
+ * @sb: The &super_block identifying the filesystem
+ * @obj: An opaque identifier of the object to be found - passed to
+ * get_inode
+ * @parent: An optional opaque identifier of the parent of the object.
+ * @acceptable: A function used to test possible &dentries to see if they are
+ * acceptable
+ * @context: A parameter to @acceptable so that it knows on what basis to
+ * judge.
+ *
+ * find_exported_dentry is the central helper routine to enable file systems
+ * to provide the decode_fh() export_operation. It's main task is to take
+ * an &inode, find or create an appropriate &dentry structure, and possibly
+ * splice this into the dcache in the correct place.
+ *
+ * The decode_fh() operation provided by the filesystem should call
+ * find_exported_dentry() with the same parameters that it received except
+ * that instead of the file handle fragment, pointers to opaque identifiers
+ * for the object and optionally its parent are passed. The default decode_fh
+ * routine passes one pointer to the start of the filehandle fragment, and
+ * one 8 bytes into the fragment. It is expected that most filesystems will
+ * take this approach, though the offset to the parent identifier may well be
+ * different.
+ *
+ * find_exported_dentry() will call get_dentry to get a dentry pointer from
+ * the file system. If any &dentry in the d_alias list is acceptable, it will
+ * be returned. Otherwise find_exported_dentry() will attempt to splice a new
+ * &dentry into the dcache using get_name() and get_parent() to find the
+ * appropriate place.
+ */
+
+struct dentry *
+find_exported_dentry(struct super_block *sb, void *obj, void *parent,
+ int (*acceptable)(void *context, struct dentry *de),
+ void *context)
+{
+ struct dentry *result, *alias;
+ int err = -ESTALE;
+
+ /*
+ * Attempt to find the inode.
+ */
+ result = exportfs_get_dentry(sb, obj);
+ if (IS_ERR(result))
+ return result;
+
+ if (S_ISDIR(result->d_inode->i_mode)) {
+ if (!(result->d_flags & DCACHE_DISCONNECTED)) {
+ if (acceptable(context, result))
+ return result;
+ err = -EACCES;
+ goto err_result;
+ }
+
+ err = reconnect_path(sb, result);
+ if (err)
+ goto err_result;
+ } else {
+ struct dentry *target_dir, *nresult;
+ char nbuf[NAME_MAX+1];
+
+ alias = find_acceptable_alias(result, acceptable, context);
+ if (alias)
+ return alias;
+
+ if (parent == NULL)
+ goto err_result;
+
+ target_dir = exportfs_get_dentry(sb,parent);
+ if (IS_ERR(target_dir)) {
+ err = PTR_ERR(target_dir);
+ goto err_result;
+ }
+
+ err = reconnect_path(sb, target_dir);
+ if (err) {
+ dput(target_dir);
+ goto err_result;
+ }
+
+ /*
+ * As we weren't after a directory, have one more step to go.
+ */
+ err = exportfs_get_name(target_dir, nbuf, result);
if (!err) {
mutex_lock(&target_dir->d_inode->i_mutex);
- nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ nresult = lookup_one_len(nbuf, target_dir,
+ strlen(nbuf));
mutex_unlock(&target_dir->d_inode->i_mutex);
if (!IS_ERR(nresult)) {
if (nresult->d_inode) {
@@ -261,11 +312,8 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
dput(nresult);
}
}
+ dput(target_dir);
}
- dput(target_dir);
- /* now result is properly connected, it is our best bet */
- if (acceptable(context, result))
- return result;
alias = find_acceptable_alias(result, acceptable, context);
if (alias)
@@ -275,32 +323,16 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
dput(result);
/* It might be justifiable to return ESTALE here,
* but the filehandle at-least looks reasonable good
- * and it just be a permission problem, so returning
+ * and it may just be a permission problem, so returning
* -EACCESS is safer
*/
return ERR_PTR(-EACCES);
- err_target:
- dput(target_dir);
err_result:
dput(result);
- err_out:
return ERR_PTR(err);
}
-
-
-static struct dentry *get_parent(struct dentry *child)
-{
- /* get_parent cannot be supported generically, the locking
- * is too icky.
- * instead, we just return EACCES. If server reboots or inodes
- * get flushed, you lose
- */
- return ERR_PTR(-EACCES);
-}
-
-
struct getdents_callback {
char *name; /* name that was found. It already points to a
buffer NAME_MAX+1 is size */
@@ -390,61 +422,6 @@ out:
return error;
}
-
-static struct dentry *export_iget(struct super_block *sb, unsigned long ino, __u32 generation)
-{
-
- /* iget isn't really right if the inode is currently unallocated!!
- * This should really all be done inside each filesystem
- *
- * ext2fs' read_inode has been strengthed to return a bad_inode if
- * the inode had been deleted.
- *
- * Currently we don't know the generation for parent directory, so
- * a generation of 0 means "accept any"
- */
- struct inode *inode;
- struct dentry *result;
- if (ino == 0)
- return ERR_PTR(-ESTALE);
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode)
- || (generation && inode->i_generation != generation)
- ) {
- /* we didn't find the right inode.. */
- dprintk("fh_verify: Inode %lu, Bad count: %d %d or version %u %u\n",
- inode->i_ino,
- inode->i_nlink, atomic_read(&inode->i_count),
- inode->i_generation,
- generation);
-
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
- /* now to find a dentry.
- * If possible, get a well-connected one
- */
- result = d_alloc_anon(inode);
- if (!result) {
- iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- return result;
-}
-
-
-static struct dentry *get_object(struct super_block *sb, void *vobjp)
-{
- __u32 *objp = vobjp;
- unsigned long ino = objp[0];
- __u32 generation = objp[1];
-
- return export_iget(sb, ino, generation);
-}
-
-
/**
* export_encode_fh - default export_operations->encode_fh function
* @dentry: the dentry to encode
@@ -517,16 +494,40 @@ static struct dentry *export_decode_fh(struct super_block *sb, __u32 *fh, int fh
acceptable, context);
}
-struct export_operations export_op_default = {
- .decode_fh = export_decode_fh,
- .encode_fh = export_encode_fh,
+int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+ int connectable)
+{
+ struct export_operations *nop = dentry->d_sb->s_export_op;
+ int error;
+
+ if (nop->encode_fh)
+ error = nop->encode_fh(dentry, fh, max_len, connectable);
+ else
+ error = export_encode_fh(dentry, fh, max_len, connectable);
- .get_name = get_name,
- .get_parent = get_parent,
- .get_dentry = get_object,
-};
+ return error;
+}
+EXPORT_SYMBOL_GPL(exportfs_encode_fh);
+
+struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh, int fh_len,
+ int fileid_type, int (*acceptable)(void *, struct dentry *),
+ void *context)
+{
+ struct export_operations *nop = mnt->mnt_sb->s_export_op;
+ struct dentry *result;
+
+ if (nop->decode_fh) {
+ result = nop->decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
+ acceptable, context);
+ } else {
+ result = export_decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
+ acceptable, context);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(exportfs_decode_fh);
-EXPORT_SYMBOL(export_op_default);
EXPORT_SYMBOL(find_exported_dentry);
MODULE_LICENSE("GPL");
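
Note: with export_op_default gone, in-kernel users no longer reach into sb->s_export_op through
the CALL() macro; they go through exportfs_encode_fh()/exportfs_decode_fh(), which fall back to
the built-in encode/decode helpers when the filesystem supplies none of its own. A rough caller
sketch (variable names and the buffer size are illustrative, not taken from this patch):

	__u32 fh[16];
	int len = 16;		/* in: words available, out: words used */
	int type;
	struct dentry *dentry;

	type = exportfs_encode_fh(dentry_to_export, fh, &len, 1 /* connectable */);
	/* ... later, on the decode side ... */
	dentry = exportfs_decode_fh(mnt, fh, len, type, acceptable, context);
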
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 7c420b800c3..e58669e1b87 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -464,7 +464,7 @@ ext2_xattr_set_acl(struct inode *inode, int type, const void *value,
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e85c4821823..3bcd25422ee 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -36,7 +36,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -74,7 +74,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
case EXT2_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *) arg);
case EXT2_IOC_SETVERSION:
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b2efd9083b9..3eefa97fe20 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -25,6 +25,7 @@
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 1e5038d9a01..d34e9967430 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -489,7 +489,7 @@ ext3_xattr_set_acl(struct inode *inode, int type, const void *value,
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 965006dba6b..4a2a02c95bf 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -41,7 +41,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -122,7 +122,7 @@ flags_err:
__u32 generation;
int err;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
@@ -181,7 +181,7 @@ flags_err:
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 51d1c456cda..4f84dc86628 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -29,6 +29,7 @@
#include <linux/parser.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 9e882546d91..a8bae8cd1d5 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -489,7 +489,7 @@ ext4_xattr_set_acl(struct inode *inode, int type, const void *value,
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 500567dd53b..7b4aa4543c8 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -40,7 +40,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -121,7 +121,7 @@ flags_err:
__u32 generation;
int err;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
@@ -180,7 +180,7 @@ flags_err:
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d0d8c76c7ed..b806e689c4a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -29,6 +29,7 @@
#include <linux/parser.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/random.h>
#include <linux/mount.h>
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cfaf5877d98..0a7ddb39a59 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -20,6 +20,7 @@
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/parser.h>
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8e382a5d51b..3f22e9f4f69 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -215,7 +215,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
/* required for strict SunOS emulation */
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 9ccb7894717..995d63b2e74 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -78,7 +78,7 @@ generic_acl_set(struct inode *inode, struct generic_acl_operations *ops,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
acl = posix_acl_from_xattr(value, size);
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 6e80844367e..1047a8c7226 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,7 +74,7 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
{
if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
return -EOPNOTSUPP;
- if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(&ip->i_inode))
return -EPERM;
if (S_ISLNK(ip->i_inode.i_mode))
return -EOPNOTSUPP;
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 99ea5659bc2..b8312edee0e 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -11,6 +11,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 79fd10402ea..b60c0affbec 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -38,7 +38,7 @@ int hfsplus_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(flags, (int __user *)arg))
diff --git a/fs/inode.c b/fs/inode.c
index 9a012cc5b6c..320e088d0b2 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -145,7 +145,7 @@ static struct inode *alloc_inode(struct super_block *sb)
mapping->a_ops = &empty_aops;
mapping->host = inode;
mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = &default_backing_dev_info;
@@ -462,6 +462,11 @@ static int shrink_icache_memory(int nr, gfp_t gfp_mask)
return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker icache_shrinker = {
+ .shrink = shrink_icache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -519,7 +524,13 @@ repeat:
* new_inode - obtain an inode
* @sb: superblock
*
- * Allocates a new inode for given superblock.
+ * Allocates a new inode for given superblock. The default gfp_mask
+ * for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
+ * If HIGHMEM pages are unsuitable or it is known that pages allocated
+ * for the page cache are not reclaimable or migratable,
+ * mapping_set_gfp_mask() must be called with suitable flags on the
+ * newly created inode's mapping
+ *
*/
struct inode *new_inode(struct super_block *sb)
{
@@ -1379,7 +1390,7 @@ void __init inode_init(unsigned long mempages)
SLAB_MEM_SPREAD),
init_once,
NULL);
- set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+ register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
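
Note: the new_inode() kerneldoc added above spells out the contract this hunk introduces:
i_mapping now defaults to GFP_HIGHUSER_PAGECACHE (movable pagecache), so code whose pages must
not be migrated or placed in highmem has to override the mask itself. A sketch of that override,
with an illustrative mask:

	inode = new_inode(sb);
	if (inode)
		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);	/* e.g. no highmem, not movable */
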
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index efe2872cd4e..a07e67b1ea7 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -1,5 +1,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/iso_fs.h>
#include <asm/unaligned.h>
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index a46101ee867..65b3a1b5b88 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -435,7 +435,7 @@ static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value,
struct posix_acl *acl;
int rc;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 0c82dfcfd24..143c5530caf 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -81,6 +81,7 @@ static int jffs2_garbage_collect_thread(void *_c)
set_user_nice(current, 10);
+ set_freezable();
for (;;) {
allow_signal(SIGHUP);
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index fe063af6fd2..3c8663bea98 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -69,7 +69,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 2374b595f2e..f0ec72b263f 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
extern void jfs_free_zero_link(struct inode *);
extern struct dentry *jfs_get_parent(struct dentry *dentry);
extern void jfs_get_inode_flags(struct jfs_inode_info *);
+extern struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp);
extern void jfs_set_inode_flags(struct inode *);
extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 25161c4121e..932797ba433 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1477,6 +1477,38 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
return dentry;
}
+struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp)
+{
+ __u32 *objp = vobjp;
+ unsigned long ino = objp[0];
+ __u32 generation = objp[1];
+ struct inode *inode;
+ struct dentry *result;
+
+ if (ino == 0)
+ return ERR_PTR(-ESTALE);
+ inode = iget(sb, ino);
+ if (inode == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_bad_inode(inode) ||
+ (generation && inode->i_generation != generation)) {
+ result = ERR_PTR(-ESTALE);
+ goto out_iput;
+ }
+
+ result = d_alloc_anon(inode);
+ if (!result) {
+ result = ERR_PTR(-ENOMEM);
+ goto out_iput;
+ }
+ return result;
+
+ out_iput:
+ iput(inode);
+ return result;
+}
+
struct dentry *jfs_get_parent(struct dentry *dentry)
{
struct super_block *sb = dentry->d_inode->i_sb;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 20e4ac1c79a..929fceca799 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
@@ -737,6 +738,7 @@ static const struct super_operations jfs_super_operations = {
};
static struct export_operations jfs_export_operations = {
+ .get_dentry = jfs_get_dentry,
.get_parent = jfs_get_parent,
};
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index b2375f0774b..9b7f2cdaae0 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -697,7 +697,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
struct posix_acl *acl;
int rc;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
/*
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 26809325469..82e2192a0d5 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
+#include <linux/freezer.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
@@ -75,18 +76,31 @@ static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
-static unsigned long set_grace_period(void)
+static unsigned long get_lockd_grace_period(void)
{
- unsigned long grace_period;
-
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
- grace_period = ((nlm_grace_period + nlm_timeout - 1)
- / nlm_timeout) * nlm_timeout * HZ;
+ return roundup(nlm_grace_period, nlm_timeout) * HZ;
else
- grace_period = nlm_timeout * 5 * HZ;
+ return nlm_timeout * 5 * HZ;
+}
+
+unsigned long get_nfs_grace_period(void)
+{
+ unsigned long lockdgrace = get_lockd_grace_period();
+ unsigned long nfsdgrace = 0;
+
+ if (nlmsvc_ops)
+ nfsdgrace = nlmsvc_ops->get_grace_period();
+
+ return max(lockdgrace, nfsdgrace);
+}
+EXPORT_SYMBOL(get_nfs_grace_period);
+
+static unsigned long set_grace_period(void)
+{
nlmsvc_grace_period = 1;
- return grace_period + jiffies;
+ return get_nfs_grace_period() + jiffies;
}
static inline void clear_grace_period(void)
@@ -119,6 +133,7 @@ lockd(struct svc_rqst *rqstp)
complete(&lockd_start_done);
daemonize("lockd");
+ set_freezable();
/* Process request with signals blocked, but allow SIGKILL. */
allow_signal(SIGKILL);
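
Note: the lockd change is behaviour-preserving on the lockd side: with nlm_timeout = 10 and
nlm_grace_period = 45, the old ((45 + 10 - 1) / 10) * 10 * HZ and the new roundup(45, 10) * HZ
both yield 50 * HZ. What is new is get_nfs_grace_period(), which takes the maximum of lockd's
grace period and whatever nlmsvc_ops->get_grace_period() reports for nfsd, so set_grace_period()
now covers both services.
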
diff --git a/fs/mbcache.c b/fs/mbcache.c
index deeb9dc062d..fbb1d02f879 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -100,7 +100,6 @@ struct mb_cache {
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
static int __init init_mbcache(void)
{
- mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+ register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
- remove_shrinker(mb_shrinker);
+ unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
diff --git a/fs/namei.c b/fs/namei.c
index 5e2d98d10c5..defaa47c11d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1576,7 +1576,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME)
- if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
/*
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 75f309c8741..a796be5051b 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
#include <linux/mutex.h>
+#include <linux/freezer.h>
#include <net/inet_sock.h>
@@ -67,6 +68,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
daemonize("nfsv4-svc");
/* Process request with signals blocked, but allow SIGKILL. */
allow_signal(SIGKILL);
+ set_freezable();
complete(&nfs_callback_info.started);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2b1af89ca1..adffe1615c5 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -300,7 +300,10 @@ static const struct super_operations nfs4_sops = {
};
#endif
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Register the NFS filesystems
@@ -321,7 +324,7 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_2;
#endif
- acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ register_shrinker(&acl_shrinker);
return 0;
#ifdef CONFIG_NFS_V4
@@ -339,8 +342,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- if (acl_shrinker != NULL)
- remove_shrinker(acl_shrinker);
+ unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 6e92b0fe532..cf61dc8ae94 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -12,17 +12,31 @@
#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
+static int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+{
+ struct exp_flavor_info *f;
+ struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+
+ for (f = exp->ex_flavors; f < end; f++) {
+ if (f->pseudoflavor == rqstp->rq_flavor)
+ return f->flags;
+ }
+ return exp->ex_flags;
+
+}
+
int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
{
struct svc_cred cred = rqstp->rq_cred;
int i;
+ int flags = nfsexp_flags(rqstp, exp);
int ret;
- if (exp->ex_flags & NFSEXP_ALLSQUASH) {
+ if (flags & NFSEXP_ALLSQUASH) {
cred.cr_uid = exp->ex_anon_uid;
cred.cr_gid = exp->ex_anon_gid;
cred.cr_group_info = groups_alloc(0);
- } else if (exp->ex_flags & NFSEXP_ROOTSQUASH) {
+ } else if (flags & NFSEXP_ROOTSQUASH) {
struct group_info *gi;
if (!cred.cr_uid)
cred.cr_uid = exp->ex_anon_uid;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 79bd03b8bbf..c7bbf460b00 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -26,12 +26,15 @@
#include <linux/mount.h>
#include <linux/hash.h>
#include <linux/module.h>
+#include <linux/exportfs.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/gss_api.h>
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
@@ -451,8 +454,48 @@ out_free_all:
return err;
}
+static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+{
+ int listsize, err;
+ struct exp_flavor_info *f;
+
+ err = get_int(mesg, &listsize);
+ if (err)
+ return err;
+ if (listsize < 0 || listsize > MAX_SECINFO_LIST)
+ return -EINVAL;
+
+ for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
+ err = get_int(mesg, &f->pseudoflavor);
+ if (err)
+ return err;
+ /*
+ * Just a quick sanity check; we could also try to check
+ * whether this pseudoflavor is supported, but at worst
+ * an unsupported pseudoflavor on the export would just
+ * be a pseudoflavor that won't match the flavor of any
+ * authenticated request. The administrator will
+ * probably discover the problem when someone fails to
+ * authenticate.
+ */
+ if (f->pseudoflavor < 0)
+ return -EINVAL;
+ err = get_int(mesg, &f->flags);
+ if (err)
+ return err;
+ /* Only some flags are allowed to differ between flavors: */
+ if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags))
+ return -EINVAL;
+ }
+ exp->ex_nflavors = listsize;
+ return 0;
+}
+
#else /* CONFIG_NFSD_V4 */
-static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { return 0; }
+static inline int
+fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;}
+static inline int
+secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
#endif
static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
@@ -476,6 +519,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_uuid = NULL;
+ /* secinfo */
+ exp.ex_nflavors = 0;
+
if (mesg[mlen-1] != '\n')
return -EINVAL;
mesg[mlen-1] = 0;
@@ -553,7 +599,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
if (exp.ex_uuid == NULL)
err = -ENOMEM;
}
- } else
+ } else if (strcmp(buf, "secinfo") == 0)
+ err = secinfo_parse(&mesg, buf, &exp);
+ else
/* quietly ignore unknown words and anything
* following. Newer user-space can try to set
* new values, then see what the result was.
@@ -593,6 +641,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
static void exp_flags(struct seq_file *m, int flag, int fsid,
uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs);
+static void show_secinfo(struct seq_file *m, struct svc_export *exp);
static int svc_export_show(struct seq_file *m,
struct cache_detail *cd,
@@ -622,6 +671,7 @@ static int svc_export_show(struct seq_file *m,
seq_printf(m, "%02x", exp->ex_uuid[i]);
}
}
+ show_secinfo(m, exp);
}
seq_puts(m, ")\n");
return 0;
@@ -654,6 +704,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
{
struct svc_export *new = container_of(cnew, struct svc_export, h);
struct svc_export *item = container_of(citem, struct svc_export, h);
+ int i;
new->ex_flags = item->ex_flags;
new->ex_anon_uid = item->ex_anon_uid;
@@ -669,6 +720,10 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
+ new->ex_nflavors = item->ex_nflavors;
+ for (i = 0; i < MAX_SECINFO_LIST; i++) {
+ new->ex_flavors[i] = item->ex_flavors[i];
+ }
}
static struct cache_head *svc_export_alloc(void)
@@ -738,16 +793,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
int err;
if (!clp)
- return NULL;
+ return ERR_PTR(-ENOENT);
key.ek_client = clp;
key.ek_fsidtype = fsid_type;
memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
ek = svc_expkey_lookup(&key);
- if (ek != NULL)
- if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp)))
- ek = ERR_PTR(err);
+ if (ek == NULL)
+ return ERR_PTR(-ENOMEM);
+ err = cache_check(&svc_expkey_cache, &ek->h, reqp);
+ if (err)
+ return ERR_PTR(err);
return ek;
}
@@ -808,30 +865,21 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
struct cache_req *reqp)
{
struct svc_export *exp, key;
+ int err;
if (!clp)
- return NULL;
+ return ERR_PTR(-ENOENT);
key.ex_client = clp;
key.ex_mnt = mnt;
key.ex_dentry = dentry;
exp = svc_export_lookup(&key);
- if (exp != NULL) {
- int err;
-
- err = cache_check(&svc_export_cache, &exp->h, reqp);
- switch (err) {
- case 0: break;
- case -EAGAIN:
- case -ETIMEDOUT:
- exp = ERR_PTR(err);
- break;
- default:
- exp = NULL;
- }
- }
-
+ if (exp == NULL)
+ return ERR_PTR(-ENOMEM);
+ err = cache_check(&svc_export_cache, &exp->h, reqp);
+ if (err)
+ return ERR_PTR(err);
return exp;
}
@@ -847,7 +895,7 @@ exp_parent(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
dget(dentry);
exp = exp_get_by_name(clp, mnt, dentry, reqp);
- while (exp == NULL && !IS_ROOT(dentry)) {
+ while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
struct dentry *parent;
parent = dget_parent(dentry);
@@ -900,7 +948,7 @@ static void exp_fsid_unhash(struct svc_export *exp)
return;
ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
- if (ek && !IS_ERR(ek)) {
+ if (!IS_ERR(ek)) {
ek->h.expiry_time = get_seconds()-1;
cache_put(&ek->h, &svc_expkey_cache);
}
@@ -938,7 +986,7 @@ static void exp_unhash(struct svc_export *exp)
struct inode *inode = exp->ex_dentry->d_inode;
ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
- if (ek && !IS_ERR(ek)) {
+ if (!IS_ERR(ek)) {
ek->h.expiry_time = get_seconds()-1;
cache_put(&ek->h, &svc_expkey_cache);
}
@@ -989,13 +1037,12 @@ exp_export(struct nfsctl_export *nxp)
/* must make sure there won't be an ex_fsid clash */
if ((nxp->ex_flags & NFSEXP_FSID) &&
- (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) &&
- !IS_ERR(fsid_key) &&
+ (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
fsid_key->ek_mnt &&
(fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) )
goto finish;
- if (exp) {
+ if (!IS_ERR(exp)) {
/* just a flags/id/fsid update */
exp_fsid_unhash(exp);
@@ -1104,7 +1151,7 @@ exp_unexport(struct nfsctl_export *nxp)
err = -EINVAL;
exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
path_release(&nd);
- if (!exp)
+ if (IS_ERR(exp))
goto out_domain;
exp_do_unexport(exp);
@@ -1149,10 +1196,6 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
err = PTR_ERR(exp);
goto out;
}
- if (!exp) {
- dprintk("nfsd: exp_rootfh export not found.\n");
- goto out;
- }
/*
* fh must be initialized before calling fh_compose
@@ -1176,17 +1219,130 @@ exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
{
struct svc_export *exp;
struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
- if (!ek || IS_ERR(ek))
+ if (IS_ERR(ek))
return ERR_PTR(PTR_ERR(ek));
exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
cache_put(&ek->h, &svc_expkey_cache);
- if (!exp || IS_ERR(exp))
+ if (IS_ERR(exp))
return ERR_PTR(PTR_ERR(exp));
return exp;
}
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
+{
+ struct exp_flavor_info *f;
+ struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+
+ /* legacy gss-only clients are always OK: */
+ if (exp->ex_client == rqstp->rq_gssclient)
+ return 0;
+ /* ip-address based client; check sec= export option: */
+ for (f = exp->ex_flavors; f < end; f++) {
+ if (f->pseudoflavor == rqstp->rq_flavor)
+ return 0;
+ }
+ /* defaults in absence of sec= options: */
+ if (exp->ex_nflavors == 0) {
+ if (rqstp->rq_flavor == RPC_AUTH_NULL ||
+ rqstp->rq_flavor == RPC_AUTH_UNIX)
+ return 0;
+ }
+ return nfserr_wrongsec;
+}
+
+/*
+ * Uses rq_client and rq_gssclient to find an export; uses rq_client (an
+ * auth_unix client) if it's available and has secinfo information;
+ * otherwise, will try to use rq_gssclient.
+ *
+ * Called from functions that handle requests; functions that do work on
+ * behalf of mountd are passed a single client name to use, and should
+ * use exp_get_by_name() or exp_find().
+ */
+struct svc_export *
+rqst_exp_get_by_name(struct svc_rqst *rqstp, struct vfsmount *mnt,
+ struct dentry *dentry)
+{
+ struct svc_export *gssexp, *exp = NULL;
+
+ if (rqstp->rq_client == NULL)
+ goto gss;
+
+ /* First try the auth_unix client: */
+ exp = exp_get_by_name(rqstp->rq_client, mnt, dentry,
+ &rqstp->rq_chandle);
+ if (PTR_ERR(exp) == -ENOENT)
+ goto gss;
+ if (IS_ERR(exp))
+ return exp;
+ /* If it has secinfo, assume there are no gss/... clients */
+ if (exp->ex_nflavors > 0)
+ return exp;
+gss:
+ /* Otherwise, try falling back on gss client */
+ if (rqstp->rq_gssclient == NULL)
+ return exp;
+ gssexp = exp_get_by_name(rqstp->rq_gssclient, mnt, dentry,
+ &rqstp->rq_chandle);
+ if (PTR_ERR(gssexp) == -ENOENT)
+ return exp;
+ if (exp && !IS_ERR(exp))
+ exp_put(exp);
+ return gssexp;
+}
+
+struct svc_export *
+rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
+{
+ struct svc_export *gssexp, *exp = NULL;
+
+ if (rqstp->rq_client == NULL)
+ goto gss;
+
+ /* First try the auth_unix client: */
+ exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
+ if (PTR_ERR(exp) == -ENOENT)
+ goto gss;
+ if (IS_ERR(exp))
+ return exp;
+ /* If it has secinfo, assume there are no gss/... clients */
+ if (exp->ex_nflavors > 0)
+ return exp;
+gss:
+ /* Otherwise, try falling back on gss client */
+ if (rqstp->rq_gssclient == NULL)
+ return exp;
+ gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
+ &rqstp->rq_chandle);
+ if (PTR_ERR(gssexp) == -ENOENT)
+ return exp;
+ if (exp && !IS_ERR(exp))
+ exp_put(exp);
+ return gssexp;
+}
+
+struct svc_export *
+rqst_exp_parent(struct svc_rqst *rqstp, struct vfsmount *mnt,
+ struct dentry *dentry)
+{
+ struct svc_export *exp;
+
+ dget(dentry);
+ exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
+
+ while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
+ struct dentry *parent;
+
+ parent = dget_parent(dentry);
+ dput(dentry);
+ dentry = parent;
+ exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
+ }
+ dput(dentry);
+ return exp;
+}
/*
* Called when we need the filehandle for the root of the pseudofs,
@@ -1194,8 +1350,7 @@ exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
* export point with fsid==0
*/
__be32
-exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
- struct cache_req *creq)
+exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
struct svc_export *exp;
__be32 rv;
@@ -1203,12 +1358,16 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- exp = exp_find(clp, FSID_NUM, fsidv, creq);
+ exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ if (PTR_ERR(exp) == -ENOENT)
+ return nfserr_perm;
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
- if (exp == NULL)
- return nfserr_perm;
rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
+ if (rv)
+ goto out;
+ rv = check_nfsd_access(exp, rqstp);
+out:
exp_put(exp);
return rv;
}
@@ -1296,28 +1455,62 @@ static struct flags {
{ 0, {"", ""}}
};
-static void exp_flags(struct seq_file *m, int flag, int fsid,
- uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
+static void show_expflags(struct seq_file *m, int flags, int mask)
{
- int first = 0;
struct flags *flg;
+ int state, first = 0;
for (flg = expflags; flg->flag; flg++) {
- int state = (flg->flag & flag)?0:1;
+ if (flg->flag & ~mask)
+ continue;
+ state = (flg->flag & flags) ? 0 : 1;
if (*flg->name[state])
seq_printf(m, "%s%s", first++?",":"", flg->name[state]);
}
+}
+
+static void show_secinfo_flags(struct seq_file *m, int flags)
+{
+ seq_printf(m, ",");
+ show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
+}
+
+static void show_secinfo(struct seq_file *m, struct svc_export *exp)
+{
+ struct exp_flavor_info *f;
+ struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+ int lastflags = 0, first = 0;
+
+ if (exp->ex_nflavors == 0)
+ return;
+ for (f = exp->ex_flavors; f < end; f++) {
+ if (first || f->flags != lastflags) {
+ if (!first)
+ show_secinfo_flags(m, lastflags);
+ seq_printf(m, ",sec=%d", f->pseudoflavor);
+ lastflags = f->flags;
+ } else {
+ seq_printf(m, ":%d", f->pseudoflavor);
+ }
+ }
+ show_secinfo_flags(m, lastflags);
+}
+
+static void exp_flags(struct seq_file *m, int flag, int fsid,
+ uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
+{
+ show_expflags(m, flag, NFSEXP_ALLFLAGS);
if (flag & NFSEXP_FSID)
- seq_printf(m, "%sfsid=%d", first++?",":"", fsid);
+ seq_printf(m, ",fsid=%d", fsid);
if (anonu != (uid_t)-2 && anonu != (0x10000-2))
- seq_printf(m, "%sanonuid=%d", first++?",":"", anonu);
+ seq_printf(m, ",sanonuid=%d", anonu);
if (anong != (gid_t)-2 && anong != (0x10000-2))
- seq_printf(m, "%sanongid=%d", first++?",":"", anong);
+ seq_printf(m, ",sanongid=%d", anong);
if (fsloc && fsloc->locations_count > 0) {
char *loctype = (fsloc->migrated) ? "refer" : "replicas";
int i;
- seq_printf(m, "%s%s=", first++?",":"", loctype);
+ seq_printf(m, ",%s=", loctype);
seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\");
seq_putc(m, '@');
seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\");
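
The export.c changes above add per-export security-flavor lists (the secinfo export option) and a check_nfsd_access() helper that decides whether a request's RPC pseudoflavor is acceptable for an export, defaulting to allowing AUTH_NULL and AUTH_UNIX when no sec= list was given. Below is a self-contained userspace sketch of that decision; the structures are simplified and the legacy gss-only client shortcut from check_nfsd_access() is omitted.

/*
 * Userspace sketch of the per-flavor export check: a request is allowed
 * if its pseudoflavor appears in the export's sec= list, or if no list
 * was configured and the flavor is AUTH_NULL or AUTH_UNIX.
 */
#include <stdio.h>

#define RPC_AUTH_NULL		0
#define RPC_AUTH_UNIX		1
#define RPC_AUTH_GSS		6
#define MAX_SECINFO_LIST	8

struct exp_flavor_info { int pseudoflavor; int flags; };

struct demo_export {
	int nflavors;
	struct exp_flavor_info flavors[MAX_SECINFO_LIST];
};

/* 0 means allowed; -1 models the nfserr_wrongsec case */
static int check_access(const struct demo_export *exp, int rq_flavor)
{
	int i;

	for (i = 0; i < exp->nflavors; i++)
		if (exp->flavors[i].pseudoflavor == rq_flavor)
			return 0;
	/* defaults when no sec= option was given */
	if (exp->nflavors == 0 &&
	    (rq_flavor == RPC_AUTH_NULL || rq_flavor == RPC_AUTH_UNIX))
		return 0;
	return -1;
}

int main(void)
{
	struct demo_export gss_only = { 1, { { RPC_AUTH_GSS, 0 } } };
	struct demo_export legacy   = { 0 };

	printf("gss request, gss-only export : %d\n", check_access(&gss_only, RPC_AUTH_GSS));
	printf("sys request, gss-only export : %d\n", check_access(&gss_only, RPC_AUTH_UNIX));
	printf("sys request, legacy export   : %d\n", check_access(&legacy, RPC_AUTH_UNIX));
	return 0;
}
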
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 221acd1f11f..9e4a568a501 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -65,6 +65,7 @@ nlm_fclose(struct file *filp)
static struct nlmsvc_binding nfsd_nlm_ops = {
.fopen = nlm_fopen, /* open file for locking */
.fclose = nlm_fclose, /* close file */
+ .get_grace_period = get_nfs4_grace_period,
};
void
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index cc3b7badd48..b6ed38380ab 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -183,8 +183,13 @@ static void
summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
{
struct posix_acl_entry *pa, *pe;
- pas->users = 0;
- pas->groups = 0;
+
+ /*
+ * Only pas.users and pas.groups need initialization; previous
+ * posix_acl_valid() calls ensure that the other fields will be
+ * initialized in the following loop. But, just to placate gcc:
+ */
+ memset(pas, 0, sizeof(*pas));
pas->mask = 07;
pe = acl->a_entries + acl->a_count;
@@ -732,13 +737,16 @@ int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
*pacl = posix_state_to_acl(&effective_acl_state, flags);
if (IS_ERR(*pacl)) {
ret = PTR_ERR(*pacl);
+ *pacl = NULL;
goto out_dstate;
}
*dpacl = posix_state_to_acl(&default_acl_state,
flags | NFS4_ACL_TYPE_DEFAULT);
if (IS_ERR(*dpacl)) {
ret = PTR_ERR(*dpacl);
+ *dpacl = NULL;
posix_acl_release(*pacl);
+ *pacl = NULL;
goto out_dstate;
}
sort_pacl(*pacl);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 5443c52b57a..31d6633c7fe 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -75,7 +75,7 @@ enum nfs_cb_opnum4 {
#define op_enc_sz 1
#define op_dec_sz 2
#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
-#define enc_stateid_sz 16
+#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
1 + enc_stateid_sz + \
enc_nfs4_fh_sz)
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 45aa21ce678..2cf9a9a2d89 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -587,6 +587,15 @@ idmap_lookup(struct svc_rqst *rqstp,
return ret;
}
+static char *
+rqst_authname(struct svc_rqst *rqstp)
+{
+ struct auth_domain *clp;
+
+ clp = rqstp->rq_gssclient ? rqstp->rq_gssclient : rqstp->rq_client;
+ return clp->name;
+}
+
static int
idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
uid_t *id)
@@ -600,7 +609,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
return -EINVAL;
memcpy(key.name, name, namelen);
key.name[namelen] = '\0';
- strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+ strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
if (ret == -ENOENT)
ret = -ESRCH; /* nfserr_badname */
@@ -620,7 +629,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
};
int ret;
- strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname));
+ strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
if (ret == -ENOENT)
return sprintf(name, "%u", id);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8522729830d..3c627128e20 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -47,6 +47,7 @@
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/nfs4_acl.h>
+#include <linux/sunrpc/gss_api.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -286,8 +287,7 @@ nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
fh_put(&cstate->current_fh);
- status = exp_pseudoroot(rqstp->rq_client, &cstate->current_fh,
- &rqstp->rq_chandle);
+ status = exp_pseudoroot(rqstp, &cstate->current_fh);
return status;
}
@@ -474,8 +474,8 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 ret;
fh_init(&tmp_fh, NFS4_FHSIZE);
- if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh,
- &rqstp->rq_chandle)) != 0)
+ ret = exp_pseudoroot(rqstp, &tmp_fh);
+ if (ret)
return ret;
if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
fh_put(&tmp_fh);
@@ -611,6 +611,30 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
static __be32
+nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_secinfo *secinfo)
+{
+ struct svc_fh resfh;
+ struct svc_export *exp;
+ struct dentry *dentry;
+ __be32 err;
+
+ fh_init(&resfh, NFS4_FHSIZE);
+ err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
+ secinfo->si_name, secinfo->si_namelen,
+ &exp, &dentry);
+ if (err)
+ return err;
+ if (dentry->d_inode == NULL) {
+ exp_put(exp);
+ err = nfserr_noent;
+ } else
+ secinfo->si_exp = exp;
+ dput(dentry);
+ return err;
+}
+
+static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setattr *setattr)
{
@@ -1009,6 +1033,9 @@ static struct nfsd4_operation nfsd4_ops[OP_RELEASE_LOCKOWNER+1] = {
[OP_SAVEFH] = {
.op_func = (nfsd4op_func)nfsd4_savefh,
},
+ [OP_SECINFO] = {
+ .op_func = (nfsd4op_func)nfsd4_secinfo,
+ },
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
},
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8c52913d7cb..e4a4c87ec8c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,8 +49,10 @@
#include <linux/nfsd/state.h>
#include <linux/nfsd/xdr4.h>
#include <linux/namei.h>
+#include <linux/swap.h>
#include <linux/mutex.h>
#include <linux/lockd/bind.h>
+#include <linux/module.h>
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -149,6 +151,7 @@ get_nfs4_file(struct nfs4_file *fi)
}
static int num_delegations;
+unsigned int max_delegations;
/*
* Open owner state (share locks)
@@ -192,7 +195,9 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
dprintk("NFSD alloc_init_deleg\n");
- if (num_delegations > STATEID_HASH_SIZE * 4)
+ if (fp->fi_had_conflict)
+ return NULL;
+ if (num_delegations > max_delegations)
return NULL;
dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
if (dp == NULL)
@@ -999,6 +1004,7 @@ alloc_init_file(struct inode *ino)
list_add(&fp->fi_hash, &file_hashtbl[hashval]);
fp->fi_inode = igrab(ino);
fp->fi_id = current_fileid++;
+ fp->fi_had_conflict = false;
return fp;
}
return NULL;
@@ -1325,6 +1331,7 @@ do_recall(void *__dp)
{
struct nfs4_delegation *dp = __dp;
+ dp->dl_file->fi_had_conflict = true;
nfsd4_cb_recall(dp);
return 0;
}
@@ -3190,20 +3197,49 @@ nfsd4_load_reboot_recovery_data(void)
printk("NFSD: Failure reading reboot recovery data\n");
}
+unsigned long
+get_nfs4_grace_period(void)
+{
+ return max(user_lease_time, lease_time) * HZ;
+}
+
+/*
+ * Since the lifetime of a delegation isn't limited to that of an open, a
+ * client may quite reasonably hang on to a delegation as long as it has
+ * the inode cached. This becomes an obvious problem the first time a
+ * client's inode cache approaches the size of the server's total memory.
+ *
+ * For now we avoid this problem by imposing a hard limit on the number
+ * of delegations, which varies according to the server's memory size.
+ */
+static void
+set_max_delegations(void)
+{
+ /*
+ * Allow at most 4 delegations per megabyte of RAM. Quick
+ * estimates suggest that in the worst case (where every delegation
+ * is for a different inode), a delegation could take about 1.5K,
+ * giving a worst case usage of about 6% of memory.
+ */
+ max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
+}
+
/* initialization to perform when the nfsd service is started: */
static void
__nfs4_state_start(void)
{
- time_t grace_time;
+ unsigned long grace_time;
boot_time = get_seconds();
- grace_time = max(user_lease_time, lease_time);
+ grace_time = get_nfs_grace_period();
lease_time = user_lease_time;
in_grace = 1;
- printk("NFSD: starting %ld-second grace period\n", grace_time);
+ printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
+ grace_time/HZ);
laundry_wq = create_singlethread_workqueue("nfsd4");
- queue_delayed_work(laundry_wq, &laundromat_work, grace_time*HZ);
+ queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
+ set_max_delegations();
}
int
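
set_max_delegations() above caps the number of NFSv4 delegations at nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT), which works out to four delegations per megabyte of free memory. The short program below only reproduces that arithmetic with invented numbers (4 KiB pages, roughly 4 GiB free) to show where the figure comes from.

/*
 * Worked example of the delegation cap.  With the assumed 4 KiB page
 * size, the shift (20 - 2 - PAGE_SHIFT) is 6, so the cap is
 * free_pages / 64, i.e. four delegations per megabyte of free memory.
 */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4 KiB pages for the example */

int main(void)
{
	unsigned long free_pages = 1024UL * 1024;	/* pretend ~4 GiB free */
	unsigned long max_delegations = free_pages >> (20 - 2 - PAGE_SHIFT);
	unsigned long free_mib = free_pages >> (20 - PAGE_SHIFT);

	printf("free memory     : %lu MiB\n", free_mib);
	printf("max delegations : %lu (4 per MiB)\n", max_delegations);
	printf("worst case      : ~%lu KiB at ~1.5 KiB per delegation\n",
	       max_delegations * 3 / 2);
	return 0;
}
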
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 15809dfd88a..b3d55c6747f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -56,6 +56,8 @@
#include <linux/nfsd_idmap.h>
#include <linux/nfs4.h>
#include <linux/nfs4_acl.h>
+#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/svcauth_gss.h>
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -819,6 +821,23 @@ nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
}
static __be32
+nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
+ struct nfsd4_secinfo *secinfo)
+{
+ DECODE_HEAD;
+
+ READ_BUF(4);
+ READ32(secinfo->si_namelen);
+ READ_BUF(secinfo->si_namelen);
+ SAVEMEM(secinfo->si_name, secinfo->si_namelen);
+ status = check_filename(secinfo->si_name, secinfo->si_namelen,
+ nfserr_noent);
+ if (status)
+ return status;
+ DECODE_TAIL;
+}
+
+static __be32
nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
{
DECODE_HEAD;
@@ -1131,6 +1150,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
case OP_SAVEFH:
op->status = nfs_ok;
break;
+ case OP_SECINFO:
+ op->status = nfsd4_decode_secinfo(argp, &op->u.secinfo);
+ break;
case OP_SETATTR:
op->status = nfsd4_decode_setattr(argp, &op->u.setattr);
break;
@@ -1296,7 +1318,7 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *
char *path, *rootpath;
fh_init(&tmp_fh, NFS4_FHSIZE);
- *stat = exp_pseudoroot(rqstp->rq_client, &tmp_fh, &rqstp->rq_chandle);
+ *stat = exp_pseudoroot(rqstp, &tmp_fh);
if (*stat)
return NULL;
rootpath = tmp_fh.fh_export->ex_path;
@@ -1847,11 +1869,19 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
if (d_mountpoint(dentry)) {
int err;
+ /*
+ * Why the heck aren't we just using nfsd_lookup??
+ * Different "."/".." handling? Something else?
+ * At least, add a comment here to explain....
+ */
err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
if (err) {
nfserr = nfserrno(err);
goto out_put;
}
+ nfserr = check_nfsd_access(exp, cd->rd_rqstp);
+ if (nfserr)
+ goto out_put;
}
nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
@@ -2419,6 +2449,72 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
}
}
+static void
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
+ struct nfsd4_secinfo *secinfo)
+{
+ int i = 0;
+ struct svc_export *exp = secinfo->si_exp;
+ u32 nflavs;
+ struct exp_flavor_info *flavs;
+ struct exp_flavor_info def_flavs[2];
+ ENCODE_HEAD;
+
+ if (nfserr)
+ goto out;
+ if (exp->ex_nflavors) {
+ flavs = exp->ex_flavors;
+ nflavs = exp->ex_nflavors;
+ } else { /* Handling of some defaults in absence of real secinfo: */
+ flavs = def_flavs;
+ if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
+ nflavs = 2;
+ flavs[0].pseudoflavor = RPC_AUTH_UNIX;
+ flavs[1].pseudoflavor = RPC_AUTH_NULL;
+ } else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
+ nflavs = 1;
+ flavs[0].pseudoflavor
+ = svcauth_gss_flavor(exp->ex_client);
+ } else {
+ nflavs = 1;
+ flavs[0].pseudoflavor
+ = exp->ex_client->flavour->flavour;
+ }
+ }
+
+ RESERVE_SPACE(4);
+ WRITE32(nflavs);
+ ADJUST_ARGS();
+ for (i = 0; i < nflavs; i++) {
+ u32 flav = flavs[i].pseudoflavor;
+ struct gss_api_mech *gm = gss_mech_get_by_pseudoflavor(flav);
+
+ if (gm) {
+ RESERVE_SPACE(4);
+ WRITE32(RPC_AUTH_GSS);
+ ADJUST_ARGS();
+ RESERVE_SPACE(4 + gm->gm_oid.len);
+ WRITE32(gm->gm_oid.len);
+ WRITEMEM(gm->gm_oid.data, gm->gm_oid.len);
+ ADJUST_ARGS();
+ RESERVE_SPACE(4);
+ WRITE32(0); /* qop */
+ ADJUST_ARGS();
+ RESERVE_SPACE(4);
+ WRITE32(gss_pseudoflavor_to_service(gm, flav));
+ ADJUST_ARGS();
+ gss_mech_put(gm);
+ } else {
+ RESERVE_SPACE(4);
+ WRITE32(flav);
+ ADJUST_ARGS();
+ }
+ }
+out:
+ if (exp)
+ exp_put(exp);
+}
+
/*
* The SETATTR encode routine is special -- it always encodes a bitmap,
* regardless of the error status.
@@ -2559,6 +2655,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
break;
case OP_SAVEFH:
break;
+ case OP_SECINFO:
+ nfsd4_encode_secinfo(resp, op->status, &op->u.secinfo);
+ break;
case OP_SETATTR:
nfsd4_encode_setattr(resp, op->status, &op->u.setattr);
break;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 71c686dc725..baac89d917c 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -35,7 +35,6 @@
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
-#include <linux/nfsd/interface.h>
#include <asm/uaccess.h>
@@ -245,7 +244,7 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
}
exp_readunlock();
if (err == 0)
- err = res->fh_size + (int)&((struct knfsd_fh*)0)->fh_base;
+ err = res->fh_size + offsetof(struct knfsd_fh, fh_base);
out:
return err;
}
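
The nfsctl.c hunk replaces an open-coded null-pointer cast with offsetof() when computing how many bytes of the file handle structure to return from write_getfs(). The sketch below shows the equivalence on a made-up structure; the real knfsd_fh layout is not reproduced here.

/*
 * Sketch of the offsetof() substitution: the returned length is the
 * member offset of the handle data plus the handle size.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_fh {
	unsigned int fh_size;		/* bytes used in fh_base */
	unsigned char fh_base[64];	/* opaque handle data */
};

int main(void)
{
	/* old form: (int)&((struct demo_fh *)0)->fh_base */
	size_t off = offsetof(struct demo_fh, fh_base);
	unsigned int fh_size = 20;	/* pretend a 20-byte handle */

	printf("fh_base offset : %zu bytes\n", off);
	printf("bytes returned : %zu\n", off + fh_size);
	return 0;
}
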
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 6ca2d24fc21..0eb464a39aa 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -15,10 +15,12 @@
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
+#include <linux/exportfs.h>
#include <linux/mount.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcauth_gss.h>
#include <linux/nfsd/nfsd.h>
#define NFSDDBG_FACILITY NFSDDBG_FH
@@ -27,10 +29,6 @@
static int nfsd_nr_verified;
static int nfsd_nr_put;
-extern struct export_operations export_op_default;
-
-#define CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
-
/*
* our acceptability function.
* if NOSUBTREECHECK, accept anything
@@ -123,8 +121,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
int data_left = fh->fh_size/4;
error = nfserr_stale;
- if (rqstp->rq_client == NULL)
- goto out;
if (rqstp->rq_vers > 2)
error = nfserr_badhandle;
if (rqstp->rq_vers == 4 && fh->fh_size == 0)
@@ -148,7 +144,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
fh->fh_fsid[1] = fh->fh_fsid[2];
}
if ((data_left -= len)<0) goto out;
- exp = exp_find(rqstp->rq_client, fh->fh_fsid_type, datap, &rqstp->rq_chandle);
+ exp = rqst_exp_find(rqstp, fh->fh_fsid_type, datap);
datap += len;
} else {
dev_t xdev;
@@ -159,19 +155,17 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
xdev = old_decode_dev(fh->ofh_xdev);
xino = u32_to_ino_t(fh->ofh_xino);
mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
- exp = exp_find(rqstp->rq_client, FSID_DEV, tfh,
- &rqstp->rq_chandle);
+ exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
}
- if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN
- || PTR_ERR(exp) == -ETIMEDOUT)) {
- error = nfserrno(PTR_ERR(exp));
+ error = nfserr_stale;
+ if (PTR_ERR(exp) == -ENOENT)
goto out;
- }
- error = nfserr_stale;
- if (!exp || IS_ERR(exp))
+ if (IS_ERR(exp)) {
+ error = nfserrno(PTR_ERR(exp));
goto out;
+ }
/* Check if the request originated from a secure port. */
error = nfserr_perm;
@@ -211,11 +205,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
if (fileid_type == 0)
dentry = dget(exp->ex_dentry);
else {
- struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
- dentry = CALL(nop,decode_fh)(exp->ex_mnt->mnt_sb,
- datap, data_left,
- fileid_type,
- nfsd_acceptable, exp);
+ dentry = exportfs_decode_fh(exp->ex_mnt, datap,
+ data_left, fileid_type,
+ nfsd_acceptable, exp);
}
if (dentry == NULL)
goto out;
@@ -257,8 +249,19 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
if (error)
goto out;
+ if (!(access & MAY_LOCK)) {
+ /*
+ * pseudoflavor restrictions are not enforced on NLM,
+ * which clients virtually always use auth_sys for,
+ * even while using RPCSEC_GSS for NFS.
+ */
+ error = check_nfsd_access(exp, rqstp);
+ if (error)
+ goto out;
+ }
+
/* Finally, check access permissions. */
- error = nfsd_permission(exp, dentry, access);
+ error = nfsd_permission(rqstp, exp, dentry, access);
if (error) {
dprintk("fh_verify: %s/%s permission failure, "
@@ -286,15 +289,13 @@ out:
static inline int _fh_update(struct dentry *dentry, struct svc_export *exp,
__u32 *datap, int *maxsize)
{
- struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
-
if (dentry == exp->ex_dentry) {
*maxsize = 0;
return 0;
}
- return CALL(nop,encode_fh)(dentry, datap, maxsize,
- !(exp->ex_flags&NFSEXP_NOSUBTREECHECK));
+ return exportfs_encode_fh(dentry, datap, maxsize,
+ !(exp->ex_flags & NFSEXP_NOSUBTREECHECK));
}
/*
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index b2c7147aa92..977a71f64e1 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -278,7 +278,8 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
* echo thing > device-special-file-or-pipe
* by doing a CREATE with type==0
*/
- nfserr = nfsd_permission(newfhp->fh_export,
+ nfserr = nfsd_permission(rqstp,
+ newfhp->fh_export,
newfhp->fh_dentry,
MAY_WRITE|MAY_LOCAL_ACCESS);
if (nfserr && nfserr != nfserr_rofs)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ff55950efb4..a8c89ae4c74 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/freezer.h>
#include <linux/fs_struct.h>
#include <linux/sunrpc/types.h>
@@ -432,6 +433,7 @@ nfsd(struct svc_rqst *rqstp)
* dirty pages.
*/
current->flags |= PF_LESS_THROTTLE;
+ set_freezable();
/*
* The main request loop
@@ -492,6 +494,15 @@ out:
module_put_and_exit(0);
}
+static __be32 map_new_errors(u32 vers, __be32 nfserr)
+{
+ if (nfserr == nfserr_jukebox && vers == 2)
+ return nfserr_dropit;
+ if (nfserr == nfserr_wrongsec && vers < 4)
+ return nfserr_acces;
+ return nfserr;
+}
+
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
@@ -534,6 +545,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
/* Now call the procedure handler, and encode NFS status. */
nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
+ nfserr = map_new_errors(rqstp->rq_vers, nfserr);
if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
nfserr = nfserr_dropit;
if (nfserr == nfserr_dropit) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 945b1cedde2..e90f4a8a1d0 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -113,7 +113,7 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts));
- exp2 = exp_get_by_name(exp->ex_client, mnt, mounts, &rqstp->rq_chandle);
+ exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts);
if (IS_ERR(exp2)) {
err = PTR_ERR(exp2);
dput(mounts);
@@ -135,21 +135,10 @@ out:
return err;
}
-/*
- * Look up one component of a pathname.
- * N.B. After this call _both_ fhp and resfh need an fh_put
- *
- * If the lookup would cross a mountpoint, and the mounted filesystem
- * is exported to the client with NFSEXP_NOHIDE, then the lookup is
- * accepted as it stands and the mounted directory is
- * returned. Otherwise the covered directory is returned.
- * NOTE: this mountpoint crossing is not supported properly by all
- * clients and is explicitly disallowed for NFSv3
- * NeilBrown <neilb@cse.unsw.edu.au>
- */
__be32
-nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
- int len, struct svc_fh *resfh)
+nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ const char *name, int len,
+ struct svc_export **exp_ret, struct dentry **dentry_ret)
{
struct svc_export *exp;
struct dentry *dparent;
@@ -168,8 +157,6 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
exp = fhp->fh_export;
exp_get(exp);
- err = nfserr_acces;
-
/* Lookup the name, but don't follow links */
if (isdotent(name, len)) {
if (len==1)
@@ -190,17 +177,15 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
dput(dentry);
dentry = dp;
- exp2 = exp_parent(exp->ex_client, mnt, dentry,
- &rqstp->rq_chandle);
- if (IS_ERR(exp2)) {
+ exp2 = rqst_exp_parent(rqstp, mnt, dentry);
+ if (PTR_ERR(exp2) == -ENOENT) {
+ dput(dentry);
+ dentry = dget(dparent);
+ } else if (IS_ERR(exp2)) {
host_err = PTR_ERR(exp2);
dput(dentry);
mntput(mnt);
goto out_nfserr;
- }
- if (!exp2) {
- dput(dentry);
- dentry = dget(dparent);
} else {
exp_put(exp);
exp = exp2;
@@ -223,6 +208,41 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
}
}
}
+ *dentry_ret = dentry;
+ *exp_ret = exp;
+ return 0;
+
+out_nfserr:
+ exp_put(exp);
+ return nfserrno(host_err);
+}
+
+/*
+ * Look up one component of a pathname.
+ * N.B. After this call _both_ fhp and resfh need an fh_put
+ *
+ * If the lookup would cross a mountpoint, and the mounted filesystem
+ * is exported to the client with NFSEXP_NOHIDE, then the lookup is
+ * accepted as it stands and the mounted directory is
+ * returned. Otherwise the covered directory is returned.
+ * NOTE: this mountpoint crossing is not supported properly by all
+ * clients and is explicitly disallowed for NFSv3
+ * NeilBrown <neilb@cse.unsw.edu.au>
+ */
+__be32
+nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
+ int len, struct svc_fh *resfh)
+{
+ struct svc_export *exp;
+ struct dentry *dentry;
+ __be32 err;
+
+ err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
+ if (err)
+ return err;
+ err = check_nfsd_access(exp, rqstp);
+ if (err)
+ goto out;
/*
* Note: we compose the file handle now, but as the
* dentry may be negative, it may need to be updated.
@@ -230,16 +250,13 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
err = fh_compose(resfh, exp, dentry, fhp);
if (!err && !dentry->d_inode)
err = nfserr_noent;
- dput(dentry);
out:
+ dput(dentry);
exp_put(exp);
return err;
-
-out_nfserr:
- err = nfserrno(host_err);
- goto out;
}
+
/*
* Set various file attributes.
* N.B. After this call fhp needs an fh_put
@@ -311,7 +328,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
/* The size case is special. It changes the file as well as the attributes. */
if (iap->ia_valid & ATTR_SIZE) {
if (iap->ia_size < inode->i_size) {
- err = nfsd_permission(fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
+ err = nfsd_permission(rqstp, fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
if (err)
goto out;
}
@@ -435,7 +452,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
/* Get inode */
error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR);
if (error)
- goto out;
+ return error;
dentry = fhp->fh_dentry;
inode = dentry->d_inode;
@@ -444,33 +461,25 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
if (host_error == -EINVAL) {
- error = nfserr_attrnotsupp;
- goto out;
+ return nfserr_attrnotsupp;
} else if (host_error < 0)
goto out_nfserr;
host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
if (host_error < 0)
- goto out_nfserr;
+ goto out_release;
- if (S_ISDIR(inode->i_mode)) {
+ if (S_ISDIR(inode->i_mode))
host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
- if (host_error < 0)
- goto out_nfserr;
- }
-
- error = nfs_ok;
-out:
+out_release:
posix_acl_release(pacl);
posix_acl_release(dpacl);
- return (error);
out_nfserr:
if (host_error == -EOPNOTSUPP)
- error = nfserr_attrnotsupp;
+ return nfserr_attrnotsupp;
else
- error = nfserrno(host_error);
- goto out;
+ return nfserrno(host_error);
}
static struct posix_acl *
@@ -607,7 +616,7 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
sresult |= map->access;
- err2 = nfsd_permission(export, dentry, map->how);
+ err2 = nfsd_permission(rqstp, export, dentry, map->how);
switch (err2) {
case nfs_ok:
result |= map->access;
@@ -1034,7 +1043,7 @@ nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
__be32 err;
if (file) {
- err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
MAY_READ|MAY_OWNER_OVERRIDE);
if (err)
goto out;
@@ -1063,7 +1072,7 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
__be32 err = 0;
if (file) {
- err = nfsd_permission(fhp->fh_export, fhp->fh_dentry,
+ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
MAY_WRITE|MAY_OWNER_OVERRIDE);
if (err)
goto out;
@@ -1792,7 +1801,8 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
* Check for a user's access permissions to this inode.
*/
__be32
-nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
+nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
+ struct dentry *dentry, int acc)
{
struct inode *inode = dentry->d_inode;
int err;
@@ -1823,7 +1833,7 @@ nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
*/
if (!(acc & MAY_LOCAL_ACCESS))
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
- if (EX_RDONLY(exp) || IS_RDONLY(inode))
+ if (EX_RDONLY(exp, rqstp) || IS_RDONLY(inode))
return nfserr_rofs;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index bff01a54675..e93c6142b23 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -21,6 +21,7 @@
*/
#include <linux/dcache.h>
+#include <linux/exportfs.h>
#include <linux/security.h>
#include "attrib.h"
diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h
index 5b77ee7866e..e08bed9e45a 100644
--- a/fs/ocfs2/export.h
+++ b/fs/ocfs2/export.h
@@ -26,6 +26,8 @@
#ifndef OCFS2_EXPORT_H
#define OCFS2_EXPORT_H
+#include <linux/exportfs.h>
+
extern struct export_operations ocfs2_export_ops;
#endif /* OCFS2_EXPORT_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f04c7aa834c..004c2abbc73 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1867,7 +1867,8 @@ static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
loff_t pos;
const struct iovec *cur_iov = iov;
struct page *user_page, *page;
- char *buf, *dst;
+ char * uninitialized_var(buf);
+ char *dst;
void *fsdata;
/*
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bd68c3f2afb..87dcece7e1b 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -63,7 +63,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
goto bail_unlock;
status = -EACCES;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
goto bail_unlock;
if (!S_ISDIR(inode->i_mode))
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ae3627337a9..42cb4f5613b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -283,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
static int proc_pid_wchan(struct task_struct *task, char *buffer)
{
unsigned long wchan;
- char symname[KSYM_NAME_LEN+1];
+ char symname[KSYM_NAME_LEN];
wchan = get_wchan(task);
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d40d22b347b..ef2b46d099f 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -60,6 +60,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
inode->i_blocks = 0;
inode->i_mapping->a_ops = &ramfs_aops;
inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
default:
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1272d11399f..ddde489f1cb 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -7,6 +7,7 @@
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
+#include <linux/exportfs.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index b484d2913c0..11a0fcc2d40 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -51,8 +51,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
if (IS_RDONLY(inode))
return -EROFS;
- if ((current->fsuid != inode->i_uid)
- && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (get_user(flags, (int __user *)arg))
@@ -81,7 +80,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
case REISERFS_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *)arg);
case REISERFS_IOC_SETVERSION:
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (IS_RDONLY(inode))
return -EROFS;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b4ac9119200..5a93cfe1a03 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
+#include <linux/exportfs.h>
#include <linux/vfs.h>
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 5296a29cc5e..b7e4fa4539d 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -21,7 +21,7 @@ xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
if (!reiserfs_posixacl(inode->i_sb))
return -EOPNOTSUPP;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
if (value) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6658afb41cc..d6a504f5d75 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1356,7 +1356,7 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
case UDF_VIRTUAL_MAP15:
case UDF_VIRTUAL_MAP20:
{
- kernel_lb_addr ino;
+ kernel_lb_addr uninitialized_var(ino);
if (!UDF_SB_LASTBLOCK(sb))
{
diff --git a/fs/utimes.c b/fs/utimes.c
index 83a7e69e706..682eb63b20a 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -106,7 +106,7 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
if (IS_IMMUTABLE(inode))
goto dput_and_out;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) {
+ if (!is_owner_or_cap(inode)) {
if (f) {
if (!(f->f_mode & FMODE_WRITE))
goto dput_and_out;
diff --git a/fs/xattr.c b/fs/xattr.c
index 4523aca7965..a44fd92caca 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -60,8 +60,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return -EPERM;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (mask & MAY_WRITE) && (current->fsuid != inode->i_uid) &&
- !capable(CAP_FOWNER))
+ (mask & MAY_WRITE) && !is_owner_or_cap(inode))
return -EPERM;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2df63622354..b0f0e58866d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,10 +35,13 @@
#include <linux/freezer.h>
static kmem_zone_t *xfs_buf_zone;
-static struct shrinker *xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+static struct shrinker xfs_buf_shake = {
+ .shrink = xfsbufd_wakeup,
+ .seeks = DEFAULT_SEEKS,
+};
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
@@ -1832,14 +1835,9 @@ xfs_buf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
- if (!xfs_buf_shake)
- goto out_destroy_xfsdatad_workqueue;
-
+ register_shrinker(&xfs_buf_shake);
return 0;
- out_destroy_xfsdatad_workqueue:
- destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
@@ -1854,7 +1852,7 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- remove_shrinker(xfs_buf_shake);
+ unregister_shrinker(&xfs_buf_shake);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 06894cf00b1..4528f9a3f30 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -562,6 +562,7 @@ xfssyncd(
bhv_vfs_sync_work_t *work, *n;
LIST_HEAD (tmp);
+ set_freezable();
timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
for (;;) {
timeleft = schedule_timeout_interruptible(timeleft);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 33dd1ca1324..201cc3273c8 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -18,6 +18,8 @@
#ifndef __XFS_SUPER_H__
#define __XFS_SUPER_H__
+#include <linux/exportfs.h>
+
#ifdef CONFIG_XFS_DMAPI
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops)
# define vfs_initdmapi() dmapi_init()
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7def4c69934..2d274b23ade 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,6 @@ uint ndquot;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
-static struct shrinker *xfs_qm_shaker;
static cred_t xfs_zerocr;
@@ -78,6 +77,11 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, gfp_t);
+static struct shrinker xfs_qm_shaker = {
+ .shrink = xfs_qm_shake,
+ .seeks = DEFAULT_SEEKS,
+};
+
#ifdef DEBUG
extern mutex_t qcheck_lock;
#endif
@@ -149,7 +153,7 @@ xfs_Gqm_init(void)
} else
xqm->qm_dqzone = qm_dqzone;
- xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
+ register_shrinker(&xfs_qm_shaker);
/*
* The t_dqinfo portion of transactions.
@@ -181,7 +185,7 @@ xfs_qm_destroy(
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
- remove_shrinker(xfs_qm_shaker);
+ unregister_shrinker(&xfs_qm_shaker);
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
diff --git a/include/asm-alpha/fb.h b/include/asm-alpha/fb.h
new file mode 100644
index 00000000000..fa9bbb96b2b
--- /dev/null
+++ b/include/asm-alpha/fb.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/device.h>
+
+/* Caching is off in the I/O space quadrant by design. */
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index d2bed3cb33f..bae7f05716d 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -17,7 +17,8 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
extern void copy_page(void * _to, void * _from);
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 39e492c3bfa..fa13716a11c 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -81,7 +81,7 @@ struct termio {
#define user_termio_to_kernel_termios(a_termios, u_termio) \
({ \
- struct termios *k_termios = (a_termios); \
+ struct ktermios *k_termios = (a_termios); \
struct termio k_termio; \
int canon, ret; \
\
@@ -113,7 +113,7 @@ struct termio {
*/
#define kernel_termios_to_user_termio(u_termio, a_termios) \
({ \
- struct termios *k_termios = (a_termios); \
+ struct ktermios *k_termios = (a_termios); \
struct termio k_termio; \
int canon; \
\
diff --git a/include/asm-arm/fb.h b/include/asm-arm/fb.h
new file mode 100644
index 00000000000..d92e99cd8c8
--- /dev/null
+++ b/include/asm-arm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index cb4c2c9d000..d2e8171d1d4 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -83,14 +83,14 @@
* means that a write to a clean page will cause a permission fault, and
* the Linux MM layer will mark the page dirty via handle_pte_fault().
* For the hardware to notice the permission change, the TLB entry must
- * be flushed, and ptep_establish() does that for us.
+ * be flushed, and ptep_set_access_flags() does that for us.
*
* The "accessed" or "young" bit is emulated by a similar method; we only
* allow accesses to the page if the "young" bit is set. Accesses to the
* page will cause a fault, and handle_pte_fault() will set the young bit
* for us as long as the page is marked present in the corresponding Linux
- * PTE entry. Again, ptep_establish() will ensure that the TLB is up to
- * date.
+ * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
*
* However, when the "young" bit is cleared, we deny access to the page
* by clearing the hardware PTE. Currently Linux does not flush the TLB
diff --git a/include/asm-arm26/fb.h b/include/asm-arm26/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-arm26/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-avr32/fb.h b/include/asm-avr32/fb.h
new file mode 100644
index 00000000000..41baf84ad40
--- /dev/null
+++ b/include/asm-avr32/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+ & ~_PAGE_CACHABLE)
+ | (_PAGE_BUFFER | _PAGE_DIRTY));
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-blackfin/fb.h b/include/asm-blackfin/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-blackfin/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/fb.h b/include/asm-cris/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-cris/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index 9f13c32552b..0648e3153f8 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -20,7 +20,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-frv/fb.h b/include/asm-frv/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-frv/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index adde6998525..147e995bec2 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -388,13 +388,6 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pt
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
- asm volatile("dcf %M0" :: "U"(*ptep));
- return i;
-}
-
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -504,7 +497,6 @@ static inline int pte_file(pte_t pte)
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce5285..344e3091af2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@ struct bug_entry {
#endif
#ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
#endif
#ifndef HAVE_ARCH_WARN_ON
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e9..f605e8d0eed 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
#ifndef __ASSEMBLY__
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- * - flush the old one
- * - update the page tables
- * - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
-#endif
-
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@ do { \
})
#endif
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
-({ \
- pte_t __pte = *__ptep; \
- int r = 1; \
- if (!pte_dirty(__pte)) \
- r = 0; \
- else \
- set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
- pte_mkclean(__pte)); \
- r; \
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
- if (__dirty) \
- flush_tlb_page(__vma, __address); \
- __dirty; \
-})
-#endif
-
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2a..16a466e5068 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
#define get_unaligned(ptr) \
__get_unaligned((ptr), sizeof(*(ptr)))
#define put_unaligned(x,ptr) \
- __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+ ((void)sizeof(*(ptr)=(x)),\
+ __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
/*
* This function doesn't actually exist. The idea is that when
@@ -95,21 +96,21 @@ static inline void __ustw(__u16 val, __u16 *addr)
default: \
bad_unaligned_access_length(); \
}; \
- (__typeof__(*(ptr)))val; \
+ (__force __typeof__(*(ptr)))val; \
})
#define __put_unaligned(val, ptr, size) \
-do { \
+({ \
void *__gu_p = ptr; \
switch (size) { \
case 1: \
- *(__u8 *)__gu_p = val; \
+ *(__u8 *)__gu_p = (__force __u8)val; \
break; \
case 2: \
- __ustw(val, __gu_p); \
+ __ustw((__force __u16)val, __gu_p); \
break; \
case 4: \
- __ustl(val, __gu_p); \
+ __ustl((__force __u32)val, __gu_p); \
break; \
case 8: \
__ustq(val, __gu_p); \
@@ -117,6 +118,7 @@ do { \
default: \
bad_unaligned_access_length(); \
}; \
-} while(0)
+ (void)0; \
+})
#endif /* _ASM_GENERIC_UNALIGNED_H */
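The reworked put_unaligned() is now a statement expression that evaluates to (void)0 and gains a compile-time type check through sizeof(*(ptr) = (x)); the added __force casts keep sparse quiet when endian-annotated types pass through. A hedged usage sketch, assuming a byte buffer with a CPU-endian 16-bit field at an odd offset (the offset and field are invented):

#include <linux/types.h>
#include <asm/unaligned.h>

static void bump_length_field(u8 *frame)
{
	u16 len = get_unaligned((u16 *)(frame + 3));	/* safe on any architecture */

	put_unaligned(len + 4, (u16 *)(frame + 3));	/* type-checked against u16 */
}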
diff --git a/include/asm-h8300/fb.h b/include/asm-h8300/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-h8300/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index 3b4f2903f91..c8cc81a3aca 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -22,7 +22,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 00000000000..d1c6297d4a6
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (boot_cpu_data.x86 > 3)
+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+#endif /* _ASM_FB_H_ */
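fb_pgprotect() gives each architecture one place to fix up the page protection before a framebuffer aperture is mapped to user space; on i386 it marks the mapping cache-disabled (_PAGE_PCD) on anything newer than a 386. A hedged sketch of how a generic mmap path might use it (simplified, bounds checking omitted; not the actual fbmem.c code):

#include <linux/fb.h>
#include <linux/mm.h>
#include <asm/fb.h>

static int map_fb_to_user(struct file *file, struct fb_info *info,
			  struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	fb_pgprotect(file, vma, off);		/* arch chooses the caching attributes */

	return io_remap_pfn_range(vma, vma->vm_start,
				  (info->fix.smem_start + off) >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}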
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e..99cf5d3692a 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 01734e05e63..c7fefa6b12f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -289,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__changed; \
})
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
- int __ret = 0; \
- if (pte_dirty(*(ptep))) \
- __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
- &(ptep)->pte_low); \
- if (__ret) \
- pte_update((vma)->vm_mm, addr, ptep); \
- __ret; \
-})
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
int __ret = 0; \
@@ -311,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__ret; \
})
-/*
- * Rules for using ptep_establish: the pte MUST be a user pte, and
- * must be a present->present transition.
- */
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(vma, address, ptep, pteval) \
-do { \
- set_pte_present((vma)->vm_mm, address, ptep, pteval); \
- flush_tlb_page(vma, address); \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(vma, address, ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
- if (__dirty) \
- flush_tlb_page(vma, address); \
- __dirty; \
-})
-
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep) \
({ \
diff --git a/include/asm-ia64/fb.h b/include/asm-ia64/fb.h
new file mode 100644
index 00000000000..89a397cee90
--- /dev/null
+++ b/include/asm-ia64/fb.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <linux/efi.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h
index 31ee521aeb7..f41b636a0bf 100644
--- a/include/asm-ia64/ioctls.h
+++ b/include/asm-ia64/ioctls.h
@@ -53,6 +53,10 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TCGETS2 _IOR('T',0x2A, struct termios2)
+#define TCSETS2 _IOW('T',0x2B, struct termios2)
+#define TCSETSW2 _IOW('T',0x2C, struct termios2)
+#define TCSETSF2 _IOW('T',0x2D, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 485759ba9e3..d6345464a2b 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -87,12 +87,13 @@ do { \
} while (0)
-#define alloc_zeroed_user_highpage(vma, vaddr) \
-({ \
- struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
- if (page) \
- flush_dcache_page(page); \
- page; \
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+({ \
+ struct page *page = alloc_page_vma( \
+ GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ if (page) \
+ flush_dcache_page(page); \
+ page; \
})
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index f923d811c42..de6d01e24dd 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -395,22 +395,6 @@ ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t
#endif
}
-static inline int
-ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_D_BIT, ptep);
-#else
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
- return 1;
-#endif
-}
-
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -543,8 +527,10 @@ extern void lazy_mmu_prot_update (pte_t pte);
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({ \
int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed) \
- ptep_establish(__vma, __addr, __ptep, __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
__changed; \
})
#endif
@@ -588,7 +574,6 @@ extern void lazy_mmu_prot_update (pte_t pte);
#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h
index 7fae3109ef4..9f162e0089a 100644
--- a/include/asm-ia64/termbits.h
+++ b/include/asm-ia64/termbits.h
@@ -149,6 +149,7 @@ struct ktermios {
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
+#define BOTHER 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
@@ -164,10 +165,12 @@ struct ktermios {
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CIBAUD 002003600000 /* input baud rate */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h
index 08750c2d360..689d218c0c2 100644
--- a/include/asm-ia64/termios.h
+++ b/include/asm-ia64/termios.h
@@ -87,8 +87,10 @@ struct termio {
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
# endif /* __KERNEL__ */
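Taken together, the new TCGETS2/TCSETS2 ioctls, BOTHER, CIBAUD and IBSHIFT let user space request arbitrary baud rates instead of the fixed Bxxx constants, and the termios.h change switches the default copy macros to the larger struct termios2 while keeping _1 variants for the legacy layout. A hedged user-space sketch (the rate is only an example):

#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, TCGETS2/TCSETS2, BOTHER */

int set_custom_baud(int fd, int rate)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;		/* speed comes from c_ospeed/c_ispeed */
	tio.c_ospeed = rate;
	tio.c_ispeed = rate;
	return ioctl(fd, TCSETS2, &tio);
}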
diff --git a/include/asm-m32r/fb.h b/include/asm-m32r/fb.h
new file mode 100644
index 00000000000..d92e99cd8c8
--- /dev/null
+++ b/include/asm-m32r/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 6f6ecf7d14a..04fd183a2c5 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -15,7 +15,8 @@ extern void copy_page(void *to, void *from);
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 35af58c6b81..92d7266783f 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -250,11 +250,6 @@ static inline pte_t pte_mkwrite(pte_t pte)
return pte;
}
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
-}
-
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -348,7 +343,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-m68k/fb.h b/include/asm-m68k/fb.h
new file mode 100644
index 00000000000..380b97ae815
--- /dev/null
+++ b/include/asm-m68k/fb.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SUN3
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
+}
+#else
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (CPU_IS_020_OR_030)
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
+ if (CPU_IS_040_OR_060) {
+ pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
+ /* Use no-cache mode, serialized */
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
+ }
+}
+#endif /* CONFIG_SUN3 */
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/fb.h b/include/asm-m68knommu/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-m68knommu/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h
index 2a1b8bdcb29..9efa0a9851b 100644
--- a/include/asm-m68knommu/page.h
+++ b/include/asm-m68knommu/page.h
@@ -22,7 +22,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-mips/fb.h b/include/asm-mips/fb.h
new file mode 100644
index 00000000000..bd3f68c9ddf
--- /dev/null
+++ b/include/asm-mips/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-mips/sibyte/bcm1480_regs.h b/include/asm-mips/sibyte/bcm1480_regs.h
index bda391d3af8..2738c1366f6 100644
--- a/include/asm-mips/sibyte/bcm1480_regs.h
+++ b/include/asm-mips/sibyte/bcm1480_regs.h
@@ -220,17 +220,25 @@
#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
#define BCM1480_DUART_CHANREG_SPACING 0x100
-#define A_BCM1480_DUART_CHANREG(chan,reg) (A_BCM1480_DUART(chan) \
- + BCM1480_DUART_CHANREG_SPACING*((chan)&1) \
- + (reg))
-#define R_BCM1480_DUART_CHANREG(chan,reg) (BCM1480_DUART_CHANREG_SPACING*((chan)&1) + (reg))
-
-#define R_BCM1480_DUART_IMRREG(chan) (R_DUART_IMR_A + ((chan)&1)*DUART_IMRISR_SPACING)
-#define R_BCM1480_DUART_ISRREG(chan) (R_DUART_ISR_A + ((chan)&1)*DUART_IMRISR_SPACING)
-
-#define A_BCM1480_DUART_IMRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan))
-#define A_BCM1480_DUART_ISRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan))
-#define A_BCM1480_DUART_IN_PORT(chan) (A_BCM1480_DUART(chan) + R_DUART_INP_ORT)
+#define A_BCM1480_DUART_CHANREG(chan, reg) \
+ (A_BCM1480_DUART(chan) + \
+ BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
+#define A_BCM1480_DUART_CTRLREG(chan, reg) \
+ (A_BCM1480_DUART(chan) + \
+ BCM1480_DUART_CHANREG_SPACING * 3 + (reg))
+
+#define R_BCM1480_DUART_IMRREG(chan) \
+ (R_DUART_IMR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
+#define R_BCM1480_DUART_ISRREG(chan) \
+ (R_DUART_ISR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
+
+#define A_BCM1480_DUART_IMRREG(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_IMRREG(chan)))
+#define A_BCM1480_DUART_ISRREG(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_ISRREG(chan)))
+
+#define A_BCM1480_DUART_IN_PORT(chan) \
+ (A_BCM1480_DUART_CTRLREG((chan), R_DUART_IN_PORT))
/*
* These constants are the absolute addresses.
diff --git a/include/asm-mips/sibyte/sb1250_regs.h b/include/asm-mips/sibyte/sb1250_regs.h
index da7c188993c..220b7e94f1b 100644
--- a/include/asm-mips/sibyte/sb1250_regs.h
+++ b/include/asm-mips/sibyte/sb1250_regs.h
@@ -272,59 +272,69 @@
********************************************************************* */
-#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
+#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
#define R_DUART_NUM_PORTS 2
#define A_DUART 0x0010060000
#define DUART_CHANREG_SPACING 0x100
-#define A_DUART_CHANREG(chan,reg) (A_DUART + DUART_CHANREG_SPACING*(chan) + (reg))
-#define R_DUART_CHANREG(chan,reg) (DUART_CHANREG_SPACING*(chan) + (reg))
+
+#define A_DUART_CHANREG(chan, reg) \
+ (A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
#endif /* 1250 & 112x */
-#define R_DUART_MODE_REG_1 0x100
-#define R_DUART_MODE_REG_2 0x110
-#define R_DUART_STATUS 0x120
-#define R_DUART_CLK_SEL 0x130
-#define R_DUART_CMD 0x150
-#define R_DUART_RX_HOLD 0x160
-#define R_DUART_TX_HOLD 0x170
+#define R_DUART_MODE_REG_1 0x000
+#define R_DUART_MODE_REG_2 0x010
+#define R_DUART_STATUS 0x020
+#define R_DUART_CLK_SEL 0x030
+#define R_DUART_CMD 0x050
+#define R_DUART_RX_HOLD 0x060
+#define R_DUART_TX_HOLD 0x070
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define R_DUART_FULL_CTL 0x140
-#define R_DUART_OPCR_X 0x180
-#define R_DUART_AUXCTL_X 0x190
-#endif /* 1250 PASS2 || 112x PASS1 || 1480*/
+#define R_DUART_FULL_CTL 0x040
+#define R_DUART_OPCR_X 0x080
+#define R_DUART_AUXCTL_X 0x090
+#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
/*
* The IMR and ISR can't be addressed with A_DUART_CHANREG,
- * so use this macro instead.
+ * so use these macros instead.
*/
-#define R_DUART_AUX_CTRL 0x310
-#define R_DUART_ISR_A 0x320
-#define R_DUART_IMR_A 0x330
-#define R_DUART_ISR_B 0x340
-#define R_DUART_IMR_B 0x350
-#define R_DUART_OUT_PORT 0x360
-#define R_DUART_OPCR 0x370
-#define R_DUART_IN_PORT 0x380
+#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
+#define DUART_IMRISR_SPACING 0x20
+#define DUART_INCHNG_SPACING 0x10
-#define R_DUART_SET_OPR 0x3B0
-#define R_DUART_CLEAR_OPR 0x3C0
+#define A_DUART_CTRLREG(reg) \
+ (A_DUART + DUART_CHANREG_SPACING * 3 + (reg))
-#define DUART_IMRISR_SPACING 0x20
+#define R_DUART_IMRREG(chan) \
+ (R_DUART_IMR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_ISRREG(chan) \
+ (R_DUART_ISR_A + (chan) * DUART_IMRISR_SPACING)
+#define R_DUART_INCHREG(chan) \
+ (R_DUART_IN_CHNG_A + (chan) * DUART_INCHNG_SPACING)
-#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
-#define R_DUART_IMRREG(chan) (R_DUART_IMR_A + (chan)*DUART_IMRISR_SPACING)
-#define R_DUART_ISRREG(chan) (R_DUART_ISR_A + (chan)*DUART_IMRISR_SPACING)
-
-#define A_DUART_IMRREG(chan) (A_DUART + R_DUART_IMRREG(chan))
-#define A_DUART_ISRREG(chan) (A_DUART + R_DUART_ISRREG(chan))
+#define A_DUART_IMRREG(chan) A_DUART_CTRLREG(R_DUART_IMRREG(chan))
+#define A_DUART_ISRREG(chan) A_DUART_CTRLREG(R_DUART_ISRREG(chan))
+#define A_DUART_INCHREG(chan) A_DUART_CTRLREG(R_DUART_INCHREG(chan))
#endif /* 1250 & 112x */
-
+#define R_DUART_AUX_CTRL 0x010
+#define R_DUART_ISR_A 0x020
+#define R_DUART_IMR_A 0x030
+#define R_DUART_ISR_B 0x040
+#define R_DUART_IMR_B 0x050
+#define R_DUART_OUT_PORT 0x060
+#define R_DUART_OPCR 0x070
+#define R_DUART_IN_PORT 0x080
+
+#define R_DUART_SET_OPR 0x0B0
+#define R_DUART_CLEAR_OPR 0x0C0
+#define R_DUART_IN_CHNG_A 0x0D0
+#define R_DUART_IN_CHNG_B 0x0E0
/*
diff --git a/include/asm-mips/sibyte/sb1250_uart.h b/include/asm-mips/sibyte/sb1250_uart.h
index e87045e62bf..cf74fedcbef 100644
--- a/include/asm-mips/sibyte/sb1250_uart.h
+++ b/include/asm-mips/sibyte/sb1250_uart.h
@@ -75,7 +75,8 @@
#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
-#define M_DUART_ERR_MODE _SB_MAKEMASK1(5) /* must be zero */
+#define M_DUART_TX_IRQ_SEL_TXRDY 0
+#define M_DUART_TX_IRQ_SEL_TXEMPT _SB_MAKEMASK1(5)
#define M_DUART_RX_IRQ_SEL_RXRDY 0
#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6)
@@ -246,10 +247,13 @@
#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL_A _SB_MAKEMASK(4,0)
+
#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_ISR_ALL_B _SB_MAKEMASK(4,4)
/*
* DUART Channel A Interrupt Status Register (Table 10-17)
@@ -262,6 +266,7 @@
#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
+#define M_DUART_ISR_ALL _SB_MAKEMASK(4,0)
#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4,4)
/*
diff --git a/include/asm-parisc/fb.h b/include/asm-parisc/fb.h
new file mode 100644
index 00000000000..4d503a023ab
--- /dev/null
+++ b/include/asm-parisc/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 7e222c8ba73..e88cacd6372 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -447,21 +447,6 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
#endif
}
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-#ifdef CONFIG_SMP
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
-#else
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
- return 1;
-#endif
-}
-
extern spinlock_t pa_dbit_lock;
struct mm_struct;
@@ -529,7 +514,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-powerpc/fb.h b/include/asm-powerpc/fb.h
new file mode 100644
index 00000000000..411af8d17a6
--- /dev/null
+++ b/include/asm-powerpc/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index b0e40ff32ee..9537fda238b 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -65,10 +65,10 @@ typedef unsigned int kprobe_opcode_t;
} else if (name[0] != '.') \
addr = *(kprobe_opcode_t **)addr; \
} else { \
- char dot_name[KSYM_NAME_LEN+1]; \
+ char dot_name[KSYM_NAME_LEN]; \
dot_name[0] = '.'; \
dot_name[1] = '\0'; \
- strncat(dot_name, name, KSYM_NAME_LEN); \
+ strncat(dot_name, name, KSYM_NAME_LEN - 2); \
addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
} \
}
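The kprobes change follows directly from KSYM_NAME_LEN now counting the terminating NUL: dot_name[] must hold one leading '.', at most KSYM_NAME_LEN - 2 characters copied from the symbol name, and the NUL that strncat() always appends. The same arithmetic outside the macro, as a small hedged sketch:

#include <string.h>

#define EXAMPLE_KSYM_NAME_LEN 128	/* mirrors the new definition: NUL included */

static void make_dot_name(char dst[EXAMPLE_KSYM_NAME_LEN], const char *name)
{
	dst[0] = '.';
	dst[1] = '\0';
	/* 1 byte for '.', 1 reserved for the NUL => at most LEN - 2 from name */
	strncat(dst, name, EXAMPLE_KSYM_NAME_LEN - 2);
}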
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 6c236d4d626..86a54a4a8a2 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -621,13 +621,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 7ca8b5c1001..300f9a199bf 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -292,29 +292,6 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
__r; \
})
-/*
- * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
- * moment we always flush but we need to fix hpte_update and test if the
- * optimisation is worth it.
- */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
- return 0;
- old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
- return (old & _PAGE_DIRTY) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
@@ -342,14 +319,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
__young; \
})
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
- __ptep); \
- __dirty; \
-})
-
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 18aa776313b..c159315d2c8 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -654,13 +654,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
diff --git a/include/asm-s390/fb.h b/include/asm-s390/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-s390/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 05ea6f17278..f326451ed6e 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -64,7 +64,8 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 26215a97612..3208dc6c412 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -669,19 +669,6 @@ ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- return 0;
-}
-
-static inline int
-ptep_clear_flush_dirty(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
-{
- /* No need to flush TLB; bits are in storage key */
- return ptep_test_and_clear_dirty(vma, address, ptep);
-}
-
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
@@ -707,16 +694,19 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
-static inline pte_t
-ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
{
- pte_t pte = *ptep;
- pte_t *shadow_pte = get_shadow_pte(ptep);
-
__ptep_ipte(address, ptep);
- if (shadow_pte)
- __ptep_ipte(address, shadow_pte);
+ ptep = get_shadow_pte(ptep);
+ if (ptep)
+ __ptep_ipte(address, ptep);
+}
+
+static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ ptep_invalidate(address, ptep);
return pte;
}
@@ -726,21 +716,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
-static inline void
-ptep_establish(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry)
-{
- ptep_clear_flush(vma, address, ptep);
- set_pte(ptep, entry);
-}
-
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed) \
- ptep_establish(__vma, __address, __ptep, __entry); \
- __changed; \
+#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ ptep_invalidate(__addr, __ptep); \
+ set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
+ } \
+ __changed; \
})
/*
@@ -940,12 +923,9 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
-#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-sh/fb.h b/include/asm-sh/fb.h
new file mode 100644
index 00000000000..d92e99cd8c8
--- /dev/null
+++ b/include/asm-sh/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sh64/fb.h b/include/asm-sh64/fb.h
new file mode 100644
index 00000000000..d92e99cd8c8
--- /dev/null
+++ b/include/asm-sh64/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc/fb.h b/include/asm-sparc/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-sparc/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc64/fb.h b/include/asm-sparc64/fb.h
new file mode 100644
index 00000000000..d6cd3a175fc
--- /dev/null
+++ b/include/asm-sparc64/fb.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-v850/fb.h b/include/asm-v850/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-v850/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h
new file mode 100644
index 00000000000..60548e651d1
--- /dev/null
+++ b/include/asm-x86_64/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (boot_cpu_data.x86 > 3)
+ pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e327c830da0..88adf1afb0a 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -48,7 +48,8 @@ void copy_page(void *, void *);
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
* These are used to make use of C type-checking..
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 4f169ac6b10..3ba53099297 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -284,13 +284,6 @@ static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) &
struct vm_area_struct;
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
-}
-
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
if (!pte_young(*ptep))
@@ -427,7 +420,6 @@ extern int kern_addr_valid(unsigned long addr);
(((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-xtensa/fb.h b/include/asm-xtensa/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/include/asm-xtensa/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index e9fc512cc24..06850f3b26a 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -267,17 +267,6 @@ ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
return 1;
}
-static inline int
-ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- if (!pte_dirty(pte))
- return 0;
- update_pte(ptep, pte_mkclean(pte));
- return 1;
-}
-
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -418,7 +407,6 @@ typedef pte_t *pte_addr_t;
#endif /* !defined (__ASSEMBLY__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b32564a1e10..f78965fc642 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
*/
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
-#ifdef CONFIG_MMU
+#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
new file mode 100644
index 00000000000..1786e772d5c
--- /dev/null
+++ b/include/linux/crc7.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CRC7_H
+#define _LINUX_CRC7_H
+#include <linux/types.h>
+
+extern const u8 crc7_syndrome_table[256];
+
+static inline u8 crc7_byte(u8 crc, u8 data)
+{
+ return crc7_syndrome_table[(crc << 1) ^ data];
+}
+
+extern u8 crc7(u8 crc, const u8 *buffer, size_t len);
+
+#endif
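crc7() simply folds crc7_byte() over the buffer, so a caller can checksum data in one call or feed it in byte by byte (for example while an SPI transfer is being assembled) and get the same 7-bit result. A hedged sketch showing both forms:

#include <linux/crc7.h>

static u8 checksum_frame(const u8 *buf, size_t len)
{
	u8 whole = crc7(0, buf, len);		/* one-shot over the buffer */
	u8 step = 0;
	size_t i;

	for (i = 0; i < len; i++)		/* byte-at-a-time gives the same value */
		step = crc7_byte(step, buf[i]);

	return whole;				/* whole == step */
}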
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index dfed8009ebf..16cb25cbf7c 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -45,6 +45,7 @@ extern efs_block_t efs_map_block(struct inode *, efs_block_t);
extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp);
extern struct dentry *efs_get_parent(struct dentry *);
extern int efs_bmap(struct inode *, int);
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
new file mode 100644
index 00000000000..8872fe8392d
--- /dev/null
+++ b/include/linux/exportfs.h
@@ -0,0 +1,126 @@
+#ifndef LINUX_EXPORTFS_H
+#define LINUX_EXPORTFS_H 1
+
+#include <linux/types.h>
+
+struct dentry;
+struct super_block;
+struct vfsmount;
+
+
+/**
+ * struct export_operations - for nfsd to communicate with file systems
+ * @decode_fh: decode a file handle fragment and return a &struct dentry
+ * @encode_fh: encode a file handle fragment from a dentry
+ * @get_name: find the name for a given inode in a given directory
+ * @get_parent: find the parent of a given directory
+ * @get_dentry: find a dentry for the inode given a file handle sub-fragment
+ * @find_exported_dentry:
+ * set by the exporting module to a standard helper function.
+ *
+ * Description:
+ * The export_operations structure provides a means for nfsd to communicate
+ * with a particular exported file system - particularly enabling nfsd and
+ * the filesystem to co-operate when dealing with file handles.
+ *
+ * export_operations contains two basic operations for dealing with file
+ * handles, decode_fh() and encode_fh(), and allows for some other
+ * operations to be defined which standard helper routines use to get
+ * specific information from the filesystem.
+ *
+ * nfsd encodes information used to determine which filesystem a filehandle
+ * applies to in the initial part of the file handle. The remainder, termed
+ * a file handle fragment, is controlled completely by the filesystem. The
+ * standard helper routines assume that this fragment will contain one or
+ * two sub-fragments, one which identifies the file, and one which may be
+ * used to identify the (a) directory containing the file.
+ *
+ * In some situations, nfsd needs to get a dentry which is connected into a
+ * specific part of the file tree. To allow for this, it passes the
+ * function acceptable() together with a @context which can be used to see
+ * if the dentry is acceptable. As there can be multiple dentrys for a
+ * given file, the filesystem should check each one for acceptability before
+ * looking for the next. As soon as an acceptable one is found, it should
+ * be returned.
+ *
+ * decode_fh:
+ * @decode_fh is given a &struct super_block (@sb), a file handle fragment
+ * (@fh, @fh_len) and an acceptability testing function (@acceptable,
+ * @context). It should return a &struct dentry which refers to the same
+ * file that the file handle fragment refers to, and which passes the
+ * acceptability test. If it cannot, it should return a %NULL pointer if
+ * the file was found but no acceptable &dentries were available, or a
+ * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
+ * %ENOMEM).
+ *
+ * encode_fh:
+ * @encode_fh should store in the file handle fragment @fh (using at most
+ * @max_len bytes) information that can be used by @decode_fh to recover the
+ * file referred to by the &struct dentry @de. If the @connectable flag is
+ * set, the encode_fh() should store sufficient information so that a good
+ * attempt can be made to find not only the file but also its place in the
+ * filesystem. This typically means storing a reference to de->d_parent in
+ * the filehandle fragment. encode_fh() should return the number of bytes
+ * stored or a negative error code such as %-ENOSPC
+ *
+ * get_name:
+ * @get_name should find a name for the given @child in the given @parent
+ * directory. The name should be stored in the @name (with the
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
+ * buffer. get_name() should return %0 on success, a negative error code
+ * or error. @get_name will be called without @parent->i_mutex held.
+ *
+ * get_parent:
+ * @get_parent should find the parent directory for the given @child which
+ * is also a directory. In the event that it cannot be found, or storage
+ * space cannot be allocated, a %ERR_PTR should be returned.
+ *
+ * get_dentry:
+ * Given a &super_block (@sb) and a pointer to a file-system specific inode
+ * identifier, possibly an inode number, (@inump) get_dentry() should find
+ * the identified inode and return a dentry for that inode. Any suitable
+ * dentry can be returned including, if necessary, a new dentry created with
+ * d_alloc_root. The caller can then find any other extant dentrys by
+ * following the d_alias links. If a new dentry was created using
+ * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
+ * should be d_rehash()ed.
+ *
+ * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
+ * can be returned. The @inump will be whatever was passed to
+ * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
+ *
+ * Locking rules:
+ * get_parent is called with child->d_inode->i_mutex down
+ * get_name is not (which is possibly inconsistent)
+ */
+
+struct export_operations {
+ struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh,
+ int fh_len, int fh_type,
+ int (*acceptable)(void *context, struct dentry *de),
+ void *context);
+ int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+ int connectable);
+ int (*get_name)(struct dentry *parent, char *name,
+ struct dentry *child);
+ struct dentry * (*get_parent)(struct dentry *child);
+ struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
+
+ /* This is set by the exporting module to a standard helper */
+ struct dentry * (*find_exported_dentry)(
+ struct super_block *sb, void *obj, void *parent,
+ int (*acceptable)(void *context, struct dentry *de),
+ void *context);
+};
+
+extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj,
+ void *parent, int (*acceptable)(void *context, struct dentry *de),
+ void *context);
+
+extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
+ int connectable);
+extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh,
+ int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ void *context);
+
+#endif /* LINUX_EXPORTFS_H */
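With export_operations moved into its own header, a filesystem that is exported over NFS includes <linux/exportfs.h>, fills in only the hooks it needs, and usually delegates decode_fh() to the find_exported_dentry() helper. A heavily hedged sketch of a minimal instance; everything prefixed examplefs_ is an invented placeholder and the file handle layout shown is purely illustrative:

#include <linux/exportfs.h>

/* Invented placeholders standing in for a real filesystem's helpers. */
extern struct dentry *examplefs_get_parent(struct dentry *child);
extern struct dentry *examplefs_get_dentry(struct super_block *sb, void *inump);

static struct dentry *examplefs_decode_fh(struct super_block *sb, __u32 *fh,
		int fh_len, int fh_type,
		int (*acceptable)(void *context, struct dentry *de),
		void *context)
{
	/*
	 * Hand the file and (optional) parent sub-fragments to the generic
	 * helper; they are opaque cookies that come back via get_dentry().
	 */
	return find_exported_dentry(sb, fh, fh_len > 2 ? fh + 2 : NULL,
				    acceptable, context);
}

static struct export_operations examplefs_export_ops = {
	.decode_fh	= examplefs_decode_fh,
	.get_parent	= examplefs_get_parent,
	.get_dentry	= examplefs_get_dentry,
	/* .find_exported_dentry is set to the standard helper by the exporter */
};

The superblock would then point at this table through sb->s_export_op, as before the move.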
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 66226824ab6..cec54106aa8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -119,6 +119,7 @@ struct dentry;
#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */
#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */
#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */
+#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -529,6 +530,8 @@ struct fb_cursor_user {
#define FB_EVENT_CONBLANK 0x0C
/* Get drawing requirements */
#define FB_EVENT_GET_REQ 0x0D
+/* Unbind from the console if possible */
+#define FB_EVENT_FB_UNBIND 0x0E
struct fb_event {
struct fb_info *info;
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 4631086f506..2d38b1a7466 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,5 +1,8 @@
/* Freezer declarations */
+#ifndef FREEZER_H_INCLUDED
+#define FREEZER_H_INCLUDED
+
#include <linux/sched.h>
#ifdef CONFIG_PM
@@ -115,6 +118,14 @@ static inline int freezer_should_skip(struct task_struct *p)
return !!(p->flags & PF_FREEZER_SKIP);
}
+/*
+ * Tell the freezer that the current task should be frozen by it
+ */
+static inline void set_freezable(void)
+{
+ current->flags &= ~PF_NOFREEZE;
+}
+
#else
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
@@ -130,4 +141,7 @@ static inline int try_to_freeze(void) { return 0; }
static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
+static inline void set_freezable(void) {}
#endif
+
+#endif /* FREEZER_H_INCLUDED */
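set_freezable() is the opt-in side of making kernel threads non-freezable by default: a thread that should be stopped across suspend clears PF_NOFREEZE itself and then polls try_to_freeze() at a safe point. A hedged sketch of the idiom (the loop body is a placeholder):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
	set_freezable();			/* allow the freezer to stop this task */

	while (!kthread_should_stop()) {
		try_to_freeze();		/* parks here during suspend/resume */

		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}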
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e6878081027..98205f68047 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -283,11 +283,14 @@ extern int dir_notify_enable;
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/capability.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
+struct export_operations;
struct hd_geometry;
struct iovec;
struct nameidata;
@@ -988,6 +991,9 @@ enum {
#define put_fs_excl() atomic_dec(&current->fs_excl)
#define has_fs_excl() atomic_read(&current->fs_excl)
+#define is_owner_or_cap(inode) \
+ ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
+
/* not quite ready to be deprecated, but... */
extern void lock_super(struct super_block *);
extern void unlock_super(struct super_block *);
@@ -1277,119 +1283,6 @@ static inline void file_accessed(struct file *file)
int sync_inode(struct inode *inode, struct writeback_control *wbc);
-/**
- * struct export_operations - for nfsd to communicate with file systems
- * @decode_fh: decode a file handle fragment and return a &struct dentry
- * @encode_fh: encode a file handle fragment from a dentry
- * @get_name: find the name for a given inode in a given directory
- * @get_parent: find the parent of a given directory
- * @get_dentry: find a dentry for the inode given a file handle sub-fragment
- * @find_exported_dentry:
- * set by the exporting module to a standard helper function.
- *
- * Description:
- * The export_operations structure provides a means for nfsd to communicate
- * with a particular exported file system - particularly enabling nfsd and
- * the filesystem to co-operate when dealing with file handles.
- *
- * export_operations contains two basic operation for dealing with file
- * handles, decode_fh() and encode_fh(), and allows for some other
- * operations to be defined which standard helper routines use to get
- * specific information from the filesystem.
- *
- * nfsd encodes information use to determine which filesystem a filehandle
- * applies to in the initial part of the file handle. The remainder, termed
- * a file handle fragment, is controlled completely by the filesystem. The
- * standard helper routines assume that this fragment will contain one or
- * two sub-fragments, one which identifies the file, and one which may be
- * used to identify the (a) directory containing the file.
- *
- * In some situations, nfsd needs to get a dentry which is connected into a
- * specific part of the file tree. To allow for this, it passes the
- * function acceptable() together with a @context which can be used to see
- * if the dentry is acceptable. As there can be multiple dentrys for a
- * given file, the filesystem should check each one for acceptability before
- * looking for the next. As soon as an acceptable one is found, it should
- * be returned.
- *
- * decode_fh:
- * @decode_fh is given a &struct super_block (@sb), a file handle fragment
- * (@fh, @fh_len) and an acceptability testing function (@acceptable,
- * @context). It should return a &struct dentry which refers to the same
- * file that the file handle fragment refers to, and which passes the
- * acceptability test. If it cannot, it should return a %NULL pointer if
- * the file was found but no acceptable &dentries were available, or a
- * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
- * %ENOMEM).
- *
- * encode_fh:
- * @encode_fh should store in the file handle fragment @fh (using at most
- * @max_len bytes) information that can be used by @decode_fh to recover the
- * file refered to by the &struct dentry @de. If the @connectable flag is
- * set, the encode_fh() should store sufficient information so that a good
- * attempt can be made to find not only the file but also it's place in the
- * filesystem. This typically means storing a reference to de->d_parent in
- * the filehandle fragment. encode_fh() should return the number of bytes
- * stored or a negative error code such as %-ENOSPC
- *
- * get_name:
- * @get_name should find a name for the given @child in the given @parent
- * directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a a %NAME_MAX+1 sized
- * buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_mutex held.
- *
- * get_parent:
- * @get_parent should find the parent directory for the given @child which
- * is also a directory. In the event that it cannot be found, or storage
- * space cannot be allocated, a %ERR_PTR should be returned.
- *
- * get_dentry:
- * Given a &super_block (@sb) and a pointer to a file-system specific inode
- * identifier, possibly an inode number, (@inump) get_dentry() should find
- * the identified inode and return a dentry for that inode. Any suitable
- * dentry can be returned including, if necessary, a new dentry created with
- * d_alloc_root. The caller can then find any other extant dentrys by
- * following the d_alias links. If a new dentry was created using
- * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
- * should be d_rehash()ed.
- *
- * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
- * can be returned. The @inump will be whatever was passed to
- * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
- *
- * Locking rules:
- * get_parent is called with child->d_inode->i_mutex down
- * get_name is not (which is possibly inconsistent)
- */
-
-struct export_operations {
- struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, int fh_len, int fh_type,
- int (*acceptable)(void *context, struct dentry *de),
- void *context);
- int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
- int connectable);
-
- /* the following are only called from the filesystem itself */
- int (*get_name)(struct dentry *parent, char *name,
- struct dentry *child);
- struct dentry * (*get_parent)(struct dentry *child);
- struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
-
- /* This is set by the exporting module to a standard helper */
- struct dentry * (*find_exported_dentry)(
- struct super_block *sb, void *obj, void *parent,
- int (*acceptable)(void *context, struct dentry *de),
- void *context);
-
-
-};
-
-extern struct dentry *
-find_exported_dentry(struct super_block *sb, void *obj, void *parent,
- int (*acceptable)(void *context, struct dentry *de),
- void *context);
-
struct file_system_type {
const char *name;
int fs_flags;
@@ -1526,7 +1419,7 @@ extern void putname(const char *name);
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
-extern int unregister_blkdev(unsigned int, const char *);
+extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
@@ -2050,5 +1943,9 @@ static inline void free_secdata(void *secdata)
{ }
#endif /* CONFIG_SECURITY */
+int proc_nr_files(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
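is_owner_or_cap() names the recurring "caller owns the inode or holds CAP_FOWNER" test that chattr-style and attribute-changing ioctls keep open-coding. A hedged sketch of a permission check built on it (the function is illustrative, not an existing VFS entry point):

#include <linux/fs.h>
#include <linux/errno.h>

static int example_check_flags_access(struct inode *inode)
{
	if (!is_owner_or_cap(inode))
		return -EACCES;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	return 0;
}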
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 12e631f0fb7..695741b0e42 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -112,7 +112,7 @@ struct fsl_usb2_platform_data {
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
u16 bus_num;
-
+ bool qe_mode;
/* board specific information */
u16 max_chipselect;
void (*activate_cs)(u8 cs, u8 polarity);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0d2ef0b082a..bc68dd9a6d4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,9 @@ struct vm_area_struct;
* cannot handle allocation failures.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ *
+ * __GFP_MOVABLE: Flag that this page will be movable by the page migration
+ * mechanism or reclaimed
*/
#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
@@ -45,6 +48,7 @@ struct vm_area_struct;
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
+#define __GFP_MOVABLE ((__force gfp_t)0x80000u) /* Page is movable */
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +57,8 @@ struct vm_area_struct;
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
- __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
+ __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
+ __GFP_MOVABLE)
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
@@ -65,6 +70,15 @@ struct vm_area_struct;
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
__GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+ __GFP_HARDWALL | __GFP_HIGHMEM | \
+ __GFP_MOVABLE)
+#define GFP_NOFS_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
+#define GFP_USER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+ __GFP_HARDWALL | __GFP_MOVABLE)
+#define GFP_HIGHUSER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+ __GFP_HARDWALL | __GFP_HIGHMEM | \
+ __GFP_MOVABLE)
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
@@ -92,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
if (flags & __GFP_DMA32)
return ZONE_DMA32;
#endif
+ if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
+ (__GFP_HIGHMEM | __GFP_MOVABLE))
+ return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM)
return ZONE_HIGHMEM;
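__GFP_MOVABLE tags allocations the kernel may later migrate or reclaim; combined with __GFP_HIGHMEM it makes gfp_zone() return ZONE_MOVABLE, and the GFP_HIGHUSER_MOVABLE / GFP_*_PAGECACHE composites are the intended spellings for callers. A hedged sketch of the distinction (the helper names are invented):

#include <linux/gfp.h>

/* User-visible data that can be migrated: eligible for ZONE_MOVABLE. */
static struct page *alloc_user_data_page(void)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

/* Long-lived kernel data that must stay put: never pass __GFP_MOVABLE. */
static struct page *alloc_pinned_kernel_page(void)
{
	return alloc_page(GFP_KERNEL);
}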
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 98e2cce996a..12c5e4e3135 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,10 +73,27 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+/**
+ * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
+ * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA but the caller is expected
+ * to specify via movableflags whether the page will be movable in the
+ * future or not
+ *
+ * An architecture may override this function by defining
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * implementation.
+ */
static inline struct page *
-alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+__alloc_zeroed_user_highpage(gfp_t movableflags,
+ struct vm_area_struct *vma,
+ unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr);
+ struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
+ vma, vaddr);
if (page)
clear_user_highpage(page, vaddr);
@@ -85,6 +102,36 @@ alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
}
#endif
+/**
+ * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * not be able to move in the future using move_pages() or reclaim. If it
+ * is known that the page can move, use alloc_zeroed_user_highpage_movable
+ */
+static inline struct page *
+alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(0, vma, vaddr);
+}
+
+/**
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or be reclaimed
+ */
+static inline struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+}
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page, KM_USER0);
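
A hedged sketch of a fault-path caller (the function name is hypothetical): anonymous pages can be migrated, so such a path would pick the _movable variant, while a caller that must keep the page in place keeps using alloc_zeroed_user_highpage():

#include <linux/mm.h>
#include <linux/highmem.h>

static struct page *example_anon_fault_alloc(struct vm_area_struct *vma,
					     unsigned long address)
{
	/* Anonymous memory is movable, so ask for a movable zeroed page. */
	return alloc_zeroed_user_highpage_movable(vma, address);
}
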
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2c13715e9dd..49b7053043a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -15,6 +15,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
}
int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
@@ -29,6 +30,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long max_huge_pages;
+extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 5f06527dca2..f73de6fb5c6 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,9 +7,9 @@
#include <linux/errno.h>
-#define KSYM_NAME_LEN 127
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + KSYM_NAME_LEN + \
- 2*(BITS_PER_LONG*3/10) + MODULE_NAME_LEN + 1)
+#define KSYM_NAME_LEN 128
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
#ifdef CONFIG_KALLSYMS
/* Lookup the address for a symbol. Returns 0 if not found. */
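
KSYM_NAME_LEN now counts the terminating NUL, so lookup buffers are declared with KSYM_NAME_LEN instead of KSYM_NAME_LEN + 1. A small illustrative sketch (the function name is made up):

#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void example_print_symbol(unsigned long addr)
{
	char namebuf[KSYM_NAME_LEN];	/* no "+ 1" needed any more */
	unsigned long size, offset;
	char *modname;
	const char *name;

	name = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
	if (name)
		printk(KERN_DEBUG "%s+%#lx/%#lx\n", name, offset, size);
}
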
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7a485250591..1eb9cde550c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -210,6 +210,7 @@ extern enum system_states {
#define TAINT_MACHINE_CHECK (1<<4)
#define TAINT_BAD_PAGE (1<<5)
#define TAINT_USER (1<<6)
+#define TAINT_DIE (1<<7)
extern void dump_stack(void);
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index aea34e74c49..8c4350a9ed8 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -64,7 +64,7 @@ struct capi20_appl {
unsigned long nrecvdatapkt;
unsigned long nsentctlpkt;
unsigned long nsentdatapkt;
- struct semaphore recv_sem;
+ struct mutex recv_mtx;
struct sk_buff_head recv_queue;
struct work_struct recv_work;
int release_in_progress;
diff --git a/include/linux/limits.h b/include/linux/limits.h
index eaf2e099f12..2d0f94162fb 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -5,8 +5,6 @@
#define NGROUPS_MAX 65536 /* supplemental group IDs are available */
#define ARG_MAX 131072 /* # bytes of args + environ for exec() */
-#define CHILD_MAX 999 /* no limit :-) */
-#define OPEN_MAX 256 /* # open files a process may have */
#define LINK_MAX 127 /* # links a file may have */
#define MAX_CANON 255 /* size of the canonical input queue */
#define MAX_INPUT 255 /* size of the type-ahead buffer */
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 9c01bde5bf1..08a92969c76 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -33,5 +33,13 @@ struct linux_logo {
};
extern const struct linux_logo *fb_find_logo(int depth);
+#ifdef CONFIG_FB_LOGO_EXTRA
+extern void fb_append_extra_logo(const struct linux_logo *logo,
+ unsigned int n);
+#else
+static inline void fb_append_extra_logo(const struct linux_logo *logo,
+ unsigned int n)
+{}
+#endif
#endif /* _LINUX_LINUX_LOGO_H */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 246de1d84a2..6f1637c61e1 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -27,6 +27,7 @@ struct nlmsvc_binding {
struct nfs_fh *,
struct file **);
void (*fclose)(struct file *);
+ unsigned long (*get_grace_period)(void);
};
extern struct nlmsvc_binding * nlmsvc_ops;
@@ -38,4 +39,12 @@ extern int nlmclnt_proc(struct inode *, int, struct file_lock *);
extern int lockd_up(int proto);
extern void lockd_down(void);
+unsigned long get_nfs_grace_period(void);
+
+#ifdef CONFIG_NFSD_V4
+unsigned long get_nfs4_grace_period(void);
+#else
+static inline unsigned long get_nfs4_grace_period(void) {return 0;}
+#endif
+
#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 9d713c03e3d..36cc20dfd14 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,7 +13,6 @@
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
-#define KVMFS_SUPER_MAGIC 0x19700426
#define ANON_INODE_FS_MAGIC 0x09041934
#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index daabb3aa1ec..e147cf50529 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, gfp_t gfp_flags);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -256,9 +256,9 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
#define set_cpuset_being_rebound(x) do {} while (0)
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr)
+ unsigned long addr, gfp_t gfp_flags)
{
- return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+ return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
}
static inline int do_migrate_pages(struct mm_struct *mm,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cddfd22..a5c451816fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -599,6 +599,7 @@ static inline struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
+ VM_BUG_ON(PageSlab(page));
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
#ifdef CONFIG_SLUB
@@ -810,27 +811,31 @@ extern unsigned long do_mremap(unsigned long addr,
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
+ * A callback you can register to apply pressure to ageable caches.
*
- * The callback must return the number of objects which remain in the cache.
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
*
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
- */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
*/
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
/*
 * Some shared mappings will want the pages marked read-only
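
A minimal conversion sketch for the new shrinker interface, assuming a hypothetical cache with its own object count; only the registration calls are real API, the cache itself is made up:

#include <linux/mm.h>
#include <linux/module.h>

static int my_cache_objects;	/* hypothetical cache bookkeeping */

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return my_cache_objects;	/* the VM is only querying the size */

	/* ... walk the cache LRU and free up to nr_to_scan objects ... */
	return my_cache_objects;		/* objects left in the cache */
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

static int __init my_cache_init(void)
{
	register_shrinker(&my_shrinker);	/* replaces set_shrinker() */
	return 0;
}
module_init(my_cache_init);

static void __exit my_cache_exit(void)
{
	unregister_shrinker(&my_shrinker);	/* replaces remove_shrinker() */
}
module_exit(my_cache_exit);
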
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04b1636a970..da8eb8ad9e9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,6 +24,14 @@
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
+/*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service. That is between allocation orders which should
+ * coalesce naturally under reasonable reclaim pressure and those which
+ * will not.
+ */
+#define PAGE_ALLOC_COSTLY_ORDER 3
+
struct free_area {
struct list_head free_list;
unsigned long nr_free;
@@ -146,6 +154,7 @@ enum zone_type {
*/
ZONE_HIGHMEM,
#endif
+ ZONE_MOVABLE,
MAX_NR_ZONES
};
@@ -167,6 +176,7 @@ enum zone_type {
+ defined(CONFIG_ZONE_DMA32) \
+ 1 \
+ defined(CONFIG_HIGHMEM) \
+ + 1 \
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
@@ -499,10 +509,22 @@ static inline int populated_zone(struct zone *zone)
return (!!zone->present_pages);
}
+extern int movable_zone;
+
+static inline int zone_movable_is_highmem(void)
+{
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+ return movable_zone == ZONE_HIGHMEM;
+#else
+ return 0;
+#endif
+}
+
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
- return (idx == ZONE_HIGHMEM);
+ return (idx == ZONE_HIGHMEM ||
+ (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
return 0;
#endif
@@ -522,7 +544,9 @@ static inline int is_normal_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
- return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+ int zone_idx = zone - zone->zone_pgdat->node_zones;
+ return zone_idx == ZONE_HIGHMEM ||
+ (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
return 0;
#endif
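
Because ZONE_MOVABLE may be carved out of either highmem or lowmem, per-zone code should use is_highmem()/is_highmem_idx() rather than comparing against ZONE_HIGHMEM directly. An illustrative walker (name and purpose hypothetical):

#include <linux/mmzone.h>

static unsigned long example_node_highmem_pages(pg_data_t *pgdat)
{
	unsigned long pages = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		/* counts ZONE_MOVABLE too, when it is backed by highmem */
		if (is_highmem(zone))
			pages += zone->present_pages;
	}
	return pages;
}
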
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 9f62d6182d3..78feb7beff7 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -42,6 +42,9 @@
#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
#define NFSEXP_ALLFLAGS 0xFE3F
+/* The flags that may vary depending on security flavor: */
+#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
+ | NFSEXP_ALLSQUASH)
#ifdef __KERNEL__
@@ -64,6 +67,19 @@ struct nfsd4_fs_locations {
int migrated;
};
+/*
+ * We keep an array of pseudoflavors with the export, in order from most
+ * to least preferred. For the foreseeable future, we don't expect more
+ * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, spkm3,
+ * spkm3i, and spkm3p (and using all 8 at once should be rare).
+ */
+#define MAX_SECINFO_LIST 8
+
+struct exp_flavor_info {
+ u32 pseudoflavor;
+ u32 flags;
+};
+
struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
@@ -76,6 +92,8 @@ struct svc_export {
int ex_fsid;
unsigned char * ex_uuid; /* 16 byte fsid */
struct nfsd4_fs_locations ex_fslocs;
+ int ex_nflavors;
+ struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
};
/* an "export key" (expkey) maps a filehandle fragment to an
@@ -95,10 +113,22 @@ struct svc_expkey {
#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
-#define EX_RDONLY(exp) ((exp)->ex_flags & NFSEXP_READONLY)
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
+static inline int EX_RDONLY(struct svc_export *exp, struct svc_rqst *rqstp)
+{
+ struct exp_flavor_info *f;
+ struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
+
+ for (f = exp->ex_flavors; f < end; f++) {
+ if (f->pseudoflavor == rqstp->rq_flavor)
+ return f->flags & NFSEXP_READONLY;
+ }
+ return exp->ex_flags & NFSEXP_READONLY;
+}
+
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
/*
* Function declarations
@@ -112,13 +142,19 @@ struct svc_export * exp_get_by_name(struct auth_domain *clp,
struct vfsmount *mnt,
struct dentry *dentry,
struct cache_req *reqp);
+struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
+ struct vfsmount *,
+ struct dentry *);
struct svc_export * exp_parent(struct auth_domain *clp,
struct vfsmount *mnt,
struct dentry *dentry,
struct cache_req *reqp);
+struct svc_export * rqst_exp_parent(struct svc_rqst *,
+ struct vfsmount *mnt,
+ struct dentry *dentry);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
-__be32 exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq);
+__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
__be32 nfserrno(int errno);
extern struct cache_detail svc_export_cache;
@@ -135,6 +171,7 @@ static inline void exp_get(struct svc_export *exp)
extern struct svc_export *
exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
struct cache_req *reqp);
+struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
#endif /* __KERNEL__ */
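
A hedged sketch of how a write-side check would use the reworked EX_RDONLY(), which now matches rqstp->rq_flavor against the export's per-pseudoflavor flags before falling back to ex_flags (the wrapper name is illustrative):

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>

static __be32 example_check_write(struct svc_rqst *rqstp,
				  struct svc_export *exp)
{
	if (EX_RDONLY(exp, rqstp))
		return nfserr_rofs;
	/* also verify the request used an acceptable security flavor */
	return check_nfsd_access(exp, rqstp);
}
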
diff --git a/include/linux/nfsd/interface.h b/include/linux/nfsd/interface.h
deleted file mode 100644
index af0979704af..00000000000
--- a/include/linux/nfsd/interface.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * include/linux/nfsd/interface.h
- *
- * defines interface between nfsd and other bits of
- * the kernel. Particularly filesystems (eventually).
- *
- * Copyright (C) 2000 Neil Brown <neilb@cse.unsw.edu.au>
- */
-
-#ifndef LINUX_NFSD_INTERFACE_H
-#define LINUX_NFSD_INTERFACE_H
-
-#endif /* LINUX_NFSD_INTERFACE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 72feac581aa..e452256d3f7 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -22,7 +22,6 @@
#include <linux/nfsd/export.h>
#include <linux/nfsd/auth.h>
#include <linux/nfsd/stats.h>
-#include <linux/nfsd/interface.h>
/*
* nfsd version
*/
@@ -72,6 +71,9 @@ int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp);
__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
const char *, int, struct svc_fh *);
+__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
+ const char *, int,
+ struct svc_export **, struct dentry **);
__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
struct iattr *, int, time_t);
#ifdef CONFIG_NFSD_V4
@@ -120,7 +122,8 @@ __be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
struct kstatfs *);
int nfsd_notify_change(struct inode *, struct iattr *);
-__be32 nfsd_permission(struct svc_export *, struct dentry *, int);
+__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
+ struct dentry *, int);
int nfsd_sync_dir(struct dentry *dp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -149,6 +152,7 @@ extern int nfsd_max_blksize;
* NFSv4 State
*/
#ifdef CONFIG_NFSD_V4
+extern unsigned int max_delegations;
void nfs4_state_init(void);
int nfs4_state_start(void);
void nfs4_state_shutdown(void);
@@ -236,6 +240,7 @@ void nfsd_lockd_shutdown(void);
#define nfserr_badname __constant_htonl(NFSERR_BADNAME)
#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN)
#define nfserr_locked __constant_htonl(NFSERR_LOCKED)
+#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC)
#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME)
/* error codes for internal use */
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index ab5c236bd9a..db348f74937 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -67,7 +67,7 @@ struct nfs4_cb_recall {
int cbr_trunc;
stateid_t cbr_stateid;
u32 cbr_fhlen;
- u32 cbr_fhval[NFS4_FHSIZE];
+ char cbr_fhval[NFS4_FHSIZE];
struct nfs4_delegation *cbr_dp;
};
@@ -224,6 +224,7 @@ struct nfs4_file {
struct inode *fi_inode;
u32 fi_id; /* used with stateowner->so_id
* for stateid_hashtbl hash */
+ bool fi_had_conflict;
};
/*
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 09799bcee0a..1b653267133 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -293,6 +293,12 @@ struct nfsd4_rename {
struct nfsd4_change_info rn_tinfo; /* response */
};
+struct nfsd4_secinfo {
+ u32 si_namelen; /* request */
+ char *si_name; /* request */
+ struct svc_export *si_exp; /* response */
+};
+
struct nfsd4_setattr {
stateid_t sa_stateid; /* request */
u32 sa_bmval[2]; /* request */
@@ -365,6 +371,7 @@ struct nfsd4_op {
struct nfsd4_remove remove;
struct nfsd4_rename rename;
clientid_t renew;
+ struct nfsd4_secinfo secinfo;
struct nfsd4_setattr setattr;
struct nfsd4_setclientid setclientid;
struct nfsd4_setclientid_confirm setclientid_confirm;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 9431101bf87..576f2bb34cc 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -196,6 +196,8 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */
#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
+#define CPU_DYING 0x000A /* CPU (unsigned)v not running any task,
+ * not handling interrupts, soon dead */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -208,6 +210,7 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
+#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index eeb1976ef7b..ae8146abd74 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -110,6 +110,8 @@ static inline void ptrace_unlink(struct task_struct *child)
__ptrace_unlink(child);
}
+int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
#ifndef force_successful_syscall_return
/*
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index dd5a05d03d4..75e17a05540 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -262,7 +262,7 @@ int bitmap_active(struct bitmap *bitmap);
char *file_path(struct file *file, char *buf, int count);
void bitmap_print_sb(struct bitmap *bitmap);
-int bitmap_update_sb(struct bitmap *bitmap);
+void bitmap_update_sb(struct bitmap *bitmap);
int bitmap_setallbits(struct bitmap *bitmap);
void bitmap_write_all(struct bitmap *bitmap);
@@ -278,8 +278,8 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int d
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
-int bitmap_unplug(struct bitmap *bitmap);
-int bitmap_daemon_work(struct bitmap *bitmap);
+void bitmap_unplug(struct bitmap *bitmap);
+void bitmap_daemon_work(struct bitmap *bitmap);
#endif
#endif
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c..28ac632b42d 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -51,7 +51,7 @@ struct mdk_rdev_s
sector_t size; /* Device size (in blocks) */
mddev_t *mddev; /* RAID array if running */
- unsigned long last_events; /* IO event timestamp */
+ long last_events; /* IO event timestamp */
struct block_device *bdev; /* block device handle */
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
new file mode 100644
index 00000000000..e8c7c21ceb1
--- /dev/null
+++ b/include/linux/rtc/m48t59.h
@@ -0,0 +1,57 @@
+/*
+ * include/linux/rtc/m48t59.h
+ *
+ * Definitions for the platform data of m48t59 RTC chip driver.
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_M48T59_H_
+#define _LINUX_RTC_M48T59_H_
+
+/*
+ * M48T59 Register Offset
+ */
+#define M48T59_YEAR 0x1fff
+#define M48T59_MONTH 0x1ffe
+#define M48T59_MDAY 0x1ffd /* Day of Month */
+#define M48T59_WDAY 0x1ffc /* Day of Week */
+#define M48T59_WDAY_CB 0x20 /* Century Bit */
+#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */
+#define M48T59_HOUR 0x1ffb
+#define M48T59_MIN 0x1ffa
+#define M48T59_SEC 0x1ff9
+#define M48T59_CNTL 0x1ff8
+#define M48T59_CNTL_READ 0x40
+#define M48T59_CNTL_WRITE 0x80
+#define M48T59_WATCHDOG 0x1ff7
+#define M48T59_INTR 0x1ff6
+#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */
+#define M48T59_INTR_ABE 0x20
+#define M48T59_ALARM_DATE 0x1ff5
+#define M48T59_ALARM_HOUR 0x1ff4
+#define M48T59_ALARM_MIN 0x1ff3
+#define M48T59_ALARM_SEC 0x1ff2
+#define M48T59_UNUSED 0x1ff1
+#define M48T59_FLAGS 0x1ff0
+#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */
+#define M48T59_FLAGS_AF 0x40 /* alarm */
+#define M48T59_FLAGS_BF 0x10 /* low battery */
+
+#define M48T59_NVRAM_SIZE 0x1ff0
+
+struct m48t59_plat_data {
+ /* The method to access M48T59 registers,
+ * NOTE: The 'ofs' should be 0x00~0x1fff
+ */
+ void (*write_byte)(struct device *dev, u32 ofs, u8 val);
+ unsigned char (*read_byte)(struct device *dev, u32 ofs);
+};
+
+#endif /* _LINUX_RTC_M48T59_H_ */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 7f2c99d66e9..9c721cd2c9d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -142,6 +142,9 @@
/* Micrel KS8695 */
#define PORT_KS8695 76
+/* Broadcom SB1250, etc. SOC */
+#define PORT_SB1250_DUART 77
+
#ifdef __KERNEL__
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b7..0e1d0daef6a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
+ (unsigned long)ZERO_SIZE_PTR)
+
+/*
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
@@ -42,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
void (*)(void *, struct kmem_cache *, unsigned long));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
@@ -78,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
/*
* Common kmalloc functions provided by all allocators
*/
-void *__kzalloc(size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ * kmem_cache_alloc()
+ * __kmalloc()
+ * kmalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ * kmem_cache_alloc_node()
+ * kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
+ */
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
+#endif
+
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
* @n: number of elements.
@@ -138,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
if (n != 0 && size > ULONG_MAX / n)
return NULL;
- return __kzalloc(n * size, flags);
+ return __kmalloc(n * size, flags | __GFP_ZERO);
}
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- * kmem_cache_alloc()
- * __kmalloc()
- * kmalloc()
- * kzalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- * kmem_cache_alloc_node()
- * kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
* kmalloc_node - allocate memory from a specific node
@@ -242,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#endif /* DEBUG_SLAB */
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+ return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+ return kmalloc(size, flags | __GFP_ZERO);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */
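
With kzalloc() and kmem_cache_zalloc() now implemented via __GFP_ZERO, and zero-length requests returning ZERO_SIZE_PTR, callers can treat the result uniformly; kfree() accepts ZERO_SIZE_PTR just like NULL. A small illustrative sketch:

#include <linux/slab.h>
#include <linux/kernel.h>

static void example_zero_alloc(size_t n)
{
	char *p = kzalloc(n, GFP_KERNEL);

	if (ZERO_OR_NULL_PTR(p)) {
		/* n == 0 (ZERO_SIZE_PTR) or allocation failure (NULL) */
		kfree(p);		/* a no-op in both cases */
		return;
	}
	/* ... use the zeroed buffer ... */
	kfree(p);
}
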
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454..32bdc2ffd71 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
int i = 0;
+
+ if (!size)
+ return ZERO_SIZE_PTR;
+
#define CACHE(x) \
if (size <= x) \
goto found; \
@@ -54,32 +58,6 @@ found:
return __kmalloc(size, flags);
}
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- int i = 0;
-#define CACHE(x) \
- if (size <= x) \
- goto found; \
- else \
- i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
- {
- extern void __you_cannot_kzalloc_that_much(void);
- __you_cannot_kzalloc_that_much();
- }
-found:
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
- flags);
-#endif
- return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
- }
- return __kzalloc(size, flags);
-}
-
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -88,6 +66,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size)) {
int i = 0;
+
+ if (!size)
+ return ZERO_SIZE_PTR;
+
#define CACHE(x) \
if (size <= x) \
goto found; \
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f677152..07f7e4cbcee 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,7 +16,9 @@ struct kmem_cache_node {
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
struct list_head full;
+#endif
};
/*
@@ -44,7 +46,9 @@ struct kmem_cache {
int align; /* Alignment */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
struct kobject kobj; /* For sysfs */
+#endif
#ifdef CONFIG_NUMA
int defrag_ratio;
@@ -159,18 +163,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
#define SLUB_DMA 0
#endif
-
-/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
- *
- * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
- *
- * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
- * Both make kfree a no-op.
- */
-#define ZERO_SIZE_PTR ((void *)16)
-
-
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
@@ -187,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
-
- if (!s)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_zalloc(s, flags);
- } else
- return __kzalloc(size, flags);
-}
-
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 96ac21f8dd7..259a13c3bd9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,11 +99,14 @@ static inline int up_smp_call_function(void)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
- void *info, int retry, int wait)
-{
- return -EBUSY;
-}
+#define smp_call_function_single(cpuid, func, info, retry, wait) \
+({ \
+ WARN_ON(cpuid != 0); \
+ local_irq_disable(); \
+ (func)(info); \
+ local_irq_enable(); \
+ 0; \
+})
#endif /* !SMP */
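
On UP kernels the macro now runs the function locally with interrupts disabled instead of returning -EBUSY, so callers see the same behaviour as on SMP. An illustrative caller (names hypothetical):

#include <linux/smp.h>

static void example_read_cpu(void *info)
{
	*(int *)info = smp_processor_id();
}

static int example_run_on_cpu0(void)
{
	int where = -1;

	/* retry = 0, wait = 1: returns 0 on success on both UP and SMP */
	return smp_call_function_single(0, example_read_cpu, &where, 0, 1);
}
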
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 1be5ea05947..302b81d1d11 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -76,6 +76,7 @@ struct spi_device {
#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH 0x04 /* chipselect active high? */
#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
+#define SPI_3WIRE 0x10 /* SI/SO signals shared */
u8 bits_per_word;
int irq;
void *controller_state;
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 9dbca629dcf..b8db32cea1d 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -26,6 +26,7 @@ struct spi_bitbang {
struct list_head queue;
u8 busy;
u8 use_dma;
+ u8 flags; /* extra spi->mode support */
struct spi_master *master;
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
new file mode 100644
index 00000000000..60b59187e59
--- /dev/null
+++ b/include/linux/spi/tle62x0.h
@@ -0,0 +1,24 @@
+/*
+ * tle62x0.h - platform glue to Infineon TLE62x0 driver chips
+ *
+ * Copyright 2007 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+struct tle62x0_pdata {
+ unsigned int init_state;
+ unsigned int gpio_count;
+};
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bbac101ac37..459c5fc11d5 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -58,6 +58,7 @@ u32 gss_unwrap(
u32 gss_delete_sec_context(
struct gss_ctx **ctx_id);
+u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service);
u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 129d50f2225..8531a70da73 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -212,6 +212,7 @@ struct svc_rqst {
struct svc_pool * rq_pool; /* thread pool */
struct svc_procedure * rq_procinfo; /* procedure info */
struct auth_ops * rq_authop; /* authentication flavour */
+ u32 rq_flavor; /* pseudoflavor */
struct svc_cred rq_cred; /* auth info */
struct sk_buff * rq_skbuff; /* fast recv inet buffer */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
@@ -248,6 +249,7 @@ struct svc_rqst {
*/
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
+ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
struct svc_cacherep * rq_cacherep; /* cache info */
struct knfsd_fh * rq_reffh; /* Reference filehandle, used to
* determine what device number
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index de92619b082..22e1ef8e200 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -127,6 +127,7 @@ extern struct auth_domain *auth_unix_lookup(struct in_addr addr);
extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(void);
extern void svcauth_unix_info_release(void *);
+extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
static inline unsigned long hash_str(char *name, int bits)
{
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 5a5db16ab66..417a1def56d 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -22,6 +22,7 @@
int gss_svc_init(void);
void gss_svc_shutdown(void);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 00686888134..665f85f2a3a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -188,7 +188,8 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long try_to_free_pages(struct zone **zones, int order,
+ gfp_t gfp_mask);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/include/linux/time.h b/include/linux/time.h
index 4bb05a829be..ec3b0ced0af 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -36,7 +36,8 @@ struct timezone {
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000L
-static inline int timespec_equal(struct timespec *a, struct timespec *b)
+static inline int timespec_equal(const struct timespec *a,
+ const struct timespec *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d9325cf8a13..75370ec0923 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,7 +25,7 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node,
#ifdef CONFIG_HIGHMEM
zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
- zone_page_state(&zones[ZONE_NORMAL], item);
+ zone_page_state(&zones[ZONE_NORMAL], item) +
+ zone_page_state(&zones[ZONE_MOVABLE], item);
}
extern void zone_statistics(struct zonelist *, struct zone *);
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index d961635d0e6..699b7e9864f 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -75,6 +75,8 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
int vt_waitactive(int vt);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
+extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
/*
* vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/include/net/scm.h b/include/net/scm.h
index 5637d5e22d5..423cb1d5ac2 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -8,7 +8,7 @@
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
*/
-#define SCM_MAX_FD (OPEN_MAX-1)
+#define SCM_MAX_FD 255
struct scm_fp_list
{
diff --git a/include/video/tgafb.h b/include/video/tgafb.h
index 03d0dbe293a..7bc5e2c1482 100644
--- a/include/video/tgafb.h
+++ b/include/video/tgafb.h
@@ -216,6 +216,7 @@ struct tga_par {
u32 pll_freq; /* pixclock in mhz */
u32 bits_per_pixel; /* bits per pixel */
u32 sync_on_green; /* set if sync is on green */
+ u32 palette[16];
};
diff --git a/init/Kconfig b/init/Kconfig
index 0b0e29ed82d..e2056828dc6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -546,7 +546,7 @@ config SLUB_DEBUG
choice
prompt "Choose SLAB allocator"
- default SLAB
+ default SLUB
help
This option allows to select a slab allocator.
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index b222ce9e1c8..a6b4c0c08e1 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -56,12 +56,9 @@ static void __init handle_initrd(void)
sys_chroot(".");
pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
- if (pid > 0) {
- while (pid != sys_wait4(-1, NULL, 0, NULL)) {
- try_to_freeze();
+ if (pid > 0)
+ while (pid != sys_wait4(-1, NULL, 0, NULL))
yield();
- }
- }
/* move initrd to rootfs' /old */
sys_fchdir(old_fd);
diff --git a/ipc/msg.c b/ipc/msg.c
index cbd27e51994..a03fcb522ff 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -385,7 +385,7 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
struct kern_ipc_perm *ipcp;
- struct msq_setbuf setbuf;
+ struct msq_setbuf uninitialized_var(setbuf);
struct msg_queue *msq;
int err, version;
struct ipc_namespace *ns;
@@ -509,7 +509,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
err = audit_ipc_obj(ipcp);
if (err)
goto out_unlock_up;
- if (cmd==IPC_SET) {
+ if (cmd == IPC_SET) {
err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
setbuf.mode);
if (err)
diff --git a/ipc/sem.c b/ipc/sem.c
index 89bfdffb38d..b676fef6d20 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -856,7 +856,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
{
struct sem_array *sma;
int err;
- struct sem_setbuf setbuf;
+ struct sem_setbuf uninitialized_var(setbuf);
struct kern_ipc_perm *ipcp;
if(cmd == IPC_SET) {
diff --git a/kernel/audit.c b/kernel/audit.c
index 5ce8851facf..eb0f9165b40 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -392,6 +392,7 @@ static int kauditd_thread(void *dummy)
{
struct sk_buff *skb;
+ set_freezable();
while (!kthread_should_stop()) {
skb = skb_dequeue(&audit_skb_queue);
wake_up(&audit_backlog_wait);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index ce61f423542..1bf093dcffe 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1210,8 +1210,8 @@ static inline int audit_add_rule(struct audit_entry *entry,
struct audit_entry *e;
struct audit_field *inode_f = entry->rule.inode_f;
struct audit_watch *watch = entry->rule.watch;
- struct nameidata *ndp, *ndw;
- int h, err, putnd_needed = 0;
+ struct nameidata *ndp = NULL, *ndw = NULL;
+ int h, err;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -1239,7 +1239,6 @@ static inline int audit_add_rule(struct audit_entry *entry,
err = audit_get_nd(watch->path, &ndp, &ndw);
if (err)
goto error;
- putnd_needed = 1;
}
mutex_lock(&audit_filter_mutex);
@@ -1269,14 +1268,11 @@ static inline int audit_add_rule(struct audit_entry *entry,
#endif
mutex_unlock(&audit_filter_mutex);
- if (putnd_needed)
- audit_put_nd(ndp, ndw);
-
+ audit_put_nd(ndp, ndw); /* NULL args OK */
return 0;
error:
- if (putnd_needed)
- audit_put_nd(ndp, ndw);
+ audit_put_nd(ndp, ndw); /* NULL args OK */
if (watch)
audit_put_watch(watch); /* tmp watch, matches initial get */
return err;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 208cf3497c1..181ae708602 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -103,11 +103,19 @@ static inline void check_for_tasks(int cpu)
write_unlock_irq(&tasklist_lock);
}
+struct take_cpu_down_param {
+ unsigned long mod;
+ void *hcpu;
+};
+
/* Take this CPU down. */
-static int take_cpu_down(void *unused)
+static int take_cpu_down(void *_param)
{
+ struct take_cpu_down_param *param = _param;
int err;
+ raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+ param->hcpu);
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
@@ -127,6 +135,10 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
cpumask_t old_allowed, tmp;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ struct take_cpu_down_param tcd_param = {
+ .mod = mod,
+ .hcpu = hcpu,
+ };
if (num_online_cpus() == 1)
return -EBUSY;
@@ -153,7 +165,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
set_cpus_allowed(current, tmp);
mutex_lock(&cpu_bitmask_lock);
- p = __stop_machine_run(take_cpu_down, NULL, cpu);
+ p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
mutex_unlock(&cpu_bitmask_lock);
if (IS_ERR(p) || cpu_online(cpu)) {
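
CPU_DYING and CPU_DYING_FROZEN are delivered on the dying CPU itself, from take_cpu_down() under stop_machine with interrupts disabled, so handlers must not sleep. A hedged sketch of a subsystem callback (the callback and its actions are hypothetical):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/* atomic context: e.g. migrate per-cpu state away from 'cpu' */
		pr_debug("cpu %d dying\n", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* process context: the usual teardown still happens here */
		pr_debug("cpu %d dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

/* would be hooked up with register_cpu_notifier(&example_cpu_nb) */
static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};
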
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 824b1c01f41..b4796d85014 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2138,6 +2138,9 @@ static void common_cpu_mem_hotplug_unplug(void)
static int cpuset_handle_cpuhp(struct notifier_block *nb,
unsigned long phase, void *cpu)
{
+ if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+ return NOTIFY_DONE;
+
common_cpu_mem_hotplug_unplug();
return 0;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 57626692cd9..e8af8d0c248 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,6 +31,7 @@
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
+#include <linux/freezer.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
@@ -387,6 +388,11 @@ void daemonize(const char *name, ...)
* they would be locked into memory.
*/
exit_mm(current);
+ /*
+ * We don't want to have TIF_FREEZE set if the system-wide hibernation
+ * or suspend transition begins right now.
+ */
+ current->flags |= PF_NOFREEZE;
set_special_pids(1, 1);
proc_clear_tty(current);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7c5c5888e00..ba39bdb2a7b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -923,7 +923,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
- new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
+ new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC;
if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 0d662475dd9..474219a4192 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -152,7 +152,7 @@ static unsigned int get_symbol_offset(unsigned long pos)
/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
- char namebuf[KSYM_NAME_LEN+1];
+ char namebuf[KSYM_NAME_LEN];
unsigned long i;
unsigned int off;
@@ -248,7 +248,7 @@ const char *kallsyms_lookup(unsigned long addr,
{
const char *msym;
- namebuf[KSYM_NAME_LEN] = 0;
+ namebuf[KSYM_NAME_LEN - 1] = 0;
namebuf[0] = 0;
if (is_ksym_addr(addr)) {
@@ -265,7 +265,7 @@ const char *kallsyms_lookup(unsigned long addr,
/* see if it's in a module */
msym = module_address_lookup(addr, symbolsize, offset, modname);
if (msym)
- return strncpy(namebuf, msym, KSYM_NAME_LEN);
+ return strncpy(namebuf, msym, KSYM_NAME_LEN - 1);
return NULL;
}
@@ -273,7 +273,7 @@ const char *kallsyms_lookup(unsigned long addr,
int lookup_symbol_name(unsigned long addr, char *symname)
{
symname[0] = '\0';
- symname[KSYM_NAME_LEN] = '\0';
+ symname[KSYM_NAME_LEN - 1] = '\0';
if (is_ksym_addr(addr)) {
unsigned long pos;
@@ -291,7 +291,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
unsigned long *offset, char *modname, char *name)
{
name[0] = '\0';
- name[KSYM_NAME_LEN] = '\0';
+ name[KSYM_NAME_LEN - 1] = '\0';
if (is_ksym_addr(addr)) {
unsigned long pos;
@@ -312,7 +312,7 @@ int sprint_symbol(char *buffer, unsigned long address)
char *modname;
const char *name;
unsigned long offset, size;
- char namebuf[KSYM_NAME_LEN+1];
+ char namebuf[KSYM_NAME_LEN];
name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
if (!name)
@@ -342,8 +342,8 @@ struct kallsym_iter
unsigned long value;
unsigned int nameoff; /* If iterating in core kernel symbols */
char type;
- char name[KSYM_NAME_LEN+1];
- char module_name[MODULE_NAME_LEN + 1];
+ char name[KSYM_NAME_LEN];
+ char module_name[MODULE_NAME_LEN];
int exported;
};
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1a5ff2211d8..edba2ffb43d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -379,7 +379,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
static void print_lock_name(struct lock_class *class)
{
- char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
+ char str[KSYM_NAME_LEN], c1, c2, c3, c4;
const char *name;
get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -401,7 +401,7 @@ static void print_lock_name(struct lock_class *class)
static void print_lockdep_cache(struct lockdep_map *lock)
{
const char *name;
- char str[KSYM_NAME_LEN + 1];
+ char str[KSYM_NAME_LEN];
name = lock->name;
if (!name)
diff --git a/kernel/module.c b/kernel/module.c
index 539fed9ac83..33c04ad5117 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2133,7 +2133,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
sym = get_ksymbol(mod, addr, NULL, NULL);
if (!sym)
goto out;
- strlcpy(symname, sym, KSYM_NAME_LEN + 1);
+ strlcpy(symname, sym, KSYM_NAME_LEN);
mutex_unlock(&module_mutex);
return 0;
}
@@ -2158,9 +2158,9 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
if (!sym)
goto out;
if (modname)
- strlcpy(modname, mod->name, MODULE_NAME_LEN + 1);
+ strlcpy(modname, mod->name, MODULE_NAME_LEN);
if (name)
- strlcpy(name, sym, KSYM_NAME_LEN + 1);
+ strlcpy(name, sym, KSYM_NAME_LEN);
mutex_unlock(&module_mutex);
return 0;
}
@@ -2181,8 +2181,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
*value = mod->symtab[symnum].st_value;
*type = mod->symtab[symnum].st_info;
strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
- KSYM_NAME_LEN + 1);
- strlcpy(module_name, mod->name, MODULE_NAME_LEN + 1);
+ KSYM_NAME_LEN);
+ strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, mod);
mutex_unlock(&module_mutex);
return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 623d1828259..f64f4c1ac11 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -159,14 +159,15 @@ const char *print_tainted(void)
{
static char buf[20];
if (tainted) {
- snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c",
+ snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c",
tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
tainted & TAINT_BAD_PAGE ? 'B' : ' ',
- tainted & TAINT_USER ? 'U' : ' ');
+ tainted & TAINT_USER ? 'U' : ' ',
+ tainted & TAINT_DIE ? 'D' : ' ');
}
else
snprintf(buf, sizeof(buf), "Not tainted");
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b1d11f1c7cf..4a1745f1dad 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -490,3 +490,22 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
return ret;
}
#endif /* __ARCH_SYS_PTRACE */
+
+int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
+{
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ return -EIO;
+ return put_user(tmp, (unsigned long __user *)data);
+}
+
+int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
+{
+ int copied;
+
+ copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+ return (copied == sizeof(data)) ? 0 : -EIO;
+}
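
A hedged sketch of how an architecture's ptrace code collapses onto the new helpers (the function name is illustrative; real arch code handles many more requests):

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/sched.h>

static long example_arch_ptrace(struct task_struct *child, long request,
				long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* replaces open-coded access_process_vm() + put_user() */
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);
	default:
		return -EIO;
	}
}
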
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 55ba82a85a6..ddff3324778 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -40,6 +40,7 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
+#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
@@ -518,7 +519,6 @@ rcu_torture_writer(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
set_user_nice(current, 19);
- current->flags |= PF_NOFREEZE;
do {
schedule_timeout_uninterruptible(1);
@@ -558,7 +558,6 @@ rcu_torture_fakewriter(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
set_user_nice(current, 19);
- current->flags |= PF_NOFREEZE;
do {
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
@@ -589,7 +588,6 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
set_user_nice(current, 19);
- current->flags |= PF_NOFREEZE;
do {
idx = cur_ops->readlock();
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 015fc633c96..e3055ba6915 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -260,6 +260,7 @@ static int test_func(void *data)
int ret;
current->flags |= PF_MUTEX_TESTER;
+ set_freezable();
allow_signal(SIGHUP);
for(;;) {
diff --git a/kernel/sched.c b/kernel/sched.c
index 1c8076676eb..cb31fb4a137 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4912,8 +4912,6 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- try_to_freeze();
-
spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
@@ -5147,7 +5145,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
if (IS_ERR(p))
return NOTIFY_BAD;
- p->flags |= PF_NOFREEZE;
kthread_bind(p, cpu);
/* Must be high prio: stop_machine expects to yield to it. */
rq = task_rq_lock(p, &flags);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8de26779016..0f546ddea43 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -14,6 +14,7 @@
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
@@ -488,8 +489,6 @@ void __init softirq_init(void)
static int ksoftirqd(void * __bind_cpu)
{
- current->flags |= PF_NOFREEZE;
-
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0131e296ffb..708d4882c0c 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -10,6 +10,7 @@
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>
@@ -116,7 +117,6 @@ static int watchdog(void * __bind_cpu)
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
sched_setscheduler(current, SCHED_FIFO, &param);
- current->flags |= PF_NOFREEZE;
/* initialize timestamp */
touch_softlockup_watchdog();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2ce7acf841a..7063ebc6db0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -29,6 +29,7 @@
#include <linux/utsname.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
+#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
@@ -49,9 +50,6 @@
#include <asm/uaccess.h>
#include <asm/processor.h>
-extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
#ifdef CONFIG_X86
#include <asm/nmi.h>
#include <asm/stacktrace.h>
@@ -826,6 +824,14 @@ static ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hugepages_treat_as_movable",
+ .data = &hugepages_treat_as_movable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &hugetlb_treat_movable_handler,
+ },
#endif
{
.ctl_name = VM_LOWMEM_RESERVE_RATIO,
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 8bbcfb77f7d..e5edc3a22a0 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -38,7 +38,7 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
static void print_name_offset(struct seq_file *m, void *sym)
{
- char symname[KSYM_NAME_LEN+1];
+ char symname[KSYM_NAME_LEN];
if (lookup_symbol_name((unsigned long)sym, symname) < 0)
SEQ_printf(m, "<%p>", sym);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 9b8a826236d..8ed62fda16c 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -269,7 +269,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
static void print_name_offset(struct seq_file *m, unsigned long addr)
{
- char symname[KSYM_NAME_LEN+1];
+ char symname[KSYM_NAME_LEN];
if (lookup_symbol_name(addr, symname) < 0)
seq_printf(m, "<%p>", (void *)addr);
diff --git a/kernel/timer.c b/kernel/timer.c
index 1258371e0d2..b7792fb0338 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int cpu)
/*
* The APs use this path later in boot
*/
- base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+ base = kmalloc_node(sizeof(*base),
+ GFP_KERNEL | __GFP_ZERO,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int cpu)
kfree(base);
return -ENOMEM;
}
- memset(base, 0, sizeof(*base));
per_cpu(tvec_bases, cpu) = base;
} else {
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d7d3fa3072e..58e5c152a6b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,8 +282,8 @@ static int worker_thread(void *__cwq)
struct cpu_workqueue_struct *cwq = __cwq;
DEFINE_WAIT(wait);
- if (!cwq->wq->freezeable)
- current->flags |= PF_NOFREEZE;
+ if (cwq->wq->freezeable)
+ set_freezable();
set_user_nice(current, -5);
@@ -752,18 +752,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
if (cwq->thread == NULL)
return;
+ flush_cpu_workqueue(cwq);
/*
- * If the caller is CPU_DEAD the single flush_cpu_workqueue()
- * is not enough, a concurrent flush_workqueue() can insert a
- * barrier after us.
+ * If the caller is CPU_DEAD and cwq->worklist was not empty,
+ * a concurrent flush_workqueue() can insert a barrier after us.
+ * However, in that case run_workqueue() won't return and check
+ * kthread_should_stop() until it flushes all work_struct's.
* When ->worklist becomes empty it is safe to exit because no
* more work_structs can be queued on this cwq: flush_workqueue
* checks list_empty(), and a "normal" queue_work() can't use
* a dead CPU.
*/
- while (flush_cpu_workqueue(cwq))
- ;
-
kthread_stop(cwq->thread);
cwq->thread = NULL;
}
diff --git a/lib/Kconfig b/lib/Kconfig
index 3eb29d5dc4f..e5c2c514174 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -41,6 +41,14 @@ config CRC32
kernel tree does. Such modules that use library CRC32 functions
require M here.
+config CRC7
+ tristate "CRC7 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC7 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC7
+ functions require M here.
+
config LIBCRC32C
tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
help
diff --git a/lib/Makefile b/lib/Makefile
index 8363b60be9d..da68b2ca060 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
diff --git a/lib/crc7.c b/lib/crc7.c
new file mode 100644
index 00000000000..f1c3a144cec
--- /dev/null
+++ b/lib/crc7.c
@@ -0,0 +1,68 @@
+/*
+ * crc7.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+
+
+/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
+const u8 crc7_syndrome_table[256] = {
+ 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
+ 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
+ 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
+ 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
+ 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
+ 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
+ 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
+ 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
+ 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
+ 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
+ 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
+ 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
+ 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
+ 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
+ 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
+ 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
+ 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
+ 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
+ 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
+ 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
+ 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
+ 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
+ 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
+ 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
+ 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
+ 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
+ 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
+ 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
+ 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
+ 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
+ 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
+ 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
+};
+EXPORT_SYMBOL(crc7_syndrome_table);
+
+/**
+ * crc7 - update the CRC7 for the data buffer
+ * @crc: previous CRC7 value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ * Context: any
+ *
+ * Returns the updated CRC7 value.
+ */
+u8 crc7(u8 crc, const u8 *buffer, size_t len)
+{
+ while (len--)
+ crc = crc7_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc7);
+
+MODULE_DESCRIPTION("CRC7 calculations");
+MODULE_LICENSE("GPL");
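
A quick way to sanity-check the table-driven routine above is to compute the same CRC bit by bit in user space. The sketch below is illustrative only: it is not kernel code, it assumes crc7_byte() in <linux/crc7.h> is the usual one-byte table lookup, and it simply re-implements the x^7 + x^3 + 1 polynomial with the CRC held in the low seven bits, checking the well-known MMC/SD CMD0 value.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint8_t crc7_bitwise(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		uint8_t b = *buf++;
		int i;

		for (i = 7; i >= 0; i--) {
			uint8_t inbit = (b >> i) & 1;
			uint8_t msb = (crc >> 6) & 1;

			crc = (crc << 1) & 0x7f;
			if (msb ^ inbit)
				crc ^= 0x09;	/* x^3 + 1, the low bits of the generator */
		}
	}
	return crc;
}

int main(void)
{
	/* MMC/SD CMD0 frame without its CRC byte */
	uint8_t cmd0[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 };
	uint8_t crc = crc7_bitwise(0, cmd0, sizeof(cmd0));

	printf("crc7 = 0x%02x, wire byte = 0x%02x\n", crc, (crc << 1) | 1);
	/* expected: crc7 = 0x4a, wire byte = 0x95 */
	return 0;
}

In-kernel users would instead call the exported crc7(0, buf, len) directly.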
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2bab9eb..f6d276db2d5 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
int nbytes = sizeof(struct gen_pool_chunk) +
(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
return -1;
- memset(chunk, 0, nbytes);
spin_lock_init(&chunk->lock);
chunk->start_addr = addr;
chunk->end_addr = addr + size;
diff --git a/mm/Kconfig b/mm/Kconfig
index 086af703da4..86187221e78 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -163,6 +163,10 @@ config ZONE_DMA_FLAG
default "0" if !ZONE_DMA
default "1"
+config BOUNCE
+ def_bool y
+ depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
+
config NR_QUICK
int
depends on QUICKLIST
diff --git a/mm/Makefile b/mm/Makefile
index a9148ea329a..245e33ab00c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,9 +13,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
$(mmu-y)
-ifeq ($(CONFIG_MMU)$(CONFIG_BLOCK),yy)
-obj-y += bounce.o
-endif
+obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a..00b02623f00 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
int node = cpu_to_node(cpu);
BUG_ON(pdata->ptrs[cpu]);
- if (node_online(node)) {
- /* FIXME: kzalloc_node(size, gfp, node) */
- pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
- if (pdata->ptrs[cpu])
- memset(pdata->ptrs[cpu], 0, size);
- } else
+ if (node_online(node))
+ pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+ else
pdata->ptrs[cpu] = kzalloc(size, gfp);
return pdata->ptrs[cpu];
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 100b99c2d50..5d5449f3d41 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -867,13 +867,11 @@ void do_generic_mapping_read(struct address_space *mapping,
{
struct inode *inode = mapping->host;
unsigned long index;
- unsigned long end_index;
unsigned long offset;
unsigned long last_index;
unsigned long next_index;
unsigned long prev_index;
unsigned int prev_offset;
- loff_t isize;
struct page *cached_page;
int error;
struct file_ra_state ra = *_ra;
@@ -886,27 +884,12 @@ void do_generic_mapping_read(struct address_space *mapping,
last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
- isize = i_size_read(inode);
- if (!isize)
- goto out;
-
- end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
for (;;) {
struct page *page;
+ unsigned long end_index;
+ loff_t isize;
unsigned long nr, ret;
- /* nr is the maximum number of bytes to copy from this page */
- nr = PAGE_CACHE_SIZE;
- if (index >= end_index) {
- if (index > end_index)
- goto out;
- nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
- if (nr <= offset) {
- goto out;
- }
- }
- nr = nr - offset;
-
cond_resched();
if (index == next_index)
next_index = page_cache_readahead(mapping, &ra, filp,
@@ -921,6 +904,32 @@ find_page:
if (!PageUptodate(page))
goto page_not_up_to_date;
page_ok:
+ /*
+ * i_size must be checked after we know the page is Uptodate.
+ *
+	 * Checking i_size only after that check allows us to calculate
+ * the correct value for "nr", which means the zero-filled
+ * part of the page is not copied back to userspace (unless
+ * another truncate extends the file - this is desired though).
+ */
+
+ isize = i_size_read(inode);
+ end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+ if (unlikely(!isize || index > end_index)) {
+ page_cache_release(page);
+ goto out;
+ }
+
+ /* nr is the maximum number of bytes to copy from this page */
+ nr = PAGE_CACHE_SIZE;
+ if (index == end_index) {
+ nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+ if (nr <= offset) {
+ page_cache_release(page);
+ goto out;
+ }
+ }
+ nr = nr - offset;
/* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
@@ -1007,31 +1016,6 @@ readpage:
unlock_page(page);
}
- /*
- * i_size must be checked after we have done ->readpage.
- *
- * Checking i_size after the readpage allows us to calculate
- * the correct value for "nr", which means the zero-filled
- * part of the page is not copied back to userspace (unless
- * another truncate extends the file - this is desired though).
- */
- isize = i_size_read(inode);
- end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(!isize || index > end_index)) {
- page_cache_release(page);
- goto out;
- }
-
- /* nr is the maximum number of bytes to copy from this page */
- nr = PAGE_CACHE_SIZE;
- if (index == end_index) {
- nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
- if (nr <= offset) {
- page_cache_release(page);
- goto out;
- }
- }
- nr = nr - offset;
goto page_ok;
readpage_error:
diff --git a/mm/highmem.c b/mm/highmem.c
index be8f8d36a8b..7a967bc3515 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -46,9 +46,14 @@ unsigned int nr_free_highpages (void)
pg_data_t *pgdat;
unsigned int pages = 0;
- for_each_online_pgdat(pgdat)
+ for_each_online_pgdat(pgdat) {
pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
NR_FREE_PAGES);
+ if (zone_movable_is_highmem())
+ pages += zone_page_state(
+ &pgdat->node_zones[ZONE_MOVABLE],
+ NR_FREE_PAGES);
+ }
return pages;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index acc0fb3cf06..6912bbf33fa 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@ unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
+static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
+unsigned long hugepages_treat_as_movable;
+
/*
* Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
*/
@@ -68,12 +71,13 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
{
int nid;
struct page *page = NULL;
- struct zonelist *zonelist = huge_zonelist(vma, address);
+ struct zonelist *zonelist = huge_zonelist(vma, address,
+ htlb_alloc_mask);
struct zone **z;
for (z = zonelist->zones; *z; z++) {
nid = zone_to_nid(*z);
- if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
+ if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
!list_empty(&hugepage_freelists[nid]))
break;
}
@@ -113,7 +117,7 @@ static int alloc_fresh_huge_page(void)
prev_nid = nid;
spin_unlock(&nid_lock);
- page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
+ page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
HUGETLB_PAGE_ORDER);
if (page) {
set_compound_page_dtor(page, free_huge_page);
@@ -263,6 +267,19 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
max_huge_pages = set_max_huge_pages(max_huge_pages);
return 0;
}
+
+int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer,
+ size_t *length, loff_t *ppos)
+{
+ proc_dointvec(table, write, file, buffer, length, ppos);
+ if (hugepages_treat_as_movable)
+ htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
+ else
+ htlb_alloc_mask = GFP_HIGHUSER;
+ return 0;
+}
+
#endif /* CONFIG_SYSCTL */
int hugetlb_report_meminfo(char *buf)
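
Assuming the new entry lands at the usual path /proc/sys/vm/hugepages_treat_as_movable, flipping it from user space is just a write to that file. A minimal sketch, illustration only:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/vm/hugepages_treat_as_movable", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "1" makes subsequent huge page allocations use GFP_HIGHUSER_MOVABLE,
	 * "0" (the default) keeps them in GFP_HIGHUSER */
	if (write(fd, "1\n", 2) != 2) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}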
@@ -481,7 +498,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_MINOR;
}
-int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, int write_access)
{
int ret = VM_FAULT_SIGBUS;
diff --git a/mm/memory.c b/mm/memory.c
index b3d73bb1f68..9c6ff7fffdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1715,11 +1715,11 @@ gotten:
if (unlikely(anon_vma_prepare(vma)))
goto oom;
if (old_page == ZERO_PAGE(address)) {
- new_page = alloc_zeroed_user_highpage(vma, address);
+ new_page = alloc_zeroed_user_highpage_movable(vma, address);
if (!new_page)
goto oom;
} else {
- new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
@@ -2237,7 +2237,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- page = alloc_zeroed_user_highpage(vma, address);
+ page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
goto oom;
@@ -2340,7 +2340,8 @@ retry:
if (unlikely(anon_vma_prepare(vma)))
goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+ vma, address);
if (!page)
goto oom;
copy_user_highpage(page, new_page, address, vma);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 188f8d9c4ae..9f4e9b95e8f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -594,7 +594,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
- return alloc_pages_node(node, GFP_HIGHUSER, 0);
+ return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
@@ -710,7 +710,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
{
struct vm_area_struct *vma = (struct vm_area_struct *)private;
- return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+ return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+ page_address_in_vma(page, vma));
}
#else
@@ -1202,7 +1203,8 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
#ifdef CONFIG_HUGETLBFS
/* Return a zonelist suitable for a huge page allocation. */
-struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
+struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
+ gfp_t gfp_flags)
{
struct mempolicy *pol = get_vma_policy(current, vma, addr);
@@ -1210,7 +1212,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
unsigned nid;
nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
- return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+ return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
}
return zonelist_policy(GFP_HIGHUSER, pol);
}
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1..02d5ec3feab 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data, int node_id)
{
mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+ pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
if (!pool)
return NULL;
- memset(pool, 0, sizeof(*pool));
pool->elements = kmalloc_node(min_nr * sizeof(void *),
GFP_KERNEL, node_id);
if (!pool->elements) {
diff --git a/mm/migrate.c b/mm/migrate.c
index a91ca00abeb..34d8ada053e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -761,7 +761,8 @@ static struct page *new_page_node(struct page *p, unsigned long private,
*result = &pm->status;
- return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0);
+ return alloc_pages_node(pm->node,
+ GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ea9da3bed3e..886ea0d5a13 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -824,6 +824,7 @@ int __set_page_dirty_nobuffers(struct page *page)
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
task_io_account_write(PAGE_CACHE_SIZE);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9e4e647d7e..e2a10b957f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -80,8 +80,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
256,
#endif
#ifdef CONFIG_HIGHMEM
- 32
+ 32,
#endif
+ 32,
};
EXPORT_SYMBOL(totalram_pages);
@@ -95,8 +96,9 @@ static char * const zone_names[MAX_NR_ZONES] = {
#endif
"Normal",
#ifdef CONFIG_HIGHMEM
- "HighMem"
+ "HighMem",
#endif
+ "Movable",
};
int min_free_kbytes = 1024;
@@ -134,6 +136,13 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
+ unsigned long __initdata required_kernelcore;
+ unsigned long __initdata required_movablecore;
+ unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];
+
+ /* movable_zone is the "real" zone that pages in ZONE_MOVABLE are taken from */
+ int movable_zone;
+ EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
#if MAX_NUMNODES > 1
@@ -1324,7 +1333,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
- did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+ did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
@@ -1361,7 +1370,8 @@ nofail_alloc:
*/
do_retry = 0;
if (!(gfp_mask & __GFP_NORETRY)) {
- if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+ if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
+ (gfp_mask & __GFP_REPEAT))
do_retry = 1;
if (gfp_mask & __GFP_NOFAIL)
do_retry = 1;
@@ -1474,13 +1484,14 @@ unsigned int nr_free_buffer_pages(void)
{
return nr_free_zone_pages(gfp_zone(GFP_USER));
}
+EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
/*
* Amount of free RAM allocatable within all zones
*/
unsigned int nr_free_pagecache_pages(void)
{
- return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
+ return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}
static inline void show_node(struct zone *zone)
@@ -2667,6 +2678,63 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
}
/*
+ * This finds a zone that can be used for ZONE_MOVABLE pages. The
+ * assumption is that zones within a node are ordered in monotonically
+ * increasing memory addresses, so the "highest" populated zone is used.
+ */
+void __init find_usable_zone_for_movable(void)
+{
+ int zone_index;
+ for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
+ if (zone_index == ZONE_MOVABLE)
+ continue;
+
+ if (arch_zone_highest_possible_pfn[zone_index] >
+ arch_zone_lowest_possible_pfn[zone_index])
+ break;
+ }
+
+ VM_BUG_ON(zone_index == -1);
+ movable_zone = zone_index;
+}
+
+/*
+ * The zone ranges provided by the architecture do not include ZONE_MOVABLE
+ * because its size is independent of the architecture. Unlike the other zones,
+ * the starting point for ZONE_MOVABLE is not fixed. It may be different
+ * in each node depending on the size of each node and how evenly kernelcore
+ * is distributed. This helper function adjusts the zone ranges
+ * provided by the architecture for a given node by using the end of the
+ * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
+ * zones within a node are in order of monotonically increasing memory addresses.
+ */
+void __meminit adjust_zone_range_for_zone_movable(int nid,
+ unsigned long zone_type,
+ unsigned long node_start_pfn,
+ unsigned long node_end_pfn,
+ unsigned long *zone_start_pfn,
+ unsigned long *zone_end_pfn)
+{
+ /* Only adjust if ZONE_MOVABLE is on this node */
+ if (zone_movable_pfn[nid]) {
+ /* Size ZONE_MOVABLE */
+ if (zone_type == ZONE_MOVABLE) {
+ *zone_start_pfn = zone_movable_pfn[nid];
+ *zone_end_pfn = min(node_end_pfn,
+ arch_zone_highest_possible_pfn[movable_zone]);
+
+ /* Adjust for ZONE_MOVABLE starting within this range */
+ } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
+ *zone_end_pfn > zone_movable_pfn[nid]) {
+ *zone_end_pfn = zone_movable_pfn[nid];
+
+ /* Check if this whole range is within ZONE_MOVABLE */
+ } else if (*zone_start_pfn >= zone_movable_pfn[nid])
+ *zone_start_pfn = *zone_end_pfn;
+ }
+}
+
+/*
* Return the number of pages a zone spans in a node, including holes
* present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
*/
@@ -2681,6 +2749,9 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
+ adjust_zone_range_for_zone_movable(nid, zone_type,
+ node_start_pfn, node_end_pfn,
+ &zone_start_pfn, &zone_end_pfn);
/* Check that this node has pages within the zone's required range */
if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
@@ -2771,6 +2842,9 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
node_end_pfn);
+ adjust_zone_range_for_zone_movable(nid, zone_type,
+ node_start_pfn, node_end_pfn,
+ &zone_start_pfn, &zone_end_pfn);
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
@@ -3148,6 +3222,157 @@ unsigned long __init find_max_pfn_with_active_regions(void)
return max_pfn;
}
+unsigned long __init early_calculate_totalpages(void)
+{
+ int i;
+ unsigned long totalpages = 0;
+
+ for (i = 0; i < nr_nodemap_entries; i++)
+ totalpages += early_node_map[i].end_pfn -
+ early_node_map[i].start_pfn;
+
+ return totalpages;
+}
+
+/*
+ * Find the PFN at which the Movable zone begins in each node. Kernel
+ * memory is spread evenly between nodes as long as the nodes have enough
+ * memory. When they don't, some nodes will have more kernelcore than
+ * others.
+ */
+void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+{
+ int i, nid;
+ unsigned long usable_startpfn;
+ unsigned long kernelcore_node, kernelcore_remaining;
+ int usable_nodes = num_online_nodes();
+
+ /*
+	 * If movablecore was specified, calculate the corresponding size
+	 * of kernelcore so that memory usable for any allocation type
+	 * is evenly spread. If both kernelcore
+ * and movablecore are specified, then the value of kernelcore
+ * will be used for required_kernelcore if it's greater than
+ * what movablecore would have allowed.
+ */
+ if (required_movablecore) {
+ unsigned long totalpages = early_calculate_totalpages();
+ unsigned long corepages;
+
+ /*
+ * Round-up so that ZONE_MOVABLE is at least as large as what
+ * was requested by the user
+ */
+ required_movablecore =
+ roundup(required_movablecore, MAX_ORDER_NR_PAGES);
+ corepages = totalpages - required_movablecore;
+
+ required_kernelcore = max(required_kernelcore, corepages);
+ }
+
+ /* If kernelcore was not specified, there is no ZONE_MOVABLE */
+ if (!required_kernelcore)
+ return;
+
+ /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+ find_usable_zone_for_movable();
+ usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
+
+restart:
+ /* Spread kernelcore memory as evenly as possible throughout nodes */
+ kernelcore_node = required_kernelcore / usable_nodes;
+ for_each_online_node(nid) {
+ /*
+ * Recalculate kernelcore_node if the division per node
+ * now exceeds what is necessary to satisfy the requested
+ * amount of memory for the kernel
+ */
+ if (required_kernelcore < kernelcore_node)
+ kernelcore_node = required_kernelcore / usable_nodes;
+
+ /*
+ * As the map is walked, we track how much memory is usable
+ * by the kernel using kernelcore_remaining. When it is
+ * 0, the rest of the node is usable by ZONE_MOVABLE
+ */
+ kernelcore_remaining = kernelcore_node;
+
+ /* Go through each range of PFNs within this node */
+ for_each_active_range_index_in_nid(i, nid) {
+ unsigned long start_pfn, end_pfn;
+ unsigned long size_pages;
+
+ start_pfn = max(early_node_map[i].start_pfn,
+ zone_movable_pfn[nid]);
+ end_pfn = early_node_map[i].end_pfn;
+ if (start_pfn >= end_pfn)
+ continue;
+
+ /* Account for what is only usable for kernelcore */
+ if (start_pfn < usable_startpfn) {
+ unsigned long kernel_pages;
+ kernel_pages = min(end_pfn, usable_startpfn)
+ - start_pfn;
+
+ kernelcore_remaining -= min(kernel_pages,
+ kernelcore_remaining);
+ required_kernelcore -= min(kernel_pages,
+ required_kernelcore);
+
+ /* Continue if range is now fully accounted */
+ if (end_pfn <= usable_startpfn) {
+
+ /*
+ * Push zone_movable_pfn to the end so
+ * that if we have to rebalance
+ * kernelcore across nodes, we will
+ * not double account here
+ */
+ zone_movable_pfn[nid] = end_pfn;
+ continue;
+ }
+ start_pfn = usable_startpfn;
+ }
+
+ /*
+ * The usable PFN range for ZONE_MOVABLE is from
+ * start_pfn->end_pfn. Calculate size_pages as the
+ * number of pages used as kernelcore
+ */
+ size_pages = end_pfn - start_pfn;
+ if (size_pages > kernelcore_remaining)
+ size_pages = kernelcore_remaining;
+ zone_movable_pfn[nid] = start_pfn + size_pages;
+
+ /*
+ * Some kernelcore has been met, update counts and
+ * break if the kernelcore for this node has been
+			 * satisfied
+ */
+ required_kernelcore -= min(required_kernelcore,
+ size_pages);
+ kernelcore_remaining -= size_pages;
+ if (!kernelcore_remaining)
+ break;
+ }
+ }
+
+ /*
+ * If there is still required_kernelcore, we do another pass with one
+ * less node in the count. This will push zone_movable_pfn[nid] further
+ * along on the nodes that still have memory until kernelcore is
+	 * satisfied
+ */
+ usable_nodes--;
+ if (usable_nodes && required_kernelcore > usable_nodes)
+ goto restart;
+
+ /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
+ for (nid = 0; nid < MAX_NUMNODES; nid++)
+ zone_movable_pfn[nid] =
+ roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+}
+
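
The even-spread logic is easier to follow with concrete numbers. The stand-alone sketch below (hypothetical node sizes, not kernel code) mimics the restart loop: each pass hands every node an equal share of the still-required kernelcore, nodes that are too small contribute what they have, and the shortfall is pushed onto the larger nodes on the next pass.

#include <stdio.h>

#define NODES 3

int main(void)
{
	unsigned long node_pages[NODES] = { 1000, 4000, 4000 };	/* assumed node sizes */
	unsigned long kcore[NODES] = { 0, 0, 0 };
	unsigned long required = 6000;		/* pages requested via kernelcore= */
	int usable = NODES;

	while (required && usable) {
		unsigned long share = required / usable;
		int nid, progress = 0;

		if (!share)
			share = required;	/* hand out the last few pages */
		for (nid = 0; nid < NODES && required; nid++) {
			unsigned long room = node_pages[nid] - kcore[nid];
			unsigned long take = room < share ? room : share;

			if (take > required)
				take = required;
			if (!take)
				continue;
			kcore[nid] += take;
			required -= take;
			progress = 1;
		}
		if (!progress)
			break;			/* every node is already full */
		usable--;			/* like "goto restart" with one fewer node */
	}

	for (int nid = 0; nid < NODES; nid++)
		printf("node %d: kernelcore %lu pages, ZONE_MOVABLE %lu pages\n",
		       nid, kcore[nid], node_pages[nid] - kcore[nid]);
	return 0;
}

With these numbers node 0 is exhausted by kernelcore, and the two larger nodes absorb its share, leaving 1500 movable pages on each of them.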
/**
* free_area_init_nodes - Initialise all pg_data_t and zone data
* @max_zone_pfn: an array of max PFNs for each zone
@@ -3177,19 +3402,37 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
for (i = 1; i < MAX_NR_ZONES; i++) {
+ if (i == ZONE_MOVABLE)
+ continue;
arch_zone_lowest_possible_pfn[i] =
arch_zone_highest_possible_pfn[i-1];
arch_zone_highest_possible_pfn[i] =
max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
}
+ arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
+ arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
+
+ /* Find the PFNs that ZONE_MOVABLE begins at in each node */
+ memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
+ find_zone_movable_pfns_for_nodes(zone_movable_pfn);
/* Print out the zone ranges */
printk("Zone PFN ranges:\n");
- for (i = 0; i < MAX_NR_ZONES; i++)
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ if (i == ZONE_MOVABLE)
+ continue;
printk(" %-8s %8lu -> %8lu\n",
zone_names[i],
arch_zone_lowest_possible_pfn[i],
arch_zone_highest_possible_pfn[i]);
+ }
+
+ /* Print out the PFNs ZONE_MOVABLE begins at in each node */
+ printk("Movable zone start PFN for each node\n");
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (zone_movable_pfn[i])
+ printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
+ }
/* Print out the early_node_map[] */
printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
@@ -3206,6 +3449,43 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
find_min_pfn_for_node(nid), NULL);
}
}
+
+static int __init cmdline_parse_core(char *p, unsigned long *core)
+{
+ unsigned long long coremem;
+ if (!p)
+ return -EINVAL;
+
+ coremem = memparse(p, &p);
+ *core = coremem >> PAGE_SHIFT;
+
+ /* Paranoid check that UL is enough for the coremem value */
+ WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
+
+ return 0;
+}
+
+/*
+ * kernelcore=size sets the amount of memory used for allocations that
+ * cannot be reclaimed or migrated.
+ */
+static int __init cmdline_parse_kernelcore(char *p)
+{
+ return cmdline_parse_core(p, &required_kernelcore);
+}
+
+/*
+ * movablecore=size sets the amount of memory used for allocations that
+ * can be reclaimed or migrated.
+ */
+static int __init cmdline_parse_movablecore(char *p)
+{
+ return cmdline_parse_core(p, &required_movablecore);
+}
+
+early_param("kernelcore", cmdline_parse_kernelcore);
+early_param("movablecore", cmdline_parse_movablecore);
+
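
Both parameters are run through memparse(), so the usual size suffixes work on the command line, e.g. "kernelcore=512M" or "movablecore=2G". As a rough user-space illustration of the arithmetic (memparse() itself lives in lib/cmdline.c; a 4K page size is assumed here):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;
	}
	return v;
}

int main(void)
{
	unsigned long long bytes = parse_size("512M");

	/* mirrors "*core = coremem >> PAGE_SHIFT" for PAGE_SHIFT == 12 */
	printf("512M -> %llu bytes -> %llu pages\n", bytes, bytes >> 12);
	return 0;
}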
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
/**
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 8ce0900dc95..8f6ee073c0e 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -92,6 +92,7 @@ struct pdflush_work {
static int __pdflush(struct pdflush_work *my_work)
{
current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
my_work->fn = NULL;
my_work->who = current;
INIT_LIST_HEAD(&my_work->list);
diff --git a/mm/shmem.c b/mm/shmem.c
index 0493e4d0bca..96fa79fb6ad 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
+#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -93,8 +94,11 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
* The above definition of ENTRIES_PER_PAGE, and the use of
* BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
* might be reconsidered if it ever diverges from PAGE_SIZE.
+ *
+ * __GFP_MOVABLE is masked out as swap vectors cannot move
*/
- return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+ return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+ PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
static inline void shmem_dir_free(struct page *page)
@@ -372,7 +376,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
}
spin_unlock(&info->lock);
- page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
+ page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
if (page)
set_page_private(page, 0);
spin_lock(&info->lock);
diff --git a/mm/slab.c b/mm/slab.c
index a453383333f..96d30ee256e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
+ if (!size)
+ return ZERO_SIZE_PTR;
+
while (size > csizep->cs_size)
csizep++;
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
- BUG_ON(!cachep->slabp_cache);
+ BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
}
cachep->ctor = ctor;
cachep->name = name;
@@ -2743,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+ BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
local_flags = (flags & GFP_LEVEL_MASK);
/* Take the l3 list lock to change the colour_next on this node */
@@ -3389,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ if (unlikely((flags & __GFP_ZERO) && ptr))
+ memset(ptr, 0, obj_size(cachep));
+
return ptr;
}
@@ -3440,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
+ if (unlikely((flags & __GFP_ZERO) && objp))
+ memset(objp, 0, obj_size(cachep));
+
return objp;
}
@@ -3581,23 +3590,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
EXPORT_SYMBOL(kmem_cache_alloc);
/**
- * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
- * @cache: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache and set the allocated memory to zero.
- * The flags are only relevant if the cache has no available objects.
- */
-void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
-{
- void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
- if (ret)
- memset(ret, 0, obj_size(cache));
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
-/**
* kmem_ptr_validate - check if an untrusted pointer might
* be a slab entry.
* @cachep: the cache we're checking against
@@ -3653,8 +3645,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
struct kmem_cache *cachep;
cachep = kmem_find_general_cachep(size, flags);
- if (unlikely(cachep == NULL))
- return NULL;
+ if (unlikely(ZERO_OR_NULL_PTR(cachep)))
+ return cachep;
return kmem_cache_alloc_node(cachep, flags, node);
}
@@ -3726,52 +3718,6 @@ EXPORT_SYMBOL(__kmalloc);
#endif
/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- struct kmem_cache *cache, *new_cache;
- void *ret;
-
- if (unlikely(!p))
- return kmalloc_track_caller(new_size, flags);
-
- if (unlikely(!new_size)) {
- kfree(p);
- return NULL;
- }
-
- cache = virt_to_cache(p);
- new_cache = __find_general_cachep(new_size, flags);
-
- /*
- * If new size fits in the current cache, bail out.
- */
- if (likely(cache == new_cache))
- return (void *)p;
-
- /*
- * We are on the slow-path here so do not use __cache_alloc
- * because it bloats kernel text.
- */
- ret = kmalloc_track_caller(new_size, flags);
- if (ret) {
- memcpy(ret, p, min(new_size, ksize(p)));
- kfree(p);
- }
- return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
-/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
* @objp: The previously allocated object.
@@ -3806,7 +3752,7 @@ void kfree(const void *objp)
struct kmem_cache *c;
unsigned long flags;
- if (unlikely(!objp))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
@@ -4398,7 +4344,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
unsigned long offset, size;
- char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
+ char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
@@ -4493,7 +4439,7 @@ const struct seq_operations slabstats_op = {
*/
size_t ksize(const void *objp)
{
- if (unlikely(objp == NULL))
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
return 0;
return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index b4899079d8b..c89ef116d7a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
}
+ if (unlikely((gfp & __GFP_ZERO) && b))
+ memset(b, 0, size);
return b;
}
@@ -347,7 +349,7 @@ static void slob_free(void *block, int size)
slobidx_t units;
unsigned long flags;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return;
BUG_ON(!size);
@@ -424,10 +426,13 @@ out:
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
+ unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
if (size < PAGE_SIZE - align) {
- unsigned int *m;
+ if (!size)
+ return ZERO_SIZE_PTR;
+
m = slob_alloc(size + align, gfp, align, node);
if (m)
*m = size;
@@ -446,44 +451,11 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
}
EXPORT_SYMBOL(__kmalloc_node);
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- *
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- void *ret;
-
- if (unlikely(!p))
- return kmalloc_track_caller(new_size, flags);
-
- if (unlikely(!new_size)) {
- kfree(p);
- return NULL;
- }
-
- ret = kmalloc_track_caller(new_size, flags);
- if (ret) {
- memcpy(ret, p, min(new_size, ksize(p)));
- kfree(p);
- }
- return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
void kfree(const void *block)
{
struct slob_page *sp;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return;
sp = (struct slob_page *)virt_to_page(block);
@@ -501,7 +473,7 @@ size_t ksize(const void *block)
{
struct slob_page *sp;
- if (!block)
+ if (ZERO_OR_NULL_PTR(block))
return 0;
sp = (struct slob_page *)virt_to_page(block);
@@ -571,16 +543,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
- void *ret = kmem_cache_alloc(c, flags);
- if (ret)
- memset(ret, 0, c->size);
-
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
static void __kmem_cache_free(void *b, int size)
{
if (size < PAGE_SIZE)
diff --git a/mm/slub.c b/mm/slub.c
index 6aea48942c2..52a4f44be39 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -205,6 +205,11 @@ static inline void ClearSlabDebug(struct page *page)
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
+/*
+ * The page->inuse field is 16 bits wide, hence this limitation
+ */
+#define MAX_OBJECTS_PER_SLAB 65535
+
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
@@ -228,7 +233,7 @@ static enum {
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
-LIST_HEAD(slab_caches);
+static LIST_HEAD(slab_caches);
/*
* Tracking user of a slab.
@@ -247,9 +252,10 @@ static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
+ { return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
#endif
/********************************************************************
@@ -344,7 +350,7 @@ static void print_section(char *text, u8 *addr, unsigned int length)
for (i = 0; i < length; i++) {
if (newline) {
- printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
+ printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
newline = 0;
}
printk(" %02x", addr[i]);
@@ -401,10 +407,11 @@ static void set_track(struct kmem_cache *s, void *object,
static void init_tracking(struct kmem_cache *s, void *object)
{
- if (s->flags & SLAB_STORE_USER) {
- set_track(s, object, TRACK_FREE, NULL);
- set_track(s, object, TRACK_ALLOC, NULL);
- }
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
+ set_track(s, object, TRACK_FREE, NULL);
+ set_track(s, object, TRACK_ALLOC, NULL);
}
static void print_track(const char *s, struct track *t)
@@ -412,65 +419,106 @@ static void print_track(const char *s, struct track *t)
if (!t->addr)
return;
- printk(KERN_ERR "%s: ", s);
+ printk(KERN_ERR "INFO: %s in ", s);
__print_symbol("%s", (unsigned long)t->addr);
- printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+ printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
+}
+
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
+ print_track("Allocated", get_track(s, object, TRACK_ALLOC));
+ print_track("Freed", get_track(s, object, TRACK_FREE));
+}
+
+static void print_page_info(struct page *page)
+{
+ printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
+ page, page->inuse, page->freelist, page->flags);
+
+}
+
+static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+{
+ va_list args;
+ char buf[100];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ printk(KERN_ERR "========================================"
+ "=====================================\n");
+ printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+ printk(KERN_ERR "----------------------------------------"
+ "-------------------------------------\n\n");
}
-static void print_trailer(struct kmem_cache *s, u8 *p)
+static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+{
+ va_list args;
+ char buf[100];
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
+}
+
+static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
+ u8 *addr = page_address(page);
+
+ print_tracking(s, p);
+
+ print_page_info(page);
+
+ printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
+ p, p - addr, get_freepointer(s, p));
+
+ if (p > addr + 16)
+ print_section("Bytes b4", p - 16, 16);
+
+ print_section("Object", p, min(s->objsize, 128));
if (s->flags & SLAB_RED_ZONE)
print_section("Redzone", p + s->objsize,
s->inuse - s->objsize);
- printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
- p + s->offset,
- get_freepointer(s, p));
-
if (s->offset)
off = s->offset + sizeof(void *);
else
off = s->inuse;
- if (s->flags & SLAB_STORE_USER) {
- print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
- print_track("Last free ", get_track(s, p, TRACK_FREE));
+ if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
- }
if (off != s->size)
/* Beginning of the filler is the free pointer */
- print_section("Filler", p + off, s->size - off);
+ print_section("Padding", p + off, s->size - off);
+
+ dump_stack();
}
static void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
- u8 *addr = page_address(page);
-
- printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
- s->name, reason, object, page);
- printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
- object - addr, page->flags, page->inuse, page->freelist);
- if (object > addr + 16)
- print_section("Bytes b4", object - 16, 16);
- print_section("Object", object, min(s->objsize, 128));
- print_trailer(s, object);
- dump_stack();
+ slab_bug(s, reason);
+ print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
va_list args;
char buf[100];
- va_start(args, reason);
- vsnprintf(buf, sizeof(buf), reason, args);
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
- printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
- page);
+ slab_bug(s, fmt);
+ print_page_info(page);
dump_stack();
}
@@ -489,15 +537,46 @@ static void init_object(struct kmem_cache *s, void *object, int active)
s->inuse - s->objsize);
}
-static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
while (bytes) {
if (*start != (u8)value)
- return 0;
+ return start;
start++;
bytes--;
}
- return 1;
+ return NULL;
+}
+
+static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+ void *from, void *to)
+{
+ slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
+ memset(from, data, to - from);
+}
+
+static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
+ u8 *object, char *what,
+ u8* start, unsigned int value, unsigned int bytes)
+{
+ u8 *fault;
+ u8 *end;
+
+ fault = check_bytes(start, value, bytes);
+ if (!fault)
+ return 1;
+
+ end = start + bytes;
+ while (end > fault && end[-1] == value)
+ end--;
+
+ slab_bug(s, "%s overwritten", what);
+ printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
+ fault, end - 1, fault[0], value);
+ print_trailer(s, page, object);
+
+ restore_bytes(s, what, value, fault, end);
+ return 0;
}
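
check_bytes_and_report() narrows the corrupted span before reporting: the first mismatching byte becomes the start, and the end pointer is walked back over trailing bytes that still hold the expected value, so only the damaged range is printed and restored. A stand-alone illustration of that narrowing (hypothetical buffer, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16];
	const unsigned char poison = 0x6b;	/* assumed poison pattern */
	unsigned char *p, *fault = NULL, *end = buf + sizeof(buf);

	memset(buf, poison, sizeof(buf));
	buf[5] = 0x00;				/* simulated corruption */
	buf[8] = 0xff;

	for (p = buf; p < end; p++) {
		if (*p != poison) {
			fault = p;
			break;
		}
	}
	if (!fault) {
		printf("no corruption\n");
		return 0;
	}
	while (end > fault && end[-1] == poison)
		end--;
	printf("corrupted bytes at offsets %td..%td\n",
	       fault - buf, (end - 1) - buf);	/* prints 5..8 */
	return 0;
}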
/*
@@ -538,14 +617,6 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
* may be used with merged slabcaches.
*/
-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
- void *from, void *to)
-{
- printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
- s->name, message, data, from, to - 1);
- memset(from, data, to - from);
-}
-
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned long off = s->inuse; /* The end of info */
@@ -561,39 +632,39 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
if (s->size == off)
return 1;
- if (check_bytes(p + off, POISON_INUSE, s->size - off))
- return 1;
-
- object_err(s, page, p, "Object padding check fails");
-
- /*
- * Restore padding
- */
- restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
- return 0;
+ return check_bytes_and_report(s, page, p, "Object padding",
+ p + off, POISON_INUSE, s->size - off);
}
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
- u8 *p;
- int length, remainder;
+ u8 *start;
+ u8 *fault;
+ u8 *end;
+ int length;
+ int remainder;
if (!(s->flags & SLAB_POISON))
return 1;
- p = page_address(page);
+ start = page_address(page);
+ end = start + (PAGE_SIZE << s->order);
length = s->objects * s->size;
- remainder = (PAGE_SIZE << s->order) - length;
+ remainder = end - (start + length);
if (!remainder)
return 1;
- if (!check_bytes(p + length, POISON_INUSE, remainder)) {
- slab_err(s, page, "Padding check failed");
- restore_bytes(s, "slab padding", POISON_INUSE, p + length,
- p + length + remainder);
- return 0;
- }
- return 1;
+ fault = check_bytes(start + length, POISON_INUSE, remainder);
+ if (!fault)
+ return 1;
+ while (end > fault && end[-1] == POISON_INUSE)
+ end--;
+
+ slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+ print_section("Padding", start, length);
+
+ restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+ return 0;
}
static int check_object(struct kmem_cache *s, struct page *page,
@@ -606,41 +677,22 @@ static int check_object(struct kmem_cache *s, struct page *page,
unsigned int red =
active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
- if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
- object_err(s, page, object,
- active ? "Redzone Active" : "Redzone Inactive");
- restore_bytes(s, "redzone", red,
- endobject, object + s->inuse);
+ if (!check_bytes_and_report(s, page, object, "Redzone",
+ endobject, red, s->inuse - s->objsize))
return 0;
- }
} else {
- if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
- !check_bytes(endobject, POISON_INUSE,
- s->inuse - s->objsize)) {
- object_err(s, page, p, "Alignment padding check fails");
- /*
- * Fix it so that there will not be another report.
- *
- * Hmmm... We may be corrupting an object that now expects
- * to be longer than allowed.
- */
- restore_bytes(s, "alignment padding", POISON_INUSE,
- endobject, object + s->inuse);
- }
+ if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
+ check_bytes_and_report(s, page, p, "Alignment padding", endobject,
+ POISON_INUSE, s->inuse - s->objsize);
}
if (s->flags & SLAB_POISON) {
if (!active && (s->flags & __OBJECT_POISON) &&
- (!check_bytes(p, POISON_FREE, s->objsize - 1) ||
- p[s->objsize - 1] != POISON_END)) {
-
- object_err(s, page, p, "Poison check failed");
- restore_bytes(s, "Poison", POISON_FREE,
- p, p + s->objsize -1);
- restore_bytes(s, "Poison", POISON_END,
- p + s->objsize - 1, p + s->objsize);
+ (!check_bytes_and_report(s, page, p, "Poison", p,
+ POISON_FREE, s->objsize - 1) ||
+ !check_bytes_and_report(s, page, p, "Poison",
+ p + s->objsize -1, POISON_END, 1)))
return 0;
- }
/*
* check_pad_bytes cleans up on its own.
*/
@@ -673,25 +725,17 @@ static int check_slab(struct kmem_cache *s, struct page *page)
VM_BUG_ON(!irqs_disabled());
if (!PageSlab(page)) {
- slab_err(s, page, "Not a valid slab page flags=%lx "
- "mapping=0x%p count=%d", page->flags, page->mapping,
- page_count(page));
+ slab_err(s, page, "Not a valid slab page");
return 0;
}
if (page->offset * sizeof(void *) != s->offset) {
- slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
- "mapping=0x%p count=%d",
- (unsigned long)(page->offset * sizeof(void *)),
- page->flags,
- page->mapping,
- page_count(page));
+ slab_err(s, page, "Corrupted offset %lu",
+ (unsigned long)(page->offset * sizeof(void *)));
return 0;
}
if (page->inuse > s->objects) {
- slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
- "mapping=0x%p count=%d",
- s->name, page->inuse, s->objects, page->flags,
- page->mapping, page_count(page));
+ slab_err(s, page, "inuse %u > max %u",
+ s->name, page->inuse, s->objects);
return 0;
}
/* Slab_pad_check fixes things up after itself */
@@ -719,13 +763,10 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
set_freepointer(s, object, NULL);
break;
} else {
- slab_err(s, page, "Freepointer 0x%p corrupt",
- fp);
+ slab_err(s, page, "Freepointer corrupt");
page->freelist = NULL;
page->inuse = s->objects;
- printk(KERN_ERR "@@@ SLUB %s: Freelist "
- "cleared. Slab 0x%p\n",
- s->name, page);
+ slab_fix(s, "Freelist cleared");
return 0;
}
break;
@@ -737,11 +778,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
if (page->inuse != s->objects - nr) {
slab_err(s, page, "Wrong object count. Counter is %d but "
- "counted were %d", s, page, page->inuse,
- s->objects - nr);
+ "counted were %d", page->inuse, s->objects - nr);
page->inuse = s->objects - nr;
- printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
- "Slab @0x%p\n", s->name, page);
+ slab_fix(s, "Object count adjusted.");
}
return search == NULL;
}
@@ -803,7 +842,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
goto bad;
if (object && !on_freelist(s, page, object)) {
- slab_err(s, page, "Object 0x%p already allocated", object);
+ object_err(s, page, object, "Object already allocated");
goto bad;
}
@@ -829,8 +868,7 @@ bad:
* to avoid issues in the future. Marking all objects
* as used avoids touching the remaining objects.
*/
- printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
- s->name, page);
+ slab_fix(s, "Marking all objects used");
page->inuse = s->objects;
page->freelist = NULL;
/* Fix up fields that may be corrupted */
@@ -851,7 +889,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
}
if (on_freelist(s, page, object)) {
- slab_err(s, page, "Object 0x%p already free", object);
+ object_err(s, page, object, "Object already free");
goto fail;
}
@@ -870,8 +908,8 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
dump_stack();
}
else
- slab_err(s, page, "object at 0x%p belongs "
- "to slab %s", object, page->slab->name);
+ object_err(s, page, object,
+ "page slab pointer corrupt.");
goto fail;
}
@@ -885,8 +923,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
return 1;
fail:
- printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
- s->name, page, object);
+ slab_fix(s, "Object at 0x%p not freed", object);
return 0;
}
@@ -1041,7 +1078,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
void *last;
void *p;
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+ BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
if (flags & __GFP_WAIT)
local_irq_enable();
@@ -1359,7 +1396,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
unfreeze_slab(s, page);
}
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
{
slab_lock(page);
deactivate_slab(s, page, cpu);
@@ -1369,7 +1406,7 @@ static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
* Flush cpu slab.
* Called from IPI handler with interrupts disabled.
*/
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct page *page = s->cpu_slab[cpu];
@@ -1504,7 +1541,7 @@ debug:
* Otherwise we can simply pick the next object from the lockless free list.
*/
static void __always_inline *slab_alloc(struct kmem_cache *s,
- gfp_t gfpflags, int node, void *addr)
+ gfp_t gfpflags, int node, void *addr)
{
struct page *page;
void **object;
@@ -1522,6 +1559,10 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
page->lockless_freelist = object[page->offset];
}
local_irq_restore(flags);
+
+ if (unlikely((gfpflags & __GFP_ZERO) && object))
+ memset(object, 0, s->objsize);
+
return object;
}
@@ -1705,8 +1746,17 @@ static inline int slab_order(int size, int min_objects,
{
int order;
int rem;
+ int min_order = slub_min_order;
- for (order = max(slub_min_order,
+ /*
+	 * If we would create too many objects per slab then reduce
+ * the slab order even if it goes below slub_min_order.
+ */
+ while (min_order > 0 &&
+ (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
+ min_order--;
+
+ for (order = max(min_order,
fls(min_objects * size - 1) - PAGE_SHIFT);
order <= max_order; order++) {
@@ -1720,6 +1770,9 @@ static inline int slab_order(int size, int min_objects,
if (rem <= slab_size / fract_leftover)
break;
+ /* If the next size is too high then exit now */
+ if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
+ break;
}
return order;
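
A back-of-the-envelope check of the new clamp, assuming 4K pages: for the smallest objects a high slab order would overflow the 16-bit page->inuse counter, which is exactly what the loop above now avoids by lowering the order.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, size = 8;
	int order;

	for (order = 0; order <= 7; order++) {
		unsigned long objects = (page_size << order) / size;

		printf("order %d: %5lu objects per slab%s\n", order, objects,
		       objects > 65535 ? "  <-- would overflow page->inuse" : "");
	}
	return 0;
}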
@@ -1800,7 +1853,9 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
atomic_long_set(&n->nr_slabs, 0);
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
INIT_LIST_HEAD(&n->full);
+#endif
}
#ifdef CONFIG_NUMA
@@ -1828,7 +1883,10 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
page->freelist = get_freepointer(kmalloc_caches, n);
page->inuse++;
kmalloc_caches->node[node] = n;
- setup_object_debug(kmalloc_caches, page, n);
+#ifdef CONFIG_SLUB_DEBUG
+ init_object(kmalloc_caches, n, 1);
+ init_tracking(kmalloc_caches, n);
+#endif
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
add_partial(n, page);
@@ -2006,7 +2064,7 @@ static int calculate_sizes(struct kmem_cache *s)
* The page->inuse field is only 16 bit wide! So we cannot have
* more than 64k objects per slab.
*/
- if (!s->objects || s->objects > 65535)
+ if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
return 0;
return 1;
@@ -2110,7 +2168,7 @@ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
/*
* Release all resources used by a slab cache.
*/
-static int kmem_cache_close(struct kmem_cache *s)
+static inline int kmem_cache_close(struct kmem_cache *s)
{
int node;
@@ -2138,12 +2196,13 @@ void kmem_cache_destroy(struct kmem_cache *s)
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
+ up_write(&slub_lock);
if (kmem_cache_close(s))
WARN_ON(1);
sysfs_slab_remove(s);
kfree(s);
- }
- up_write(&slub_lock);
+ } else
+ up_write(&slub_lock);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2216,47 +2275,92 @@ panic:
panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
}
-static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+#ifdef CONFIG_ZONE_DMA
+static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
- int index = kmalloc_index(size);
+ struct kmem_cache *s;
+ struct kmem_cache *x;
+ char *text;
+ size_t realsize;
- if (!index)
- return NULL;
+ s = kmalloc_caches_dma[index];
+ if (s)
+ return s;
- /* Allocation too large? */
- BUG_ON(index < 0);
+ /* Dynamically create dma cache */
+ x = kmalloc(kmem_size, flags & ~SLUB_DMA);
+ if (!x)
+ panic("Unable to allocate memory for dma cache\n");
-#ifdef CONFIG_ZONE_DMA
- if ((flags & SLUB_DMA)) {
- struct kmem_cache *s;
- struct kmem_cache *x;
- char *text;
- size_t realsize;
-
- s = kmalloc_caches_dma[index];
- if (s)
- return s;
+ realsize = kmalloc_caches[index].objsize;
+ text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+ (unsigned int)realsize);
+ s = create_kmalloc_cache(x, text, realsize, flags);
+ down_write(&slub_lock);
+ if (!kmalloc_caches_dma[index]) {
+ kmalloc_caches_dma[index] = s;
+ up_write(&slub_lock);
+ return s;
+ }
+ up_write(&slub_lock);
+ kmem_cache_destroy(s);
+ return kmalloc_caches_dma[index];
+}
+#endif
+
+/*
+ * Conversion table from small slab sizes (divided by 8) to the index in
+ * the kmalloc array. This is necessary for slabs <= 192 since we have
+ * non-power-of-two cache sizes there. The index for larger slabs can be
+ * determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+ 3, /* 8 */
+ 4, /* 16 */
+ 5, /* 24 */
+ 5, /* 32 */
+ 6, /* 40 */
+ 6, /* 48 */
+ 6, /* 56 */
+ 6, /* 64 */
+ 1, /* 72 */
+ 1, /* 80 */
+ 1, /* 88 */
+ 1, /* 96 */
+ 7, /* 104 */
+ 7, /* 112 */
+ 7, /* 120 */
+ 7, /* 128 */
+ 2, /* 136 */
+ 2, /* 144 */
+ 2, /* 152 */
+ 2, /* 160 */
+ 2, /* 168 */
+ 2, /* 176 */
+ 2, /* 184 */
+ 2 /* 192 */
+};
- /* Dynamically create dma cache */
- x = kmalloc(kmem_size, flags & ~SLUB_DMA);
- if (!x)
- panic("Unable to allocate memory for dma cache\n");
+static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+{
+ int index;
- if (index <= KMALLOC_SHIFT_HIGH)
- realsize = 1 << index;
- else {
- if (index == 1)
- realsize = 96;
- else
- realsize = 192;
- }
+ if (size <= 192) {
+ if (!size)
+ return ZERO_SIZE_PTR;
- text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
- (unsigned int)realsize);
- s = create_kmalloc_cache(x, text, realsize, flags);
- kmalloc_caches_dma[index] = s;
- return s;
+ index = size_index[(size - 1) / 8];
+ } else {
+ if (size > KMALLOC_MAX_SIZE)
+ return NULL;
+
+ index = fls(size - 1);
}
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely((flags & SLUB_DMA)))
+ return dma_kmalloc_cache(index, flags);
+
#endif
return &kmalloc_caches[index];
}
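/*
 * Illustrative sketch (not part of the patch): how the table above resolves
 * a request.  kmalloc(100) gives (100 - 1) / 8 = 12 and size_index[12] == 7,
 * i.e. the 128-byte cache; sizes above 192 fall through to fls(size - 1).
 * Standalone userspace check (fls re-implemented, size > 0 assumed):
 */
#include <stdio.h>

static const signed char size_index_demo[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

static int fls_demo(unsigned int x)	/* position of highest set bit, 1-based */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int kmalloc_index_demo(unsigned int size)
{
	if (size <= 192)
		return size_index_demo[(size - 1) / 8];
	return fls_demo(size - 1);	/* 2^index is the next power of two */
}

int main(void)
{
	printf("100 -> %d\n", kmalloc_index_demo(100));	/* 7: 128-byte cache */
	printf("300 -> %d\n", kmalloc_index_demo(300));	/* 9: 512-byte cache */
	return 0;
}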
@@ -2265,9 +2369,10 @@ void *__kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *s = get_slab(size, flags);
- if (s)
- return slab_alloc(s, flags, -1, __builtin_return_address(0));
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
+
+ return slab_alloc(s, flags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc);
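/*
 * Illustrative sketch (not part of the patch): ZERO_SIZE_PTR is a small
 * non-NULL sentinel (the kernel uses (void *)16) handed out for zero-byte
 * allocations, so callers can tell "allocated nothing" apart from "out of
 * memory".  ZERO_OR_NULL_PTR() is true for both NULL and the sentinel,
 * which is why __kmalloc() can return s unchanged and kfree() can bail out
 * early.  Userspace analogue of the two macros:
 */
#include <stdio.h>
#include <stddef.h>

#define ZERO_SIZE_PTR_DEMO	((void *)16)
#define ZERO_OR_NULL_PTR_DEMO(x) \
	((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR_DEMO)

int main(void)
{
	int real_object = 0;

	printf("%d %d %d\n",
	       ZERO_OR_NULL_PTR_DEMO(NULL),			/* 1 */
	       ZERO_OR_NULL_PTR_DEMO(ZERO_SIZE_PTR_DEMO),	/* 1 */
	       ZERO_OR_NULL_PTR_DEMO(&real_object));		/* 0 in practice */
	return 0;
}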
@@ -2276,9 +2381,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s = get_slab(size, flags);
- if (s)
- return slab_alloc(s, flags, node, __builtin_return_address(0));
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
+
+ return slab_alloc(s, flags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -2329,7 +2435,7 @@ void kfree(const void *x)
* this comparison would be true for all "negative" pointers
* (which would cover the whole upper half of the address space).
*/
- if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
+ if (ZERO_OR_NULL_PTR(x))
return;
page = virt_to_head_page(x);
@@ -2418,43 +2524,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
}
EXPORT_SYMBOL(kmem_cache_shrink);
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- void *ret;
- size_t ks;
-
- if (unlikely(!p || p == ZERO_SIZE_PTR))
- return kmalloc(new_size, flags);
-
- if (unlikely(!new_size)) {
- kfree(p);
- return ZERO_SIZE_PTR;
- }
-
- ks = ksize(p);
- if (ks >= new_size)
- return (void *)p;
-
- ret = kmalloc(new_size, flags);
- if (ret) {
- memcpy(ret, p, min(new_size, ks));
- kfree(p);
- }
- return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
/********************************************************************
* Basic setup of slabs
*******************************************************************/
@@ -2497,6 +2566,24 @@ void __init kmem_cache_init(void)
caches++;
}
+
+ /*
+ * Patch up the size_index table if we have strange large alignment
+ * requirements for the kmalloc array. This is only the case for
+ * MIPS, it seems. The standard arches will not generate any code here.
+ *
+ * Largest permitted alignment is 256 bytes due to the way we
+ * handle the index determination for the smaller caches.
+ *
+ * Make sure that nothing crazy happens if someone starts tinkering
+ * around with ARCH_KMALLOC_MINALIGN
+ */
+ BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+ (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+ for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
+ size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+
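/*
 * Illustrative sketch (not part of the patch): with KMALLOC_MIN_SIZE = 64
 * (so KMALLOC_SHIFT_LOW = 6, as on an arch with 64-byte ARCH_KMALLOC_MINALIGN)
 * the loop above rewrites the entries for sizes 8..56, so every small request
 * resolves to the 64-byte cache:
 */
static void patch_size_index_demo(signed char size_index[24],
				  unsigned int min_size, int shift_low)
{
	unsigned int i;

	for (i = 8; i < min_size; i += 8)
		size_index[(i - 1) / 8] = shift_low;
	/* for min_size == 64, size_index[0..6] are now all 6 (2^6 = 64) */
}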
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
@@ -2542,7 +2629,7 @@ static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
- struct list_head *h;
+ struct kmem_cache *s;
if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
return NULL;
@@ -2554,10 +2641,7 @@ static struct kmem_cache *find_mergeable(size_t size,
align = calculate_alignment(flags, align, size);
size = ALIGN(size, align);
- list_for_each(h, &slab_caches) {
- struct kmem_cache *s =
- container_of(h, struct kmem_cache, list);
-
+ list_for_each_entry(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
@@ -2600,25 +2684,26 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
*/
s->objsize = max(s->objsize, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+ up_write(&slub_lock);
if (sysfs_slab_alias(s, name))
goto err;
- } else {
- s = kmalloc(kmem_size, GFP_KERNEL);
- if (s && kmem_cache_open(s, GFP_KERNEL, name,
+ return s;
+ }
+ s = kmalloc(kmem_size, GFP_KERNEL);
+ if (s) {
+ if (kmem_cache_open(s, GFP_KERNEL, name,
size, align, flags, ctor)) {
- if (sysfs_slab_add(s)) {
- kfree(s);
- goto err;
- }
list_add(&s->list, &slab_caches);
- } else
- kfree(s);
+ up_write(&slub_lock);
+ if (sysfs_slab_add(s))
+ goto err;
+ return s;
+ }
+ kfree(s);
}
up_write(&slub_lock);
- return s;
err:
- up_write(&slub_lock);
if (flags & SLAB_PANIC)
panic("Cannot create slabcache %s\n", name);
else
@@ -2627,45 +2712,7 @@ err:
}
EXPORT_SYMBOL(kmem_cache_create);
-void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
-{
- void *x;
-
- x = slab_alloc(s, flags, -1, __builtin_return_address(0));
- if (x)
- memset(x, 0, s->objsize);
- return x;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
#ifdef CONFIG_SMP
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
- struct list_head *h;
-
- down_read(&slub_lock);
- list_for_each(h, &slab_caches) {
- struct kmem_cache *s =
- container_of(h, struct kmem_cache, list);
-
- func(s, cpu);
- }
- up_read(&slub_lock);
-}
-
-/*
- * Version of __flush_cpu_slab for the case that interrupts
- * are enabled.
- */
-static void cpu_slab_flush(struct kmem_cache *s, int cpu)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __flush_cpu_slab(s, cpu);
- local_irq_restore(flags);
-}
-
/*
* Use the cpu notifier to ensure that the cpu slabs are flushed when
* necessary.
@@ -2674,13 +2721,21 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
+ struct kmem_cache *s;
+ unsigned long flags;
switch (action) {
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- for_all_slabs(cpu_slab_flush, cpu);
+ down_read(&slub_lock);
+ list_for_each_entry(s, &slab_caches, list) {
+ local_irq_save(flags);
+ __flush_cpu_slab(s, cpu);
+ local_irq_restore(flags);
+ }
+ up_read(&slub_lock);
break;
default:
break;
@@ -2697,8 +2752,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
struct kmem_cache *s = get_slab(size, gfpflags);
- if (!s)
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
return slab_alloc(s, gfpflags, -1, caller);
}
@@ -2708,18 +2763,18 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
{
struct kmem_cache *s = get_slab(size, gfpflags);
- if (!s)
- return ZERO_SIZE_PTR;
+ if (ZERO_OR_NULL_PTR(s))
+ return s;
return slab_alloc(s, gfpflags, node, caller);
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+ unsigned long *map)
{
void *p;
void *addr = page_address(page);
- DECLARE_BITMAP(map, s->objects);
if (!check_slab(s, page) ||
!on_freelist(s, page, NULL))
@@ -2741,10 +2796,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
return 1;
}
-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+ unsigned long *map)
{
if (slab_trylock(page)) {
- validate_slab(s, page);
+ validate_slab(s, page, map);
slab_unlock(page);
} else
printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2761,7 +2817,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
}
}
-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+ struct kmem_cache_node *n, unsigned long *map)
{
unsigned long count = 0;
struct page *page;
@@ -2770,7 +2827,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru) {
- validate_slab_slab(s, page);
+ validate_slab_slab(s, page, map);
count++;
}
if (count != n->nr_partial)
@@ -2781,7 +2838,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
goto out;
list_for_each_entry(page, &n->full, lru) {
- validate_slab_slab(s, page);
+ validate_slab_slab(s, page, map);
count++;
}
if (count != atomic_long_read(&n->nr_slabs))
@@ -2794,17 +2851,23 @@ out:
return count;
}
-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
{
int node;
unsigned long count = 0;
+ unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!map)
+ return -ENOMEM;
flush_all(s);
for_each_online_node(node) {
struct kmem_cache_node *n = get_node(s, node);
- count += validate_slab_node(s, n);
+ count += validate_slab_node(s, n, map);
}
+ kfree(map);
return count;
}
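/*
 * Illustrative sketch (not part of the patch): the per-call on-stack
 * DECLARE_BITMAP(map, s->objects) became one heap allocation reused for every
 * slab of the cache.  BITS_TO_LONGS() just rounds a bit count up to whole
 * longs; with 64-bit longs, 65 objects need 2 longs (16 bytes).  Userspace
 * analogue of the sizing:
 */
#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG_DEMO	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS_DEMO(n)	(((n) + BITS_PER_LONG_DEMO - 1) / BITS_PER_LONG_DEMO)

static unsigned long *alloc_object_map(unsigned int objects)
{
	/* may return NULL, exactly as the GFP_KERNEL kmalloc() above may */
	return calloc(BITS_TO_LONGS_DEMO(objects), sizeof(unsigned long));
}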
@@ -2893,18 +2956,14 @@ static void free_loc_track(struct loc_track *t)
get_order(sizeof(struct location) * t->max));
}
-static int alloc_loc_track(struct loc_track *t, unsigned long max)
+static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
struct location *l;
int order;
- if (!max)
- max = PAGE_SIZE / sizeof(struct location);
-
order = get_order(sizeof(struct location) * max);
- l = (void *)__get_free_pages(GFP_ATOMIC, order);
-
+ l = (void *)__get_free_pages(flags, order);
if (!l)
return 0;
@@ -2970,7 +3029,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
/*
* Not found. Insert new tracking element.
*/
- if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
+ if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
return 0;
l = t->loc + pos;
@@ -3013,11 +3072,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
{
int n = 0;
unsigned long i;
- struct loc_track t;
+ struct loc_track t = { 0, 0, NULL };
int node;
- t.count = 0;
- t.max = 0;
+ if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+ GFP_KERNEL))
+ return sprintf(buf, "Out of memory\n");
/* Push back cpu slabs */
flush_all(s);
@@ -3421,11 +3481,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
static ssize_t validate_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- if (buf[0] == '1')
- validate_slab_cache(s);
- else
- return -EINVAL;
- return length;
+ int ret = -EINVAL;
+
+ if (buf[0] == '1') {
+ ret = validate_slab_cache(s);
+ if (ret >= 0)
+ ret = length;
+ }
+ return ret;
}
SLAB_ATTR(validate);
@@ -3579,7 +3642,7 @@ static struct kset_uevent_ops slab_uevent_ops = {
.filter = uevent_filter,
};
-decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
+static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
#define ID_STR_LENGTH 64
@@ -3677,7 +3740,7 @@ struct saved_alias {
struct saved_alias *next;
};
-struct saved_alias *alias_list;
+static struct saved_alias *alias_list;
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
@@ -3705,7 +3768,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
static int __init slab_sysfs_init(void)
{
- struct list_head *h;
+ struct kmem_cache *s;
int err;
err = subsystem_register(&slab_subsys);
@@ -3716,10 +3779,7 @@ static int __init slab_sysfs_init(void)
slab_state = SYSFS;
- list_for_each(h, &slab_caches) {
- struct kmem_cache *s =
- container_of(h, struct kmem_cache, list);
-
+ list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
BUG_ON(err);
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 925d5c50f18..67daecb6031 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -334,7 +334,8 @@ struct page *read_swap_cache_async(swp_entry_t entry,
* Get a new page to read into from swap.
*/
if (!new_page) {
- new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+ vma, addr);
if (!new_page)
break; /* Out of memory */
}
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c994f2d614..f47e46d1be3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -100,9 +100,9 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
if (PagePrivate(page))
do_invalidatepage(page, 0);
+ remove_from_page_cache(page);
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
- remove_from_page_cache(page);
page_cache_release(page); /* pagecache ref */
}
diff --git a/mm/util.c b/mm/util.c
index ace2aea69f1..78f3783bdcc 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,20 +5,6 @@
#include <asm/uaccess.h>
/**
- * __kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- */
-void *__kzalloc(size_t size, gfp_t flags)
-{
- void *ret = kmalloc_track_caller(size, flags);
- if (ret)
- memset(ret, 0, size);
- return ret;
-}
-EXPORT_SYMBOL(__kzalloc);
-
-/*
* kstrdup - allocate space for and copy an existing string
*
* @s: the string to duplicate
@@ -58,6 +44,40 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL(kmemdup);
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+ void *ret;
+ size_t ks;
+
+ if (unlikely(!new_size)) {
+ kfree(p);
+ return ZERO_SIZE_PTR;
+ }
+
+ ks = ksize(p);
+ if (ks >= new_size)
+ return (void *)p;
+
+ ret = kmalloc_track_caller(new_size, flags);
+ if (ret) {
+ memcpy(ret, p, min(new_size, ks));
+ kfree(p);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
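/*
 * Illustrative sketch (not part of the patch): the usual growth idiom with
 * krealloc() as moved here.  The names are hypothetical; the point is that
 * the old buffer is freed only on success, so it stays valid when krealloc()
 * returns NULL (assumes <linux/slab.h>):
 */
static int grow_buffer_demo(void **buf, size_t new_size)
{
	void *tmp = krealloc(*buf, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;		/* *buf untouched, caller may keep using it */
	*buf = tmp;
	return 0;
}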
/*
* strndup_user - duplicate an existing string from user space
*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc4..8e05a11155c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+ pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+ PAGE_KERNEL, node);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size,
- (gfp_mask & GFP_LEVEL_MASK),
+ (gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
node);
}
area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
kfree(area);
return NULL;
}
- memset(area->pages, 0, array_size);
for (i = 0; i < area->nr_pages; i++) {
if (node < 0)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1be5a6376ef..d419e10e3da 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -66,17 +66,8 @@ struct scan_control {
int swappiness;
int all_unreclaimable;
-};
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
- shrinker_t shrinker;
- struct list_head list;
- int seeks; /* seeks to recreate an obj */
- long nr; /* objs pending delete */
+ int order;
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -121,34 +112,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
{
- struct shrinker *shrinker;
-
- shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
- if (shrinker) {
- shrinker->shrinker = theshrinker;
- shrinker->seeks = seeks;
- shrinker->nr = 0;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- }
- return shrinker;
+ shrinker->nr = 0;
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
}
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
/*
* Remove one
*/
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
{
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
- kfree(shrinker);
}
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
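/*
 * Illustrative sketch (not part of the patch): with the embedded-struct API
 * introduced above, a cache owner supplies its own struct shrinker and
 * callback instead of receiving an opaque handle from set_shrinker().  The
 * names are hypothetical; the sunrpc hunk further down shows a real
 * conversion:
 */
static int demo_cache_count;

static int demo_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		/* drop up to nr_to_scan cached objects here */
	}
	return demo_cache_count;	/* objects still cached */
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* module init:  register_shrinker(&demo_shrinker);   */
/* module exit:  unregister_shrinker(&demo_shrinker); */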
#define SHRINK_BATCH 128
/*
@@ -185,7 +167,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
- unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+ unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
delta = (4 * scanned) / shrinker->seeks;
delta *= max_pass;
@@ -213,8 +195,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
int shrink_ret;
int nr_before;
- nr_before = (*shrinker->shrinker)(0, gfp_mask);
- shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+ nr_before = (*shrinker->shrink)(0, gfp_mask);
+ shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
if (shrink_ret == -1)
break;
if (shrink_ret < nr_before)
@@ -481,7 +463,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
referenced = page_referenced(page, 1);
/* In active use or really unfreeable? Activate it. */
- if (referenced && page_mapping_inuse(page))
+ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
+ referenced && page_mapping_inuse(page))
goto activate_locked;
#ifdef CONFIG_SWAP
@@ -514,7 +497,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
if (PageDirty(page)) {
- if (referenced)
+ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
goto keep_locked;
if (!may_enter_fs)
goto keep_locked;
@@ -598,6 +581,51 @@ keep:
return nr_reclaimed;
}
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
+#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
+
+/*
+ * Attempt to remove the specified page from its LRU. Only take this page
+ * if it is of the appropriate PageActive status. Pages which are being
+ * freed elsewhere are also ignored.
+ *
+ * page: page to consider
+ * mode: one of the LRU isolation modes defined above
+ *
+ * returns 0 on success, -ve errno on failure.
+ */
+static int __isolate_lru_page(struct page *page, int mode)
+{
+ int ret = -EINVAL;
+
+ /* Only take pages on the LRU. */
+ if (!PageLRU(page))
+ return ret;
+
+ /*
+ * When checking the active state, we need to be sure we are
+ * dealing with comparable boolean values. Take the logical not
+ * of each.
+ */
+ if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+ return ret;
+
+ ret = -EBUSY;
+ if (likely(get_page_unless_zero(page))) {
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ ClearPageLRU(page);
+ ret = 0;
+ }
+
+ return ret;
+}
+
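/*
 * Illustrative sketch (not part of the patch): PageActive() only guarantees
 * zero/non-zero while mode is exactly 0 or 1, so both are collapsed to
 * booleans with '!' before comparing.  Without that, an "active" value of 4
 * would compare unequal to ISOLATE_ACTIVE (1) and the page would wrongly be
 * skipped.  Standalone check:
 */
#include <assert.h>

int main(void)
{
	int page_active = 4;	/* any non-zero flag value, not literally 1 */
	int mode = 1;		/* ISOLATE_ACTIVE */

	assert(page_active != mode);		/* naive compare: looks "different" */
	assert(!page_active == !mode);		/* normalised: correctly equal */
	return 0;
}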
/*
* zone->lru_lock is heavily contended. Some of the functions that
* shrink the lists perform better by taking out a batch of pages
@@ -612,38 +640,90 @@ keep:
* @src: The LRU list to pull pages off.
* @dst: The temp list to put pages on to.
* @scanned: The number of pages that were scanned.
+ * @order: The caller's attempted allocation order
+ * @mode: One of the LRU isolation modes
*
* returns how many pages were moved onto *@dst.
*/
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src, struct list_head *dst,
- unsigned long *scanned)
+ unsigned long *scanned, int order, int mode)
{
unsigned long nr_taken = 0;
- struct page *page;
unsigned long scan;
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
- struct list_head *target;
+ struct page *page;
+ unsigned long pfn;
+ unsigned long end_pfn;
+ unsigned long page_pfn;
+ int zone_id;
+
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- list_del(&page->lru);
- target = src;
- if (likely(get_page_unless_zero(page))) {
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- ClearPageLRU(page);
- target = dst;
+ switch (__isolate_lru_page(page, mode)) {
+ case 0:
+ list_move(&page->lru, dst);
nr_taken++;
- } /* else it is being freed elsewhere */
+ break;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&page->lru, src);
+ continue;
+
+ default:
+ BUG();
+ }
+
+ if (!order)
+ continue;
- list_add(&page->lru, target);
+ /*
+ * Attempt to take all pages in the order-aligned region
+ * surrounding the tag page. Only take those pages of
+ * the same active state as that tag page. We may safely
+ * round the target page pfn down to the requested order
+ * as the mem_map is guaranteed valid out to MAX_ORDER;
+ * if that page is in a different zone we will detect
+ * it from its zone id and abort this block scan.
+ */
+ zone_id = page_zone_id(page);
+ page_pfn = page_to_pfn(page);
+ pfn = page_pfn & ~((1 << order) - 1);
+ end_pfn = pfn + (1 << order);
+ for (; pfn < end_pfn; pfn++) {
+ struct page *cursor_page;
+
+ /* The target page is in the block, ignore it. */
+ if (unlikely(pfn == page_pfn))
+ continue;
+
+ /* Avoid holes within the zone. */
+ if (unlikely(!pfn_valid_within(pfn)))
+ break;
+
+ cursor_page = pfn_to_page(pfn);
+ /* Check that we have not crossed a zone boundary. */
+ if (unlikely(page_zone_id(cursor_page) != zone_id))
+ continue;
+ switch (__isolate_lru_page(cursor_page, mode)) {
+ case 0:
+ list_move(&cursor_page->lru, dst);
+ nr_taken++;
+ scan++;
+ break;
+
+ case -EBUSY:
+ /* else it is being freed elsewhere */
+ list_move(&cursor_page->lru, src);
+ default:
+ break;
+ }
+ }
}
*scanned = scan;
@@ -651,6 +731,24 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
}
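/*
 * Illustrative sketch (not part of the patch): the lumpy-reclaim hunk above
 * rounds the tag page's pfn down to an order-aligned block and scans the
 * whole block.  For order 3 (8 pages) and a tag page at pfn 1037,
 * 1037 & ~7 = 1032, so pfns 1032..1039 are considered.  Standalone check:
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_pfn = 1037, pfn, end_pfn;
	int order = 3;

	pfn = page_pfn & ~((1UL << order) - 1);
	end_pfn = pfn + (1UL << order);
	printf("block: [%lu, %lu)\n", pfn, end_pfn);	/* block: [1032, 1040) */
	return 0;
}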
/*
+ * clear_active_flags() is a helper for shrink_inactive_list(), clearing
+ * any active bits from the pages in the list.
+ */
+static unsigned long clear_active_flags(struct list_head *page_list)
+{
+ int nr_active = 0;
+ struct page *page;
+
+ list_for_each_entry(page, page_list, lru)
+ if (PageActive(page)) {
+ ClearPageActive(page);
+ nr_active++;
+ }
+
+ return nr_active;
+}
+
+/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
@@ -671,11 +769,18 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_taken;
unsigned long nr_scan;
unsigned long nr_freed;
+ unsigned long nr_active;
nr_taken = isolate_lru_pages(sc->swap_cluster_max,
- &zone->inactive_list,
- &page_list, &nr_scan);
- __mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
+ &zone->inactive_list,
+ &page_list, &nr_scan, sc->order,
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
+ ISOLATE_BOTH : ISOLATE_INACTIVE);
+ nr_active = clear_active_flags(&page_list);
+
+ __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
+ __mod_zone_page_state(zone, NR_INACTIVE,
+ -(nr_taken - nr_active));
zone->pages_scanned += nr_scan;
spin_unlock_irq(&zone->lru_lock);
@@ -820,7 +925,7 @@ force_reclaim_mapped:
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
- &l_hold, &pgscanned);
+ &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
zone->pages_scanned += pgscanned;
__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
spin_unlock_irq(&zone->lru_lock);
@@ -1011,7 +1116,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -1026,6 +1131,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_swap = 1,
.swappiness = vm_swappiness,
+ .order = order,
};
count_vm_event(ALLOCSTALL);
@@ -1131,6 +1237,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
.may_swap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
+ .order = order,
};
/*
* temp_priority is used to remember the scanning priority at which
@@ -1314,6 +1421,7 @@ static int kswapd(void *p)
* trying to free the first piece of memory in the first place).
*/
tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+ set_freezable();
order = 0;
for ( ; ; ) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index eceaf496210..fadf791cd7e 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -472,7 +472,7 @@ const struct seq_operations fragmentation_op = {
#endif
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
- TEXT_FOR_HIGHMEM(xx)
+ TEXT_FOR_HIGHMEM(xx) xx "_movable",
static const char * const vmstat_text[] = {
/* Zoned VM counters */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1c8f4a0c5f4..1f78c3e336d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/freezer.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <net/sock.h>
@@ -474,7 +475,6 @@ static int bnep_session(void *arg)
daemonize("kbnepd %s", dev->name);
set_user_nice(current, -15);
- current->flags |= PF_NOFREEZE;
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 66bef1ccee2..ca60a4517fd 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
+#include <linux/freezer.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
@@ -287,7 +288,6 @@ static int cmtp_session(void *arg)
daemonize("kcmtpd_ctr_%d", session->num);
set_user_nice(current, -15);
- current->flags |= PF_NOFREEZE;
init_waitqueue_entry(&wait, current);
add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 450eb0244bb..64d89ca2884 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/freezer.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
@@ -547,7 +548,6 @@ static int hidp_session(void *arg)
daemonize("khidpd_%04x%04x", vendor, product);
set_user_nice(current, -15);
- current->flags |= PF_NOFREEZE;
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 52e04df323e..bb7220770f2 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,6 +33,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/init.h>
+#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/device.h>
#include <linux/net.h>
@@ -1940,7 +1941,6 @@ static int rfcomm_run(void *unused)
daemonize("krfcommd");
set_user_nice(current, -10);
- current->flags |= PF_NOFREEZE;
BT_DBG("");
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 75215331b04..bca787fdbc5 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3465,6 +3465,8 @@ static int pktgen_thread_worker(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
+ set_freezable();
+
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index aa55d0a03e6..29a8ecc6092 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,17 +543,18 @@ rpcauth_uptodatecred(struct rpc_task *task)
test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
}
-
-static struct shrinker *rpc_cred_shrinker;
+static struct shrinker rpc_cred_shrinker = {
+ .shrink = rpcauth_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
void __init rpcauth_init_module(void)
{
rpc_init_authunix();
- rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker);
+ register_shrinker(&rpc_cred_shrinker);
}
void __exit rpcauth_remove_module(void)
{
- if (rpc_cred_shrinker != NULL)
- remove_shrinker(rpc_cred_shrinker);
+ unregister_shrinker(&rpc_cred_shrinker);
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71b9daefdff..9843eacef11 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -231,6 +231,7 @@ static struct pf_desc gss_kerberos_pfs[] = {
static struct gss_api_mech gss_kerberos_mech = {
.gm_name = "krb5",
.gm_owner = THIS_MODULE,
+ .gm_oid = {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"},
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 26872517ccf..61801a069ff 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -194,6 +194,20 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
u32
+gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
+{
+ int i;
+
+ for (i = 0; i < gm->gm_pf_num; i++) {
+ if (gm->gm_pfs[i].service == service) {
+ return gm->gm_pfs[i].pseudoflavor;
+ }
+ }
+ return RPC_AUTH_MAXFLAVOR; /* illegal value */
+}
+EXPORT_SYMBOL(gss_svc_to_pseudoflavor);
+
+u32
gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
{
int i;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 577d590e755..5deb4b6e451 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -217,6 +217,7 @@ static struct pf_desc gss_spkm3_pfs[] = {
static struct gss_api_mech gss_spkm3_mech = {
.gm_name = "spkm3",
.gm_owner = THIS_MODULE,
+ .gm_oid = {7, "\053\006\001\005\005\001\003"},
.gm_ops = &gss_spkm3_ops,
.gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs),
.gm_pfs = gss_spkm3_pfs,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c094583386f..490697542fc 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -743,6 +743,15 @@ find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
static struct auth_ops svcauthops_gss;
+u32 svcauth_gss_flavor(struct auth_domain *dom)
+{
+ struct gss_domain *gd = container_of(dom, struct gss_domain, h);
+
+ return gd->pseudoflavor;
+}
+
+EXPORT_SYMBOL(svcauth_gss_flavor);
+
int
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
@@ -913,10 +922,23 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
struct rsc *rsci = svcdata->rsci;
struct rpc_gss_wire_cred *gc = &svcdata->clcred;
+ int stat;
- rqstp->rq_client = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
- if (rqstp->rq_client == NULL)
+ /*
+ * A gss export can be specified either by:
+ * export *(sec=krb5,rw)
+ * or by
+ * export gss/krb5(rw)
+ * The latter is deprecated; but for backwards compatibility reasons
+ * the nfsd code will still fall back on trying it if the former
+ * doesn't work; so we try to make both available to nfsd, below.
+ */
+ rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
+ if (rqstp->rq_gssclient == NULL)
return SVC_DENIED;
+ stat = svcauth_unix_set_client(rqstp);
+ if (stat == SVC_DROP)
+ return stat;
return SVC_OK;
}
@@ -1088,7 +1110,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
svc_putnl(resv, GSS_SEQ_WIN);
if (svc_safe_putnetobj(resv, &rsip->out_token))
goto drop;
- rqstp->rq_client = NULL;
}
goto complete;
case RPC_GSS_PROC_DESTROY:
@@ -1131,6 +1152,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
}
svcdata->rsci = rsci;
cache_get(&rsci->h);
+ rqstp->rq_flavor = gss_svc_to_pseudoflavor(
+ rsci->mechctx->mech_type, gc->gc_svc);
ret = SVC_OK;
goto out;
}
@@ -1317,6 +1340,9 @@ out_err:
if (rqstp->rq_client)
auth_domain_put(rqstp->rq_client);
rqstp->rq_client = NULL;
+ if (rqstp->rq_gssclient)
+ auth_domain_put(rqstp->rq_gssclient);
+ rqstp->rq_gssclient = NULL;
if (rqstp->rq_cred.cr_group_info)
put_group_info(rqstp->rq_cred.cr_group_info);
rqstp->rq_cred.cr_group_info = NULL;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 07dcd20cbee..411479411b2 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -5,6 +5,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
@@ -637,7 +638,7 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
}
}
-static int
+int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
struct sockaddr_in *sin = svc_addr_in(rqstp);
@@ -672,6 +673,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
return SVC_OK;
}
+EXPORT_SYMBOL(svcauth_unix_set_client);
+
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
@@ -707,6 +710,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
svc_putnl(resv, RPC_AUTH_NULL);
svc_putnl(resv, 0);
+ rqstp->rq_flavor = RPC_AUTH_NULL;
return SVC_OK;
}
@@ -784,6 +788,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
svc_putnl(resv, RPC_AUTH_NULL);
svc_putnl(resv, 0);
+ rqstp->rq_flavor = RPC_AUTH_UNIX;
return SVC_OK;
badcred:
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 8b809b264d1..10b006694e5 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -31,7 +31,7 @@
#include <string.h>
#include <ctype.h>
-#define KSYM_NAME_LEN 127
+#define KSYM_NAME_LEN 128
struct sym_entry {
@@ -254,7 +254,7 @@ static void write_src(void)
unsigned int i, k, off;
unsigned int best_idx[256];
unsigned int *markers;
- char buf[KSYM_NAME_LEN+1];
+ char buf[KSYM_NAME_LEN];
printf("#include <asm/types.h>\n");
printf("#if BITS_PER_LONG == 64\n");
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 78c3f98fcdc..520b9998123 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2318,7 +2318,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value
if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
return -EOPNOTSUPP;
- if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+ if (!is_owner_or_cap(inode))
return -EPERM;
AVC_AUDIT_DATA_INIT(&ad,FS);
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 314477909f8..866d4de8d4a 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -5,35 +5,6 @@
#
# Prompt user for primary drivers.
-config OSS_OBSOLETE
- bool "Obsolete OSS drivers"
- depends on SOUND_PRIME
- help
- This option enables support for obsolete OSS drivers that
- are scheduled for removal in the near future.
-
- Please contact Adrian Bunk <bunk@stusta.de> if you had to
- say Y here because your hardware is not properly supported
- by ALSA.
-
- If unsure, say N.
-
-config SOUND_BT878
- tristate "BT878 audio dma"
- depends on SOUND_PRIME && PCI && OSS_OBSOLETE
- ---help---
- Audio DMA support for bt878 based grabber boards. As you might have
- already noticed, bt878 is listed with two functions in /proc/pci.
- Function 0 does the video stuff (bt848 compatible), function 1 does
- the same for audio data. This is a driver for the audio part of
- the chip. If you say 'Y' here you get a oss-compatible dsp device
- where you can record from. If you want just watch TV you probably
- don't need this driver as most TV cards handle sound with a short
- cable from the TV card to your sound card's line-in.
-
- To compile this driver as a module, choose M here: the module will
- be called btaudio.
-
config SOUND_BCM_CS4297A
tristate "Crystal Sound CS4297a (for Swarm)"
depends on SOUND_PRIME && SIBYTE_SWARM
@@ -44,13 +15,6 @@ config SOUND_BCM_CS4297A
note that CONFIG_KGDB should not be enabled at the same
time, since it also attempts to use this UART port.
-config SOUND_ICH
- tristate "Intel ICH (i8xx) audio support"
- depends on SOUND_PRIME && PCI && OSS_OBSOLETE
- help
- Support for integral audio in Intel's I/O Controller Hub (ICH)
- chipset, as used on the 810/820/840 motherboards.
-
config SOUND_VWSND
tristate "SGI Visual Workstation Sound"
depends on SOUND_PRIME && X86_VISWS
@@ -346,26 +310,6 @@ config MSND_FIFOSIZE
and Pinnacle). Larger values reduce the chance of data overruns at
the expense of overall latency. If unsure, use the default.
-config SOUND_VIA82CXXX
- tristate "VIA 82C686 Audio Codec"
- depends on SOUND_PRIME && PCI && OSS_OBSOLETE && VIRT_TO_BUS
- help
- Say Y here to include support for the audio codec found on VIA
- 82Cxxx-based chips. Typically these are built into a motherboard.
-
- DO NOT select Sound Blaster or Adlib with this driver, unless
- you have a Sound Blaster or Adlib card in addition to your VIA
- audio chip.
-
-config MIDI_VIA82CXXX
- bool "VIA 82C686 MIDI"
- depends on SOUND_VIA82CXXX && ISA_DMA_API
- help
- Answer Y to use the MIDI interface of the Via686. You may need to
- enable this in the BIOS before it will work. This is for connection
- to external MIDI hardware, and is not required for software playback
- of MIDI files.
-
config SOUND_OSS
tristate "OSS sound modules"
depends on SOUND_PRIME && ISA_DMA_API && VIRT_TO_BUS
@@ -400,20 +344,6 @@ config SOUND_DMAP
Say Y unless you have 16MB or more RAM or a PCI sound card.
-config SOUND_CS4232
- tristate "Crystal CS4232 based (PnP) cards"
- depends on SOUND_OSS && OSS_OBSOLETE
- help
- Say Y here if you have a card based on the Crystal CS4232 chip set,
- which uses its own Plug and Play protocol.
-
- If you compile the driver into the kernel, you have to add
- "cs4232=<io>,<irq>,<dma>,<dma2>,<mpuio>,<mpuirq>" to the kernel
- command line.
-
- See <file:Documentation/sound/oss/CS4232> for more information on
- configuring this card.
-
config SOUND_SSCAPE
tristate "Ensoniq SoundScape support"
depends on SOUND_OSS
@@ -720,13 +650,6 @@ config SOUND_WAVEARTIST
Say Y here to include support for the Rockwell WaveArtist sound
system. This driver is mainly for the NetWinder.
-config SOUND_TVMIXER
- tristate "TV card (bt848) mixer support"
- depends on SOUND_PRIME && I2C && VIDEO_V4L1 && OSS_OBSOLETE
- help
- Support for audio mixer facilities on the BT848 TV frame-grabber
- card.
-
config SOUND_KAHLUA
tristate "XpressAudio Sound Blaster emulation"
depends on SOUND_SB
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 3bc1f6e9e4a..96adc47917a 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -11,12 +11,12 @@
* Built from:
* Low level code: <audio@tridentmicro.com> from ALSA
* Framework: Thomas Sailer <sailer@ife.ee.ethz.ch>
- * Extended by: Zach Brown <zab@redhat.com>
+ * Extended by: Zach Brown <zab@redhat.com>
*
* Hacked up by:
* Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
* Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support
- * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
+ * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
* Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support
* Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support
* Muli Ben-Yehuda <mulix@mulix.org>
@@ -54,33 +54,33 @@
* adapt to new pci joystick attachment interface
* v0.14.10f
* July 24 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- * patch from Eric Lemar (via Ian Soboroff): in suspend and resume,
- * fix wrong cast from pci_dev* to struct trident_card*.
+ * patch from Eric Lemar (via Ian Soboroff): in suspend and resume,
+ * fix wrong cast from pci_dev* to struct trident_card*.
* v0.14.10e
* July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- * rewrite the DMA buffer allocation/deallcoation functions, to make it
- * modular and fix a bug where we would call free_pages on memory
- * obtained with pci_alloc_consistent. Also remove unnecessary #ifdef
+ * rewrite the DMA buffer allocation/deallcoation functions, to make it
+ * modular and fix a bug where we would call free_pages on memory
+ * obtained with pci_alloc_consistent. Also remove unnecessary #ifdef
* CONFIG_PROC_FS and various other cleanups.
* v0.14.10d
* July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
* made several printk(KERN_NOTICE...) into TRDBG(...), to avoid spamming
- * my syslog with hundreds of messages.
+ * my syslog with hundreds of messages.
* v0.14.10c
* July 16 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
* Cleaned up Lei Hu's 0.4.10 driver to conform to Documentation/CodingStyle
- * and the coding style used in the rest of the file.
+ * and the coding style used in the rest of the file.
* v0.14.10b
* June 23 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- * add a missing unlock_set_fmt, remove a superflous lock/unlock pair
- * with nothing in between.
+ * add a missing unlock_set_fmt, remove a superflous lock/unlock pair
+ * with nothing in between.
* v0.14.10a
- * June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
- * use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns
- * per line, use 'do {} while (0)' in statement macros.
+ * June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
+ * use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns
+ * per line, use 'do {} while (0)' in statement macros.
* v0.14.10
* June 6 2002 Lei Hu <Lei_hu@ali.com.tw>
- * rewrite the part to read/write registers of audio codec for Ali5451
+ * rewrite the part to read/write registers of audio codec for Ali5451
* v0.14.9e
* January 2 2002 Vojtech Pavlik <vojtech@ucw.cz> added gameport
* support to avoid resource conflict with pcigame.c
@@ -111,7 +111,7 @@
* Set EBUF1 and EBUF2 to still mode
* Add dc97/ac97 reset function
* Fix power management: ali_restore_regs
- * unreleased
+ * unreleased
* Mar 09 2001 Matt Wu
* Add cache for ac97 access
* v0.14.7
@@ -120,7 +120,7 @@
* Fix bug: an extra tail will be played when playing
* Jan 05 2001 Matt Wu
* Implement multi-channels and S/PDIF in support for ALi 1535+
- * v0.14.6
+ * v0.14.6
* Nov 1 2000 Ching-Ling Lee
* Fix the bug of memory leak when switching 5.1-channels to 2 channels.
* Add lock protection into dynamic changing format of data.
@@ -138,7 +138,7 @@
* v0.14.3 May 10 2000 Ollie Lho
* fixed a small bug in trident_update_ptr, xmms 1.0.1 no longer uses 100% CPU
* v0.14.2 Mar 29 2000 Ching-Ling Lee
- * Add clear to silence advance in trident_update_ptr
+ * Add clear to silence advance in trident_update_ptr
* fix invalid data of the end of the sound
* v0.14.1 Mar 24 2000 Ching-Ling Lee
* ALi 5451 support added, playback and recording O.K.
@@ -178,7 +178,7 @@
* SiS 7018 support added, playback O.K.
* v0.01 Alan Cox et. al.
* Initial Release in kernel 2.3.30, does not work
- *
+ *
* ToDo
* Clean up of low level channel register access code. (done)
* Fix the bug on dma buffer management in update_ptr, read/write, drain_dac (done)
@@ -326,7 +326,7 @@ struct trident_state {
unsigned error; /* number of over/underruns */
/* put process on wait queue when no more space in buffer */
- wait_queue_head_t wait;
+ wait_queue_head_t wait;
/* redundant, but makes calculations easier */
unsigned fragsize;
@@ -358,7 +358,7 @@ struct trident_state {
struct trident_channel {
int num; /* channel number */
u32 lba; /* Loop Begine Address, where dma buffer starts */
- u32 eso; /* End Sample Offset, wehre dma buffer ends */
+ u32 eso; /* End Sample Offset, wehre dma buffer ends */
/* (in the unit of samples) */
u32 delta; /* delta value, sample rate / 48k for playback, */
/* 48k/sample rate for recording */
@@ -417,7 +417,7 @@ struct trident_card {
/* soundcore stuff */
int dev_audio;
- /* structures for abstraction of hardware facilities, codecs, */
+ /* structures for abstraction of hardware facilities, codecs, */
/* banks and channels */
struct ac97_codec *ac97_codec[NR_AC97];
struct trident_pcm_bank banks[NR_BANKS];
@@ -479,7 +479,7 @@ static void trident_ac97_set(struct ac97_codec *codec, u8 reg, u16 val);
static u16 trident_ac97_get(struct ac97_codec *codec, u8 reg);
static int trident_open_mixdev(struct inode *inode, struct file *file);
-static int trident_ioctl_mixdev(struct inode *inode, struct file *file,
+static int trident_ioctl_mixdev(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static void ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val);
@@ -496,10 +496,10 @@ static void ali_disable_spdif_in(struct trident_card *card);
static void ali_disable_special_channel(struct trident_card *card, int ch);
static void ali_setup_spdif_out(struct trident_card *card, int flag);
static int ali_write_5_1(struct trident_state *state,
- const char __user *buffer,
- int cnt_for_multi_channel, unsigned int *copy_count,
+ const char __user *buffer,
+ int cnt_for_multi_channel, unsigned int *copy_count,
unsigned int *state_cnt);
-static int ali_allocate_other_states_resources(struct trident_state *state,
+static int ali_allocate_other_states_resources(struct trident_state *state,
int chan_nums);
static void ali_free_other_states_resources(struct trident_state *state);
@@ -722,7 +722,7 @@ trident_free_pcm_channel(struct trident_card *card, unsigned int channel)
if (channel < 31 || channel > 63)
return;
- if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX ||
+ if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX ||
card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_NX) {
b = inb(TRID_REG(card, T4D_REC_CH));
if ((b & ~0x80) == channel)
@@ -742,7 +742,7 @@ cyber_alloc_pcm_channel(struct trident_card *card)
int idx;
/* The cyberpro 5050 has only 32 voices and one bank */
- /* .. at least they are not documented (if you want to call that
+ /* .. at least they are not documented (if you want to call that
* crap documentation), perhaps broken ? */
bank = &card->banks[BANK_A];
@@ -802,7 +802,7 @@ cyber_init_ritual(struct trident_card *card)
/* enable, if it was disabled */
if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) {
printk(KERN_INFO "cyberpro5050: enabling audio controller\n");
- cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE,
+ cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE,
portDat | CYBER_BMSK_AUENZ_ENABLE);
/* check again if hardware is enabled now */
portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE);
@@ -811,7 +811,7 @@ cyber_init_ritual(struct trident_card *card)
printk(KERN_ERR "cyberpro5050: initAudioAccess: no success\n");
ret = -1;
} else {
- cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE,
+ cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE,
CYBER_BMSK_AUDIO_INT_ENABLE);
cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x01);
cyber_outidx(CYBER_PORT_AUDIO, 0xba, 0x20);
@@ -827,7 +827,7 @@ cyber_init_ritual(struct trident_card *card)
/* called with spin lock held */
static int
-trident_load_channel_registers(struct trident_card *card, u32 * data,
+trident_load_channel_registers(struct trident_card *card, u32 * data,
unsigned int channel)
{
int i;
@@ -845,7 +845,7 @@ trident_load_channel_registers(struct trident_card *card, u32 * data,
continue;
outl(data[i], TRID_REG(card, CHANNEL_START + 4 * i));
}
- if (card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
+ if (card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF1));
outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF2));
@@ -884,7 +884,7 @@ trident_write_voice_regs(struct trident_state *state)
break;
case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
data[0] = (channel->delta << 24);
- data[2] = ((channel->delta << 16) & 0xff000000) |
+ data[2] = ((channel->delta << 16) & 0xff000000) |
(channel->eso & 0x00ffffff);
data[3] = channel->fm_vol & 0xffff;
break;
@@ -989,13 +989,13 @@ trident_play_setup(struct trident_state *state)
if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
channel->attribute = 0;
if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451) {
- if ((channel->num == ALI_SPDIF_IN_CHANNEL) ||
+ if ((channel->num == ALI_SPDIF_IN_CHANNEL) ||
(channel->num == ALI_PCM_IN_CHANNEL))
ali_disable_special_channel(state->card, channel->num);
- else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL))
+ else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL))
& ALI_SPDIF_OUT_CH_ENABLE)
&& (channel->num == ALI_SPDIF_OUT_CHANNEL)) {
- ali_set_spdif_out_rate(state->card,
+ ali_set_spdif_out_rate(state->card,
state->dmabuf.rate);
state->dmabuf.channel->delta = 0x1000;
}
@@ -1063,7 +1063,7 @@ trident_rec_setup(struct trident_state *state)
channel->lba = dmabuf->dma_handle;
channel->delta = compute_rate_rec(dmabuf->rate);
- if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) &&
+ if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) &&
(channel->num == ALI_SPDIF_IN_CHANNEL)) {
rate = ali_get_spdif_in_rate(card);
if (rate == 0) {
@@ -1180,8 +1180,8 @@ start_adc(struct trident_state *state)
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
- if ((dmabuf->mapped ||
- dmabuf->count < (signed) dmabuf->dmasize) &&
+ if ((dmabuf->mapped ||
+ dmabuf->count < (signed) dmabuf->dmasize) &&
dmabuf->ready) {
dmabuf->enable |= ADC_RUNNING;
trident_enable_voice_irq(card, chan_num);
@@ -1261,7 +1261,7 @@ alloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev, int order)
void *rawbuf = NULL;
struct page *page, *pend;
- if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
+ if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
&dmabuf->dma_handle)))
return -ENOMEM;
@@ -1272,7 +1272,7 @@ alloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev, int order)
dmabuf->rawbuf = rawbuf;
dmabuf->buforder = order;
- /* now mark the pages as reserved; otherwise */
+ /* now mark the pages as reserved; otherwise */
/* remap_pfn_range doesn't do what we want */
pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
for (page = virt_to_page(rawbuf); page <= pend; page++)
@@ -1310,7 +1310,7 @@ dealloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev)
pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
ClearPageReserved(page);
- pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder,
+ pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder,
dmabuf->rawbuf, dmabuf->dma_handle);
dmabuf->rawbuf = NULL;
}
@@ -1368,7 +1368,7 @@ prog_dmabuf(struct trident_state *state, enum dmabuf_mode rec)
dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
/* release the auxiliary DMA buffers */
for (i -= 2; i >= 0; i--)
- dealloc_dmabuf(&state->other_states[i]->dmabuf,
+ dealloc_dmabuf(&state->other_states[i]->dmabuf,
state->card->pci_dev);
unlock_set_fmt(state);
return ret;
@@ -1398,7 +1398,7 @@ prog_dmabuf(struct trident_state *state, enum dmabuf_mode rec)
dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt];
dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
- memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
+ memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
dmabuf->dmasize);
spin_lock_irqsave(&s->card->lock, flags);
@@ -1453,7 +1453,7 @@ trident_clear_tail(struct trident_state *state)
swptr = dmabuf->swptr;
spin_unlock_irqrestore(&state->card->lock, flags);
- if (swptr == 0 || swptr == dmabuf->dmasize / 2 ||
+ if (swptr == 0 || swptr == dmabuf->dmasize / 2 ||
swptr == dmabuf->dmasize)
return;
@@ -1511,7 +1511,7 @@ drain_dac(struct trident_state *state, int nonblock)
/* No matter how much data is left in the buffer, we have to wait until
CSO == ESO/2 or CSO == ESO when address engine interrupts */
- if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
+ if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
state->card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
diff = dmabuf->swptr - trident_get_dma_addr(state) + dmabuf->dmasize;
diff = diff % (dmabuf->dmasize);
@@ -1532,7 +1532,7 @@ drain_dac(struct trident_state *state, int nonblock)
return 0;
}
-/* update buffer manangement pointers, especially, */
+/* update buffer manangement pointers, especially, */
/* dmabuf->count and dmabuf->hwptr */
static void
trident_update_ptr(struct trident_state *state)
@@ -1559,11 +1559,11 @@ trident_update_ptr(struct trident_state *state)
} else {
dmabuf->count += diff;
- if (dmabuf->count < 0 ||
+ if (dmabuf->count < 0 ||
dmabuf->count > dmabuf->dmasize) {
- /* buffer underrun or buffer overrun, */
- /* we have no way to recover it here, just */
- /* stop the machine and let the process */
+ /* buffer underrun or buffer overrun, */
+ /* we have no way to recover it here, just */
+ /* stop the machine and let the process */
/* force hwptr and swptr to sync */
__stop_adc(state);
dmabuf->error++;
@@ -1582,7 +1582,7 @@ trident_update_ptr(struct trident_state *state)
} else {
dmabuf->count -= diff;
- if (dmabuf->count < 0 ||
+ if (dmabuf->count < 0 ||
dmabuf->count > dmabuf->dmasize) {
/* buffer underrun or buffer overrun, we have no way to recover
it here, just stop the machine and let the process force hwptr
@@ -1608,13 +1608,13 @@ trident_update_ptr(struct trident_state *state)
if (state->chans_num == 6) {
clear_cnt = clear_cnt / 2;
swptr = swptr / 2;
- memset(state->other_states[0]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[0]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[1]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[1]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[2]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[2]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[3]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[3]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
}
dmabuf->endcleared = 1;
@@ -1627,13 +1627,13 @@ trident_update_ptr(struct trident_state *state)
if (state->chans_num == 6) {
clear_cnt = clear_cnt / 2;
swptr = swptr / 2;
- memset(state->other_states[0]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[0]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[1]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[1]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[2]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[2]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
- memset(state->other_states[3]->dmabuf.rawbuf + swptr,
+ memset(state->other_states[3]->dmabuf.rawbuf + swptr,
silence, clear_cnt);
}
dmabuf->endcleared = 1;
@@ -1665,7 +1665,7 @@ trident_address_interrupt(struct trident_card *card)
if ((state = card->states[i]) != NULL) {
trident_update_ptr(state);
} else {
- printk(KERN_WARNING "trident: spurious channel "
+ printk(KERN_WARNING "trident: spurious channel "
"irq %d.\n", channel);
trident_stop_voice(card, channel);
trident_disable_voice_irq(card, channel);
@@ -1694,7 +1694,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
if (opt == 1) { // MUTE
dwTemp ^= 0x8000;
- ali_ac97_write(card->ac97_codec[0],
+ ali_ac97_write(card->ac97_codec[0],
0x02, dwTemp);
} else if (opt == 2) { // Down
if (mute)
@@ -1706,7 +1706,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
dwTemp &= 0xe0e0;
dwTemp |= (volume[0]) | (volume[1] << 8);
ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
- card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
+ card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
(((32 - volume[1]) * 25 / 8) << 8);
} else if (opt == 4) { // Up
if (mute)
@@ -1718,7 +1718,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
dwTemp &= 0xe0e0;
dwTemp |= (volume[0]) | (volume[1] << 8);
ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
- card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
+ card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
(((32 - volume[1]) * 25 / 8) << 8);
} else {
/* Nothing needs doing */
@@ -1801,7 +1801,7 @@ cyber_address_interrupt(struct trident_card *card)
if ((state = card->states[i]) != NULL) {
trident_update_ptr(state);
} else {
- printk(KERN_WARNING "cyber5050: spurious "
+ printk(KERN_WARNING "cyber5050: spurious "
"channel irq %d.\n", channel);
trident_stop_voice(card, channel);
trident_disable_voice_irq(card, channel);
@@ -1836,21 +1836,21 @@ trident_interrupt(int irq, void *dev_id)
ali_queue_task(card, gpio & 0x07);
}
event = inl(TRID_REG(card, T4D_MISCINT));
- outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
+ outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
TRID_REG(card, T4D_MISCINT));
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
/* manually clear interrupt status, bad hardware design, blame T^2 */
- outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
+ outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
TRID_REG(card, T4D_MISCINT));
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
-/* in this loop, dmabuf.count signifies the amount of data that is waiting */
-/* to be copied to the user's buffer. it is filled by the dma machine and */
+/* in this loop, dmabuf.count signifies the amount of data that is waiting */
+/* to be copied to the user's buffer. it is filled by the dma machine and */
/* drained by this loop. */
static ssize_t
trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
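The comment kept above describes a plain producer/consumer ring: the DMA engine fills the buffer and bumps dmabuf.count from the interrupt path, while the read loop drains it towards the user. A minimal standalone sketch of that accounting (the struct and names are illustrative, not the driver's own types):

	#include <stddef.h>

	struct ring {
		size_t size;   /* total buffer size in bytes           */
		size_t hwptr;  /* advanced by the DMA side (producer)  */
		size_t swptr;  /* advanced by the read loop (consumer) */
		long   count;  /* bytes waiting to be copied to user   */
	};

	/* interrupt path: the hardware just produced n bytes */
	static void ring_produce(struct ring *r, size_t n)
	{
		r->hwptr = (r->hwptr + n) % r->size;
		r->count += n;          /* may exceed size on an overrun */
	}

	/* read loop: n bytes were copied out to the user buffer */
	static void ring_consume(struct ring *r, size_t n)
	{
		r->swptr = (r->swptr + n) % r->size;
		r->count -= n;
	}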
@@ -1878,8 +1878,8 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
while (count > 0) {
spin_lock_irqsave(&state->card->lock, flags);
if (dmabuf->count > (signed) dmabuf->dmasize) {
- /* buffer overrun, we are recovering from */
- /* sleep_on_timeout, resync hwptr and swptr, */
+ /* buffer overrun, we are recovering from */
+ /* sleep_on_timeout, resync hwptr and swptr, */
/* make process flush the buffer */
dmabuf->count = dmabuf->dmasize;
dmabuf->swptr = dmabuf->hwptr;
@@ -1894,7 +1894,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
cnt = count;
if (cnt <= 0) {
unsigned long tmo;
- /* buffer is empty, start the dma machine and */
+ /* buffer is empty, start the dma machine and */
/* wait for data to be recorded */
start_adc(state);
if (file->f_flags & O_NONBLOCK) {
@@ -1904,8 +1904,8 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
}
mutex_unlock(&state->sem);
- /* No matter how much space left in the buffer, */
- /* we have to wait until CSO == ESO/2 or CSO == ESO */
+ /* No matter how much space left in the buffer, */
+ /* we have to wait until CSO == ESO/2 or CSO == ESO */
/* when address engine interrupts */
tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
tmo >>= sample_shift[dmabuf->fmt];
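The timeout computed above covers half a buffer's worth of audio: dmasize bytes at rate frames per second, halved because the address engine only interrupts at CSO == ESO/2 and CSO == ESO, then scaled down by log2(bytes per frame). The same arithmetic in isolation (the helper and the figures in the example are only illustrative):

	/* jiffies needed for half a DMA buffer of audio to elapse;
	 * sample_shift is log2(bytes per frame), e.g. 2 for 16-bit stereo */
	static unsigned long half_buffer_jiffies(unsigned long dmasize_bytes,
						 unsigned long rate_hz,
						 unsigned long hz,
						 unsigned int sample_shift)
	{
		unsigned long tmo = (dmasize_bytes * hz) / (rate_hz * 2);

		return tmo >> sample_shift;
	}

	/* 64 KiB buffer, 48 kHz 16-bit stereo, HZ == 250 -> about 42 jiffies */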
@@ -2005,7 +2005,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
while (count > 0) {
spin_lock_irqsave(&state->card->lock, flags);
if (dmabuf->count < 0) {
- /* buffer underrun, we are recovering from */
+ /* buffer underrun, we are recovering from */
/* sleep_on_timeout, resync hwptr and swptr */
dmabuf->count = 0;
dmabuf->swptr = dmabuf->hwptr;
@@ -2020,7 +2020,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
cnt = count;
if (cnt <= 0) {
unsigned long tmo;
- /* buffer is full, start the dma machine and */
+ /* buffer is full, start the dma machine and */
/* wait for data to be played */
start_dac(state);
if (file->f_flags & O_NONBLOCK) {
@@ -2028,8 +2028,8 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
ret = -EAGAIN;
goto out;
}
- /* No matter how much data left in the buffer, */
- /* we have to wait until CSO == ESO/2 or CSO == ESO */
+ /* No matter how much data left in the buffer, */
+ /* we have to wait until CSO == ESO/2 or CSO == ESO */
/* when address engine interrupts */
lock_set_fmt(state);
tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
@@ -2037,15 +2037,15 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
unlock_set_fmt(state);
mutex_unlock(&state->sem);
- /* There are two situations when sleep_on_timeout */
- /* returns, one is when the interrupt is serviced */
- /* correctly and the process is waked up by ISR */
- /* ON TIME. Another is when timeout is expired, which */
- /* means that either interrupt is NOT serviced */
- /* correctly (pending interrupt) or it is TOO LATE */
- /* for the process to be scheduled to run */
- /* (scheduler latency) which results in a (potential) */
- /* buffer underrun. And worse, there is NOTHING we */
+ /* There are two situations when sleep_on_timeout */
+ /* returns, one is when the interrupt is serviced */
+ /* correctly and the process is waked up by ISR */
+ /* ON TIME. Another is when timeout is expired, which */
+ /* means that either interrupt is NOT serviced */
+ /* correctly (pending interrupt) or it is TOO LATE */
+ /* for the process to be scheduled to run */
+ /* (scheduler latency) which results in a (potential) */
+ /* buffer underrun. And worse, there is NOTHING we */
/* can do to prevent it. */
if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
pr_debug(KERN_ERR "trident: playback schedule "
@@ -2054,8 +2054,8 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
dmabuf->fragsize, dmabuf->count,
dmabuf->hwptr, dmabuf->swptr);
- /* a buffer underrun, we delay the recovery */
- /* until next time the while loop begin and */
+ /* a buffer underrun, we delay the recovery */
+ /* until next time the while loop begin and */
/* we REALLY have data to play */
}
if (signal_pending(current)) {
@@ -2079,7 +2079,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
if (state->chans_num == 6) {
copy_count = 0;
state_cnt = 0;
- if (ali_write_5_1(state, buffer, cnt, &copy_count,
+ if (ali_write_5_1(state, buffer, cnt, &copy_count,
&state_cnt) == -EFAULT) {
if (state_cnt) {
swptr = (swptr + state_cnt) % dmabuf->dmasize;
@@ -2096,7 +2096,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
goto out;
}
} else {
- if (copy_from_user(dmabuf->rawbuf + swptr,
+ if (copy_from_user(dmabuf->rawbuf + swptr,
buffer, cnt)) {
if (!ret)
ret = -EFAULT;
@@ -2172,7 +2172,7 @@ trident_poll(struct file *file, struct poll_table_struct *wait)
if (dmabuf->count >= (signed) dmabuf->fragsize)
mask |= POLLOUT | POLLWRNORM;
} else {
- if ((signed) dmabuf->dmasize >= dmabuf->count +
+ if ((signed) dmabuf->dmasize >= dmabuf->count +
(signed) dmabuf->fragsize)
mask |= POLLOUT | POLLWRNORM;
}
@@ -2227,7 +2227,7 @@ out:
}
static int
-trident_ioctl(struct inode *inode, struct file *file,
+trident_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct trident_state *state = (struct trident_state *)file->private_data;
@@ -2348,7 +2348,7 @@ trident_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */
- ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
+ ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
AFMT_U8, p);
break;
@@ -2379,7 +2379,7 @@ trident_ioctl(struct inode *inode, struct file *file,
}
}
unlock_set_fmt(state);
- ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
+ ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
AFMT_U8, p);
break;
@@ -2438,7 +2438,7 @@ trident_ioctl(struct inode *inode, struct file *file,
stop_adc(state);
dmabuf->ready = 0;
if (val >= 2) {
- if (!((file->f_mode & FMODE_WRITE) &&
+ if (!((file->f_mode & FMODE_WRITE) &&
(val == 6)))
val = 2;
dmabuf->fmt |= TRIDENT_FMT_STEREO;
@@ -2504,7 +2504,7 @@ trident_ioctl(struct inode *inode, struct file *file,
abinfo.fragstotal = dmabuf->numfrag;
abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
spin_unlock_irqrestore(&state->card->lock, flags);
- ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
+ ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
-EFAULT : 0;
break;
@@ -2524,7 +2524,7 @@ trident_ioctl(struct inode *inode, struct file *file,
abinfo.fragstotal = dmabuf->numfrag;
abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
spin_unlock_irqrestore(&state->card->lock, flags);
- ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
+ ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
-EFAULT : 0;
break;
@@ -2533,7 +2533,7 @@ trident_ioctl(struct inode *inode, struct file *file,
break;
case SNDCTL_DSP_GETCAPS:
- ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
+ ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
DSP_CAP_MMAP | DSP_CAP_BIND, p);
break;
@@ -2553,7 +2553,7 @@ trident_ioctl(struct inode *inode, struct file *file,
}
if (file->f_mode & FMODE_READ) {
if (val & PCM_ENABLE_INPUT) {
- if (!dmabuf->ready &&
+ if (!dmabuf->ready &&
(ret = prog_dmabuf_record(state)))
break;
start_adc(state);
@@ -2562,7 +2562,7 @@ trident_ioctl(struct inode *inode, struct file *file,
}
if (file->f_mode & FMODE_WRITE) {
if (val & PCM_ENABLE_OUTPUT) {
- if (!dmabuf->ready &&
+ if (!dmabuf->ready &&
(ret = prog_dmabuf_playback(state)))
break;
start_dac(state);
@@ -2589,7 +2589,7 @@ trident_ioctl(struct inode *inode, struct file *file,
if (dmabuf->mapped)
dmabuf->count &= dmabuf->fragsize - 1;
spin_unlock_irqrestore(&state->card->lock, flags);
- ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
+ ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
-EFAULT : 0;
break;
@@ -2612,7 +2612,7 @@ trident_ioctl(struct inode *inode, struct file *file,
if (dmabuf->mapped)
dmabuf->count &= dmabuf->fragsize - 1;
spin_unlock_irqrestore(&state->card->lock, flags);
- ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
+ ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
-EFAULT : 0;
break;
@@ -2641,17 +2641,17 @@ trident_ioctl(struct inode *inode, struct file *file,
break;
case SOUND_PCM_READ_CHANNELS:
- ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1,
+ ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1,
p);
break;
case SOUND_PCM_READ_BITS:
- ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
+ ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
AFMT_U8, p);
break;
case SNDCTL_DSP_GETCHANNELMASK:
- ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR |
+ ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR |
DSP_BIND_CENTER_LFE, p);
break;
@@ -2671,10 +2671,10 @@ trident_ioctl(struct inode *inode, struct file *file,
} else {
dmabuf->ready = 0;
if (file->f_mode & FMODE_READ)
- dmabuf->channel->attribute = (CHANNEL_REC |
+ dmabuf->channel->attribute = (CHANNEL_REC |
SRC_ENABLE);
if (file->f_mode & FMODE_WRITE)
- dmabuf->channel->attribute = (CHANNEL_SPC_PB |
+ dmabuf->channel->attribute = (CHANNEL_SPC_PB |
SRC_ENABLE);
dmabuf->channel->attribute |= mask2attr[ffs(val)];
}
@@ -2702,6 +2702,7 @@ trident_open(struct inode *inode, struct file *file)
struct trident_card *card = devs;
struct trident_state *state = NULL;
struct dmabuf *dmabuf = NULL;
+ unsigned long flags;
/* Added by Matt Wu 01-05-2001 */
/* TODO: there's some redundacy here wrt the check below */
@@ -2765,8 +2766,8 @@ trident_open(struct inode *inode, struct file *file)
init_waitqueue_head(&dmabuf->wait);
file->private_data = state;
- /* set default sample format. According to OSS Programmer's */
- /* Guide /dev/dsp should be default to unsigned 8-bits, mono, */
+ /* set default sample format. According to OSS Programmer's */
+ /* Guide /dev/dsp should be default to unsigned 8-bits, mono, */
/* with sample rate 8kHz and /dev/dspW will accept 16-bits sample */
if (file->f_mode & FMODE_WRITE) {
dmabuf->fmt &= ~TRIDENT_FMT_MASK;
@@ -2779,11 +2780,13 @@ trident_open(struct inode *inode, struct file *file)
/* set default channel attribute to normal playback */
dmabuf->channel->attribute = CHANNEL_PB;
}
+ spin_lock_irqsave(&card->lock, flags);
trident_set_dac_rate(state, 8000);
+ spin_unlock_irqrestore(&card->lock, flags);
}
if (file->f_mode & FMODE_READ) {
- /* FIXME: Trident 4d can only record in signed 16-bits stereo, */
+ /* FIXME: Trident 4d can only record in signed 16-bits stereo, */
/* 48kHz sample, to be dealed with in trident_set_adc_rate() ?? */
dmabuf->fmt &= ~TRIDENT_FMT_MASK;
if ((minor & 0x0f) == SND_DEV_DSP16)
@@ -2794,10 +2797,12 @@ trident_open(struct inode *inode, struct file *file)
if (card->pci_id == PCI_DEVICE_ID_SI_7018) {
/* set default channel attribute to 0x8a80, record from
PCM L/R FIFO and mono = (left + right + 1)/2 */
- dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR |
+ dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR |
MONO_MIX);
}
+ spin_lock_irqsave(&card->lock, flags);
trident_set_adc_rate(state, 8000);
+ spin_unlock_irqrestore(&card->lock, flags);
/* Added by Matt Wu 01-05-2001 */
if (card->pci_id == PCI_DEVICE_ID_ALI_5451)
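The spin_lock_irqsave()/spin_unlock_irqrestore() pairs added in this and the previous hunk bring the rate programming in open() in line with the other callers, which already hold card->lock while touching those registers. The pattern in isolation (structure and function names below are placeholders, not the driver's):

	#include <linux/spinlock.h>

	struct my_card {
		spinlock_t lock;
		/* ... */
	};

	static void program_rate(struct my_card *card, unsigned int rate)
	{
		unsigned long flags;

		spin_lock_irqsave(&card->lock, flags);  /* also masks local irqs */
		/* write the rate registers here; the interrupt handler takes the
		 * same lock, so it never observes a half-programmed rate */
		spin_unlock_irqrestore(&card->lock, flags);
	}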
@@ -3020,7 +3025,7 @@ acquirecodecaccess(struct trident_card *card)
break;
if (wsemabits == 0) {
unlock:
- outl(((u32) (wcontrol & 0x1eff) | 0x00004000),
+ outl(((u32) (wcontrol & 0x1eff) | 0x00004000),
TRID_REG(card, ALI_AC97_WRITE));
continue;
}
@@ -3104,7 +3109,7 @@ ali_ac97_get(struct trident_card *card, int secondary, u8 reg)
ncount = 10;
while (1) {
- if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ)
+ if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ)
!= 0)
break;
if (ncount <= 0)
@@ -3112,7 +3117,7 @@ ali_ac97_get(struct trident_card *card, int secondary, u8 reg)
if (ncount-- == 1) {
pr_debug("ali_ac97_read :try clear busy flag\n");
aud_reg = inl(TRID_REG(card, ALI_AC97_WRITE));
- outl((aud_reg & 0xffff7fff),
+ outl((aud_reg & 0xffff7fff),
TRID_REG(card, ALI_AC97_WRITE));
}
udelay(10);
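The loop above is a bounded busy-wait: poll the codec busy bit a fixed number of times, and on the last attempt force the stuck flag clear before giving up. A generic sketch of the bounded-polling part (this uses a plain MMIO accessor for illustration; the driver itself uses port I/O via inw()/inl()):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	/* poll a status register until busy_bit clears, at most 'tries' times */
	static int wait_not_busy(void __iomem *reg, u32 busy_bit, int tries)
	{
		while (tries--) {
			if (!(readl(reg) & busy_bit))
				return 0;
			udelay(10);
		}
		return -EBUSY;
	}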
@@ -3159,7 +3164,7 @@ ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val)
wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
wcontrol &= 0xff00;
- wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */
+ wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */
/* ali1535+ write */
outl((data | wcontrol), TRID_REG(card, ALI_AC97_WRITE));
@@ -3177,7 +3182,7 @@ ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val)
break;
if (ncount-- == 1) {
pr_debug("ali_ac97_set :try clear busy flag!!\n");
- outw(wcontrol & 0x7fff,
+ outw(wcontrol & 0x7fff,
TRID_REG(card, ALI_AC97_WRITE));
}
udelay(10);
@@ -3382,7 +3387,7 @@ ali_detect_spdif_rate(struct trident_card *card)
bval |= 0x1F;
outb(bval, TRID_REG(card, ALI_SPDIF_CTRL + 1));
- while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) &&
+ while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) &&
count <= 50000) {
count++;
@@ -3669,14 +3674,14 @@ ali_save_regs(struct trident_card *card)
spin_lock_irqsave(&card->lock, flags);
ali_registers.global_regs[0x2c] = inl(TRID_REG(card, T4D_MISCINT));
- //ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));
+ //ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));
ali_registers.global_regs[0x21] = inl(TRID_REG(card, T4D_STOP_A));
//disable all IRQ bits
outl(ALI_DISABLE_ALL_IRQ, TRID_REG(card, T4D_MISCINT));
for (i = 1; i < ALI_MIXER_REGS; i++)
- ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0],
+ ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0],
i * 2);
for (i = 0; i < ALI_GLOBAL_REGS; i++) {
@@ -3688,7 +3693,7 @@ ali_save_regs(struct trident_card *card)
for (i = 0; i < ALI_CHANNELS; i++) {
outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
for (j = 0; j < ALI_CHANNEL_REGS; j++)
- ali_registers.channel_regs[i][j] = inl(TRID_REG(card,
+ ali_registers.channel_regs[i][j] = inl(TRID_REG(card,
j * 4 + 0xe0));
}
@@ -3707,18 +3712,18 @@ ali_restore_regs(struct trident_card *card)
spin_lock_irqsave(&card->lock, flags);
for (i = 1; i < ALI_MIXER_REGS; i++)
- ali_ac97_write(card->ac97_codec[0], i * 2,
+ ali_ac97_write(card->ac97_codec[0], i * 2,
ali_registers.mixer_regs[i]);
for (i = 0; i < ALI_CHANNELS; i++) {
outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
for (j = 0; j < ALI_CHANNEL_REGS; j++)
- outl(ali_registers.channel_regs[i][j],
+ outl(ali_registers.channel_regs[i][j],
TRID_REG(card, j * 4 + 0xe0));
}
for (i = 0; i < ALI_GLOBAL_REGS; i++) {
- if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) ||
+ if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) ||
(i * 4 == T4D_START_A))
continue;
outl(ali_registers.global_regs[i], TRID_REG(card, i * 4));
@@ -3763,7 +3768,7 @@ ali_alloc_pcm_channel(struct trident_card *card)
bank = &card->banks[BANK_A];
- if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) &
+ if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) &
(ALI_SPDIF_OUT_CH_ENABLE)) {
idx = ALI_SPDIF_OUT_CHANNEL;
if (!(bank->bitmap & (1 << idx))) {
@@ -3774,7 +3779,7 @@ ali_alloc_pcm_channel(struct trident_card *card)
}
}
- for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST;
+ for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST;
idx++) {
if (!(bank->bitmap & (1 << idx))) {
struct trident_channel *channel = &bank->channels[idx];
@@ -3785,9 +3790,9 @@ ali_alloc_pcm_channel(struct trident_card *card)
}
/* no more free channels avaliable */
-#if 0
+#if 0
printk(KERN_ERR "ali: no more channels available on Bank A.\n");
-#endif /* 0 */
+#endif /* 0 */
return NULL;
}
@@ -3812,9 +3817,9 @@ ali_alloc_rec_pcm_channel(struct trident_card *card)
}
/* no free recordable channels avaliable */
-#if 0
+#if 0
printk(KERN_ERR "ali: no recordable channels available on Bank A.\n");
-#endif /* 0 */
+#endif /* 0 */
return NULL;
}
@@ -3837,14 +3842,14 @@ ali_set_spdif_out_rate(struct trident_card *card, unsigned int rate)
break;
}
- /* select spdif_out */
+ /* select spdif_out */
ch_st_sel = inb(TRID_REG(card, ALI_SPDIF_CTRL)) & ALI_SPDIF_OUT_CH_STATUS;
- ch_st_sel |= 0x80; /* select right */
+ ch_st_sel |= 0x80; /* select right */
outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
outb(status_rate | 0x20, TRID_REG(card, ALI_SPDIF_CS + 2));
- ch_st_sel &= (~0x80); /* select left */
+ ch_st_sel &= (~0x80); /* select left */
outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
outw(status_rate | 0x10, TRID_REG(card, ALI_SPDIF_CS + 2));
}
@@ -3881,14 +3886,14 @@ ali_address_interrupt(struct trident_card *card)
}
}
-/* Updating the values of counters of other_states' DMAs without lock
+/* Updating the values of counters of other_states' DMAs without lock
protection is no harm because all DMAs of multi-channels and interrupt
depend on a master state's DMA, and changing the counters of the master
state DMA is protected by a spinlock.
*/
static int
-ali_write_5_1(struct trident_state *state, const char __user *buf,
- int cnt_for_multi_channel, unsigned int *copy_count,
+ali_write_5_1(struct trident_state *state, const char __user *buf,
+ int cnt_for_multi_channel, unsigned int *copy_count,
unsigned int *state_cnt)
{
@@ -3904,10 +3909,10 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
if ((i = state->multi_channels_adjust_count) > 0) {
if (i == 1) {
- if (copy_from_user(dmabuf->rawbuf + swptr,
+ if (copy_from_user(dmabuf->rawbuf + swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(swptr, buffer, cnt_for_multi_channel,
+ seek_offset(swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
i--;
(*state_cnt) += sample_s;
@@ -3916,10 +3921,10 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
i = i - (state->chans_num - other_dma_nums);
for (; (i < other_dma_nums) && (cnt_for_multi_channel > 0); i++) {
dmabuf_temp = &state->other_states[i]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
}
if (cnt_for_multi_channel == 0)
@@ -3928,39 +3933,39 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
if (cnt_for_multi_channel > 0) {
loop = cnt_for_multi_channel / (state->chans_num * sample_s);
for (i = 0; i < loop; i++) {
- if (copy_from_user(dmabuf->rawbuf + swptr, buffer,
+ if (copy_from_user(dmabuf->rawbuf + swptr, buffer,
sample_s * 2))
return -EFAULT;
- seek_offset(swptr, buffer, cnt_for_multi_channel,
+ seek_offset(swptr, buffer, cnt_for_multi_channel,
sample_s * 2, *copy_count);
(*state_cnt) += (sample_s * 2);
dmabuf_temp = &state->other_states[0]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
dmabuf_temp = &state->other_states[1]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
dmabuf_temp = &state->other_states[2]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
dmabuf_temp = &state->other_states[3]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
}
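The loop above splits each interleaved 6-channel frame across five DMA buffers: two samples stay in the master state's buffer and one sample goes to each of the four other_states buffers. A simplified userspace-style sketch of that split (buffer names are illustrative):

	#include <string.h>

	/* one interleaved 6-channel frame of 16-bit samples: channels 0 and 1
	 * stay together, channels 2..5 each go to their own auxiliary buffer */
	static void split_6ch_frame(const short *in, short *master, short *aux[4])
	{
		int i;

		memcpy(master, in, 2 * sizeof(short));
		for (i = 0; i < 4; i++)
			aux[i][0] = in[2 + i];
	}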
@@ -3969,15 +3974,15 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
if (copy_from_user(dmabuf->rawbuf + swptr, buffer, sample_s))
return -EFAULT;
- seek_offset(swptr, buffer, cnt_for_multi_channel,
+ seek_offset(swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
(*state_cnt) += sample_s;
if (cnt_for_multi_channel > 0) {
- if (copy_from_user(dmabuf->rawbuf + swptr,
+ if (copy_from_user(dmabuf->rawbuf + swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(swptr, buffer, cnt_for_multi_channel,
+ seek_offset(swptr, buffer, cnt_for_multi_channel,
sample_s, *copy_count);
(*state_cnt) += sample_s;
@@ -3986,12 +3991,12 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
loop = state->multi_channels_adjust_count - diff;
for (i = 0; i < loop; i++) {
dmabuf_temp = &state->other_states[i]->dmabuf;
- if (copy_from_user(dmabuf_temp->rawbuf +
- dmabuf_temp->swptr,
+ if (copy_from_user(dmabuf_temp->rawbuf +
+ dmabuf_temp->swptr,
buffer, sample_s))
return -EFAULT;
- seek_offset(dmabuf_temp->swptr, buffer,
- cnt_for_multi_channel,
+ seek_offset(dmabuf_temp->swptr, buffer,
+ cnt_for_multi_channel,
sample_s, *copy_count);
}
}
@@ -4048,11 +4053,11 @@ ali_write_proc(struct file *file, const char __user *buffer, unsigned long count
ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL);
break;
case '1':
- ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
+ ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
ALI_SPDIF_OUT_PCM);
break;
case '2':
- ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
+ ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
ALI_SPDIF_OUT_NON_PCM);
break;
case '3':
@@ -4077,7 +4082,7 @@ trident_open_mixdev(struct inode *inode, struct file *file)
for (card = devs; card != NULL; card = card->next)
for (i = 0; i < NR_AC97; i++)
- if (card->ac97_codec[i] != NULL &&
+ if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor)
goto match;
@@ -4091,7 +4096,7 @@ trident_open_mixdev(struct inode *inode, struct file *file)
}
static int
-trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
+trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
@@ -4185,9 +4190,9 @@ trident_ac97_init(struct trident_card *card)
/* disable AC97 GPIO interrupt */
outl(0x00, TRID_REG(card, SI_AC97_GPIO));
/* when power up the AC link is in cold reset mode so stop it */
- outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID,
+ outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID,
TRID_REG(card, SI_SERIAL_INTF_CTRL));
- /* it take a long time to recover from a cold reset */
+ /* it take a long time to recover from a cold reset */
/* (especially when you have more than one codec) */
udelay(2000);
ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4207,9 +4212,9 @@ trident_ac97_init(struct trident_card *card)
/* disable AC97 GPIO interrupt */
outl(0x00, TRID_REG(card, SI_AC97_GPIO));
/* when power up, the AC link is in cold reset mode, so stop it */
- outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT,
+ outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT,
TRID_REG(card, SI_SERIAL_INTF_CTRL));
- /* it take a long time to recover from a cold reset (especially */
+ /* it take a long time to recover from a cold reset (especially */
/* when you have more than one codec) */
udelay(2000);
ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4221,7 +4226,7 @@ trident_ac97_init(struct trident_card *card)
if ((codec = ac97_alloc_codec()) == NULL)
return -ENOMEM;
- /* initialize some basic codec information, other fields */
+ /* initialize some basic codec information, other fields */
/* will be filled in ac97_probe_codec */
codec->private_data = card;
codec->id = num_ac97;
@@ -4352,8 +4357,8 @@ static inline int trident_register_gameport(struct trident_card *card) { return
static inline void trident_unregister_gameport(struct trident_card *card) { }
#endif /* SUPPORT_JOYSTICK */
-/* install the driver, we do not allocate hardware channel nor DMA buffer */
-/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */
+/* install the driver, we do not allocate hardware channel nor DMA buffer */
+/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */
/* open/read/write/ioctl/mmap) */
static int __devinit
trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
@@ -4376,9 +4381,9 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
else
dma_mask = TRIDENT_DMA_MASK;
if (pci_set_dma_mask(pci_dev, dma_mask)) {
- printk(KERN_ERR "trident: architecture does not support"
- " %s PCI busmaster DMA\n",
- pci_dev->device == PCI_DEVICE_ID_ALI_5451 ?
+ printk(KERN_ERR "trident: architecture does not support"
+ " %s PCI busmaster DMA\n",
+ pci_dev->device == PCI_DEVICE_ID_ALI_5451 ?
"32-bit" : "30-bit");
goto out;
}
@@ -4422,7 +4427,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
pci_set_master(pci_dev);
- printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
+ printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
card_names[pci_id->driver_data], card->iobase, card->irq);
if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
@@ -4449,9 +4454,9 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
/* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */
card->hwvolctl = 0;
- pci_dev_m1533 = pci_find_device(PCI_VENDOR_ID_AL,
- PCI_DEVICE_ID_AL_M1533,
- pci_dev_m1533);
+ pci_dev_m1533 = pci_get_device(PCI_VENDOR_ID_AL,
+ PCI_DEVICE_ID_AL_M1533,
+ pci_dev_m1533);
rc = -ENODEV;
if (pci_dev_m1533 == NULL)
goto out_proc_fs;
@@ -4465,6 +4470,8 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
bits &= 0xbf; /*clear bit 6 */
pci_write_config_byte(pci_dev_m1533, 0x7b, bits);
}
+ pci_dev_put(pci_dev_m1533);
+
} else if (card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
card->alloc_pcm_channel = cyber_alloc_pcm_channel;
card->alloc_rec_pcm_channel = cyber_alloc_pcm_channel;
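pci_find_device() is the old, unrefcounted lookup; pci_get_device() returns the struct pci_dev with its reference count raised, so the caller must release it with pci_dev_put() once finished, which the call added above now does. The refcounted pattern on its own (error handling trimmed; the M1533 IDs match the ones used above):

	#include <linux/errno.h>
	#include <linux/pci.h>

	static int poke_m1533(void)
	{
		struct pci_dev *dev;
		u8 bits;

		dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
		if (!dev)
			return -ENODEV;

		pci_read_config_byte(dev, 0x7b, &bits);  /* reference held here */
		pci_write_config_byte(dev, 0x7b, bits);

		pci_dev_put(dev);                        /* drop the reference */
		return 0;
	}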
@@ -4482,7 +4489,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
rc = -ENODEV;
if (request_irq(card->irq, &trident_interrupt, IRQF_SHARED,
card_names[pci_id->driver_data], card)) {
- printk(KERN_ERR "trident: unable to allocate irq %d\n",
+ printk(KERN_ERR "trident: unable to allocate irq %d\n",
card->irq);
goto out_proc_fs;
}
@@ -4533,7 +4540,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
printk(KERN_INFO "trident: Running on Alpha system "
"type Nautilus\n");
ac97_data = ali_ac97_get(card, 0, AC97_POWER_CONTROL);
- ali_ac97_set(card, 0, AC97_POWER_CONTROL,
+ ali_ac97_set(card, 0, AC97_POWER_CONTROL,
ac97_data | ALI_EAPD_POWER_DOWN);
}
}
@@ -4566,7 +4573,7 @@ out_proc_fs:
devs = NULL;
out_release_region:
release_region(iobase, 256);
- return rc;
+ return rc;
}
static void __devexit
@@ -4634,8 +4641,8 @@ static struct pci_driver trident_pci_driver = {
static int __init
trident_init_module(void)
{
- printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro "
- "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " "
+ printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro "
+ "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " "
__DATE__ "\n");
return pci_register_driver(&trident_pci_driver);
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 1d9232d2db3..170781a7229 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include <sound/core.h>
#include "mixart.h"