-rw-r--r--Documentation/block/data-integrity.txt4
-rw-r--r--Documentation/cgroups/cpusets.txt12
-rw-r--r--Documentation/gcov.txt25
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/kmemleak.txt23
-rw-r--r--Documentation/leds-lp3944.txt50
-rw-r--r--Documentation/powerpc/dts-bindings/gpio/led.txt17
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/spi/spidev_test.c10
-rw-r--r--MAINTAINERS62
-rw-r--r--arch/alpha/include/asm/percpu.h6
-rw-r--r--arch/arm/Kconfig.debug8
-rw-r--r--arch/arm/configs/s3c2410_defconfig2
-rw-r--r--arch/arm/configs/s3c6400_defconfig1
-rw-r--r--arch/arm/configs/tct_hammer_defconfig1
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/kernel/irq.c24
-rw-r--r--arch/arm/kernel/vmlinux.lds.S15
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c54
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c6
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c3
-rw-r--r--arch/arm/mach-omap1/mailbox.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c1
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c21
-rw-r--r--arch/arm/mach-omap2/id.c22
-rw-r--r--arch/arm/mach-omap2/mailbox.c6
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c13
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c3
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c3
-rw-r--r--arch/arm/plat-omap/dma.c13
-rw-r--r--arch/arm/plat-omap/gpio.c1
-rw-r--r--arch/arm/plat-omap/include/mach/cpu.h22
-rw-r--r--arch/arm/plat-omap/include/mach/dma.h15
-rw-r--r--arch/arm/plat-omap/include/mach/io.h2
-rw-r--r--arch/arm/plat-omap/iommu.c2
-rw-r--r--arch/arm/plat-omap/sram.c7
-rw-r--r--arch/arm/plat-s3c/Makefile2
-rw-r--r--arch/arm/plat-s3c/include/plat/devs.h1
-rw-r--r--arch/arm/plat-s3c24xx/Makefile2
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c3
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c3
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/include/asm/atomic.h68
-rw-r--r--arch/frv/include/asm/perf_counter.h17
-rw-r--r--arch/frv/include/asm/system.h2
-rw-r--r--arch/frv/include/asm/unistd.h4
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/frv_ksyms.c4
-rw-r--r--arch/frv/lib/Makefile4
-rw-r--r--arch/frv/lib/atomic-ops.S3
-rw-r--r--arch/frv/lib/atomic64-ops.S162
-rw-r--r--arch/frv/lib/perf_counter.c19
-rw-r--r--arch/ia64/kernel/esi.c2
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kernel/salinfo.c2
-rw-r--r--arch/ia64/kvm/kvm_lib.c6
-rw-r--r--arch/ia64/kvm/process.c6
-rw-r--r--arch/ia64/kvm/vcpu.c2
-rw-r--r--arch/ia64/kvm/vtlb.c4
-rw-r--r--arch/ia64/sn/kernel/io_common.c3
-rw-r--r--arch/mn10300/include/asm/unistd.h4
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/powerpc/include/asm/perf_counter.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h4
-rw-r--r--arch/s390/kvm/kvm-s390.c23
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/sh/Kconfig.debug4
-rw-r--r--arch/sh/boards/mach-se/7206/io.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c110
-rw-r--r--arch/sh/configs/migor_defconfig53
-rw-r--r--arch/sh/configs/se7724_defconfig25
-rw-r--r--arch/sh/include/asm/perf_counter.h2
-rw-r--r--arch/sh/include/asm/syscall_32.h1
-rw-r--r--arch/sh/include/mach-se/mach/se7724.h5
-rw-r--r--arch/sh/mm/fault_32.c61
-rw-r--r--arch/sh/mm/tlbflush_64.c15
-rw-r--r--arch/x86/Kconfig15
-rw-r--r--arch/x86/include/asm/boot.h6
-rw-r--r--arch/x86/include/asm/pci.h2
-rw-r--r--arch/x86/include/asm/percpu.h10
-rw-r--r--arch/x86/include/asm/perf_counter.h3
-rw-r--r--arch/x86/include/asm/proto.h11
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c4
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c22
-rw-r--r--arch/x86/kernel/dumpstack.c1
-rw-r--r--arch/x86/kernel/e820.c16
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/setup.c16
-rw-r--r--arch/x86/kernel/setup_percpu.c219
-rw-r--r--arch/x86/kernel/tlb_uv.c9
-rw-r--r--arch/x86/kernel/traps.c3
-rw-r--r--arch/x86/kvm/mmu.c6
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c15
-rw-r--r--arch/x86/kvm/x86.c1
-rw-r--r--arch/x86/kvm/x86_emulate.c2
-rw-r--r--arch/x86/lib/delay.c3
-rw-r--r--arch/x86/mm/init.c17
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/pageattr.c65
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--block/Makefile2
-rw-r--r--block/blk-core.c14
-rw-r--r--block/blk-merge.c6
-rw-r--r--block/bsg.c2
-rw-r--r--block/cfq-iosched.c176
-rw-r--r--block/cmd-filter.c233
-rw-r--r--block/elevator.c8
-rw-r--r--block/scsi_ioctl.c43
-rw-r--r--drivers/block/cciss.c15
-rw-r--r--drivers/block/cciss_cmd.h1
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/char/tty_ldisc.c15
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/edac/edac_core.h4
-rw-r--r--drivers/edac/edac_mc_sysfs.c4
-rw-r--r--drivers/edac/mpc85xx_edac.c6
-rw-r--r--drivers/edac/mpc85xx_edac.h1
-rw-r--r--drivers/gpio/pl061.c20
-rw-r--r--drivers/ide/ide-acpi.c37
-rw-r--r--drivers/ide/ide-cd.c14
-rw-r--r--drivers/ide/ide-devsets.c2
-rw-r--r--drivers/ide/ide-eh.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c14
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-pm.c30
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-alix2.c7
-rw-r--r--drivers/leds/leds-bd2802.c96
-rw-r--r--drivers/leds/leds-gpio.c22
-rw-r--r--drivers/leds/leds-lp3944.c466
-rw-r--r--drivers/leds/leds-pca9532.c58
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_user.c4
-rw-r--r--drivers/md/dm-exception-store.c9
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/linear.c4
-rw-r--r--drivers/md/md.c56
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid0.c9
-rw-r--r--drivers/md/raid1.c9
-rw-r--r--drivers/md/raid10.c19
-rw-r--r--drivers/md/raid5.c28
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c2
-rw-r--r--drivers/mtd/inftlcore.c11
-rw-r--r--drivers/mtd/maps/integrator-flash.c22
-rw-r--r--drivers/mtd/nand/atmel_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c7
-rw-r--r--drivers/mtd/nftlcore.c16
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/benet/be.h2
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/benet/be_main.c45
-rw-r--r--drivers/net/bnx2x_main.c10
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/e1000/e1000_main.c11
-rw-r--r--drivers/net/e1000e/netdev.c3
-rw-r--r--drivers/net/fsl_pq_mdio.c8
-rw-r--r--drivers/net/igb/igb_main.c14
-rw-r--r--drivers/net/irda/bfin_sir.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c45
-rw-r--r--drivers/net/mdio.c4
-rw-r--r--drivers/net/sh_eth.c9
-rw-r--r--drivers/net/sky2.c2
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/dm9601.c10
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/smsc95xx.c10
-rw-r--r--drivers/net/usb/usbnet.c30
-rw-r--r--drivers/net/veth.c41
-rw-r--r--drivers/parport/parport_pc.c5
-rw-r--r--drivers/pci/intel-iommu.c704
-rw-r--r--drivers/pci/iova.c26
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/rtc/rtc-bfin.c30
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c4
-rw-r--r--drivers/scsi/fnic/fnic_main.c8
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/sg.c4
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/serial/8250_pci.c6
-rw-r--r--drivers/spi/omap_uwire.c2
-rw-r--r--drivers/spi/spi_bitbang.c24
-rw-r--r--drivers/spi/spidev.c17
-rw-r--r--drivers/video/atafb.c7
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/atyfb.h3
-rw-r--r--drivers/video/aty/atyfb_base.c141
-rw-r--r--drivers/video/aty/mach64_accel.c7
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/fbmem.c13
-rw-r--r--drivers/video/fsl-diu-fb.c14
-rw-r--r--drivers/video/i810/i810_main.c2
-rw-r--r--drivers/video/matrox/matroxfb_base.c3
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c5
-rw-r--r--drivers/video/mx3fb.c17
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pxafb.c2
-rw-r--r--drivers/video/sh7760fb.c19
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c53
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sm501fb.c21
-rw-r--r--drivers/video/w100fb.c2
-rw-r--r--fs/afs/flock.c1
-rw-r--r--fs/aio.c24
-rw-r--r--fs/binfmt_elf.c9
-rw-r--r--fs/bio-integrity.c170
-rw-r--r--fs/bio.c11
-rw-r--r--fs/btrfs/async-thread.c2
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/extent-tree.c566
-rw-r--r--fs/btrfs/file.c5
-rw-r--r--fs/btrfs/inode.c25
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/relocation.c5
-rw-r--r--fs/btrfs/transaction.c4
-rw-r--r--fs/cifs/file.c10
-rw-r--r--fs/eventfd.c122
-rw-r--r--fs/ext2/namei.c12
-rw-r--r--fs/fuse/dev.c83
-rw-r--r--fs/fuse/dir.c57
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/fuse_i.h27
-rw-r--r--fs/fuse/inode.c68
-rw-r--r--fs/hostfs/hostfs_kern.c1
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/namei.c7
-rw-r--r--fs/notify/inotify/inotify_user.c3
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h3
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/bio.h22
-rw-r--r--include/linux/blkdev.h15
-rw-r--r--include/linux/eventfd.h35
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/fuse.h36
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/ide.h3
-rw-r--r--include/linux/ima.h6
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/leds-lp3944.h53
-rw-r--r--include/linux/leds.h14
-rw-r--r--include/linux/netfilter/xt_conntrack.h13
-rw-r--r--include/linux/netfilter/xt_osf.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu-defs.h3
-rw-r--r--include/linux/perf_counter.h46
-rw-r--r--include/linux/sched.h16
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/spidev.h2
-rw-r--r--include/linux/timer.h4
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/phonet/pn_dev.h1
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/acct.c6
-rw-r--r--kernel/perf_counter.c320
-rw-r--r--kernel/pid.c7
-rw-r--r--kernel/resource.c2
-rw-r--r--kernel/sysctl.c13
-rw-r--r--kernel/time/timer_stats.c16
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/ftrace.c56
-rw-r--r--kernel/trace/ring_buffer.c11
-rw-r--r--kernel/trace/trace.c23
-rw-r--r--kernel/trace/trace.h7
-rw-r--r--kernel/trace/trace_events.c28
-rw-r--r--kernel/trace/trace_functions.c3
-rw-r--r--kernel/trace/trace_printk.c26
-rw-r--r--kernel/trace/trace_stat.c6
-rw-r--r--lib/Kconfig.debug12
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/kmemleak.c197
-rw-r--r--mm/nommu.c21
-rw-r--r--mm/page-writeback.c5
-rw-r--r--mm/page_alloc.c13
-rw-r--r--mm/percpu.c24
-rw-r--r--net/bridge/br.c2
-rw-r--r--net/core/dev.c8
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/ieee802154/netlink.c6
-rw-r--r--net/ipv4/arp.c7
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c17
-rw-r--r--net/ipv4/tcp.c15
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_extend.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/xt_conntrack.c66
-rw-r--r--net/phonet/pn_dev.c52
-rw-r--r--net/phonet/pn_netlink.c4
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sunrpc/sunrpc_syms.c1
-rw-r--r--net/xfrm/xfrm_algo.c4
-rw-r--r--net/xfrm/xfrm_state.c57
-rwxr-xr-xscripts/kernel-doc4
-rw-r--r--scripts/pnmtologo.c4
-rw-r--r--security/integrity/ima/ima_main.c29
-rw-r--r--security/integrity/ima/ima_queue.c3
-rw-r--r--sound/isa/cmi8330.c2
-rw-r--r--sound/oss/kahlua.c2
-rw-r--r--sound/oss/mpu401.c16
-rw-r--r--sound/pci/atiixp.c8
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au8810.c3
-rw-r--r--sound/pci/au88x0/au8820.c3
-rw-r--r--sound/pci/au88x0/au8830.c3
-rw-r--r--sound/pci/ca0106/ca0106_main.c2
-rw-r--r--sound/pci/cmipci.c10
-rw-r--r--sound/pci/cs4281.c2
-rw-r--r--sound/pci/cs46xx/cs46xx.c6
-rw-r--r--sound/pci/emu10k1/emu10k1.c6
-rw-r--r--sound/pci/emu10k1/emu10k1x.c2
-rw-r--r--sound/pci/ens1370.c8
-rw-r--r--sound/pci/es1938.c2
-rw-r--r--sound/pci/hda/patch_analog.c128
-rw-r--r--sound/pci/hda/patch_realtek.c46
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1724.c2
-rw-r--r--sound/pci/intel8x0.c46
-rw-r--r--sound/pci/intel8x0m.c34
-rw-r--r--sound/pci/lx6464es/lx6464es.c7
-rw-r--r--sound/pci/mixart/mixart.c2
-rw-r--r--sound/pci/nm256/nm256.c6
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c28
-rw-r--r--sound/pci/rme32.c9
-rw-r--r--sound/pci/rme96.c12
-rw-r--r--sound/pci/sonicvibes.c2
-rw-r--r--sound/pci/via82xx.c4
-rw-r--r--sound/pci/via82xx_modem.c2
-rw-r--r--sound/pci/ymfpci/ymfpci.c12
-rw-r--r--tools/perf/CREDITS30
-rw-r--r--tools/perf/Documentation/perf-report.txt14
-rw-r--r--tools/perf/Documentation/perf-stat.txt6
-rw-r--r--tools/perf/Makefile6
-rw-r--r--tools/perf/builtin-annotate.c8
-rw-r--r--tools/perf/builtin-record.c127
-rw-r--r--tools/perf/builtin-report.c236
-rw-r--r--tools/perf/builtin-stat.c171
-rw-r--r--tools/perf/builtin-top.c11
-rw-r--r--tools/perf/perf.h19
-rw-r--r--tools/perf/util/callchain.c174
-rw-r--r--tools/perf/util/callchain.h33
-rw-r--r--tools/perf/util/header.c242
-rw-r--r--tools/perf/util/header.h37
-rw-r--r--tools/perf/util/help.c15
-rw-r--r--tools/perf/util/pager.c5
-rw-r--r--tools/perf/util/parse-events.c153
-rw-r--r--tools/perf/util/run-command.c95
-rw-r--r--tools/perf/util/run-command.h5
-rw-r--r--tools/perf/util/strbuf.c2
-rw-r--r--tools/perf/util/string.h2
-rw-r--r--tools/perf/util/strlist.c184
-rw-r--r--tools/perf/util/strlist.h32
-rw-r--r--tools/perf/util/symbol.c16
-rw-r--r--tools/perf/util/symbol.h5
-rw-r--r--tools/perf/util/types.h (renamed from tools/perf/types.h)0
-rw-r--r--tools/perf/util/util.h15
-rw-r--r--virt/kvm/kvm_main.c5
382 files changed, 6551 insertions, 2751 deletions
diff --git a/Documentation/block/data-integrity.txt b/Documentation/block/data-integrity.txt
index e8ca040ba2c..2d735b0ae38 100644
--- a/Documentation/block/data-integrity.txt
+++ b/Documentation/block/data-integrity.txt
@@ -50,7 +50,7 @@ encouraged them to allow separation of the data and integrity metadata
scatter-gather lists.
The controller will interleave the buffers on write and split them on
-read. This means that the Linux can DMA the data buffers to and from
+read. This means that Linux can DMA the data buffers to and from
host memory without changes to the page cache.
Also, the 16-bit CRC checksum mandated by both the SCSI and SATA specs
@@ -66,7 +66,7 @@ software RAID5).
The IP checksum is weaker than the CRC in terms of detecting bit
errors. However, the strength is really in the separation of the data
-buffers and the integrity metadata. These two distinct buffers much
+buffers and the integrity metadata. These two distinct buffers must
match up for an I/O to complete.
The separation of the data and integrity metadata buffers as well as
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index f9ca389dddf..1d7e9784439 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -777,6 +777,18 @@ in cpuset directories:
# /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4
# /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4
+To add a CPU to a cpuset, write the new list of CPUs including the
+CPU to be added. To add 6 to the above cpuset:
+
+# /bin/echo 1-4,6 > cpus -> set cpus list to cpus 1,2,3,4,6
+
+Similarly to remove a CPU from a cpuset, write the new list of CPUs
+without the CPU to be removed.
+
+To remove all the CPUs:
+
+# /bin/echo "" > cpus -> clear cpus list
+
2.3 Setting flags
-----------------
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index e716aadb3a3..40ec6335276 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -188,13 +188,18 @@ Solution: Exclude affected source files from profiling by specifying
GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the
corresponding Makefile.
+Problem: Files copied from sysfs appear empty or incomplete.
+Cause: Due to the way seq_file works, some tools such as cp or tar
+ may not correctly copy files from sysfs.
+Solution: Use 'cat' to read .gcda files and 'cp -d' to copy links.
+ Alternatively use the mechanism shown in Appendix B.
+
Appendix A: gather_on_build.sh
==============================
Sample script to gather coverage meta files on the build machine
(see 6a):
-
#!/bin/bash
KSRC=$1
@@ -226,7 +231,7 @@ Appendix B: gather_on_test.sh
Sample script to gather coverage data files on the test machine
(see 6b):
-#!/bin/bash
+#!/bin/bash -e
DEST=$1
GCDA=/sys/kernel/debug/gcov
@@ -236,11 +241,13 @@ if [ -z "$DEST" ] ; then
exit 1
fi
-find $GCDA -name '*.gcno' -o -name '*.gcda' | tar cfz $DEST -T -
+TEMPDIR=$(mktemp -d)
+echo Collecting data..
+find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \;
+find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \;
+find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \;
+tar czf $DEST -C $TEMPDIR sys
+rm -rf $TEMPDIR
-if [ $? -eq 0 ] ; then
- echo "$DEST successfully created, copy to build system and unpack with:"
- echo " tar xfz $DEST"
-else
- echo "Could not create file $DEST"
-fi
+echo "$DEST successfully created, copy to build system and unpack with:"
+echo " tar xfz $DEST"
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d08759aa090..d77fbd8b79a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1915,6 +1915,12 @@ and is between 256 and 4096 characters. It is defined in the file
Format: { 0 | 1 }
See arch/parisc/kernel/pdc_chassis.c
+ percpu_alloc= [X86] Select which percpu first chunk allocator to use.
+ Allowed values are one of "lpage", "embed" and "4k".
+ See comments in arch/x86/kernel/setup_percpu.c for
+ details on each allocator. This parameter is primarily
+ for debugging and performance comparison.
+
pf. [PARIDE]
See Documentation/blockdev/paride.txt.
@@ -2467,7 +2473,8 @@ and is between 256 and 4096 characters. It is defined in the file
tp720= [HW,PS2]
- trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size.
+ trace_buf_size=nn[KMG]
+ [FTRACE] will set tracing buffer size.
trix= [HW,OSS] MediaTrix AudioTrix Pro
Format:
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 0112da3b9ab..89068030b01 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -16,13 +16,17 @@ Usage
-----
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
-thread scans the memory every 10 minutes (by default) and prints any new
-unreferenced objects found. To trigger an intermediate scan and display
-all the possible memory leaks:
+thread scans the memory every 10 minutes (by default) and prints the
+number of new unreferenced objects found. To display the details of all
+the possible memory leaks:
# mount -t debugfs nodev /sys/kernel/debug/
# cat /sys/kernel/debug/kmemleak
+To trigger an intermediate memory scan:
+
+ # echo scan > /sys/kernel/debug/kmemleak
+
Note that the orphan objects are listed in the order they were allocated
and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
@@ -31,16 +35,21 @@ Memory scanning parameters can be modified at run-time by writing to the
/sys/kernel/debug/kmemleak file. The following parameters are supported:
off - disable kmemleak (irreversible)
- stack=on - enable the task stacks scanning
+ stack=on - enable the task stacks scanning (default)
stack=off - disable the tasks stacks scanning
- scan=on - start the automatic memory scanning thread
+ scan=on - start the automatic memory scanning thread (default)
scan=off - stop the automatic memory scanning thread
- scan=<secs> - set the automatic memory scanning period in seconds (0
- to disable it)
+ scan=<secs> - set the automatic memory scanning period in seconds
+ (default 600, 0 to stop the automatic scanning)
+ scan - trigger a memory scan
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
the kernel command line.
+Memory may be allocated or freed before kmemleak is initialised and
+these actions are stored in an early log buffer. The size of this buffer
+is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
+
Basic Algorithm
---------------
diff --git a/Documentation/leds-lp3944.txt b/Documentation/leds-lp3944.txt
new file mode 100644
index 00000000000..c6eda18b15e
--- /dev/null
+++ b/Documentation/leds-lp3944.txt
@@ -0,0 +1,50 @@
+Kernel driver lp3944
+====================
+
+ * National Semiconductor LP3944 Fun-light Chip
+ Prefix: 'lp3944'
+ Addresses scanned: None (see the Notes section below)
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/pf/LP/LP3944.html
+
+Authors:
+ Antonio Ospite <ospite@studenti.unina.it>
+
+
+Description
+-----------
+The LP3944 is a helper chip that can drive up to 8 leds, with two programmable
+DIM modes; it could even be used as a gpio expander but this driver assumes it
+is used as a led controller.
+
+The DIM modes are used to set _blink_ patterns for leds, the pattern is
+specified supplying two parameters:
+ - period: from 0s to 1.6s
+ - duty cycle: percentage of the period the led is on, from 0 to 100
+
+Setting a led in DIM0 or DIM1 mode makes it blink according to the pattern.
+See the datasheet for details.
+
+LP3944 can be found on Motorola A910 smartphone, where it drives the rgb
+leds, the camera flash light and the lcds power.
+
+
+Notes
+-----
+The chip is used mainly in embedded contexts, so this driver expects it is
+registered using the i2c_board_info mechanism.
+
+To register the chip at address 0x60 on adapter 0, set the platform data
+according to include/linux/leds-lp3944.h, set the i2c board info:
+
+ static struct i2c_board_info __initdata a910_i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("lp3944", 0x60),
+ .platform_data = &a910_lp3944_leds,
+ },
+ };
+
+and register it in the platform init function
+
+ i2c_register_board_info(0, a910_i2c_board_info,
+ ARRAY_SIZE(a910_i2c_board_info));
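The platform data referenced above follows include/linux/leds-lp3944.h (added in this same series). A minimal sketch of what a910_lp3944_leds could look like, assuming the lp3944_platform_data/lp3944_led layout and LP3944_* constants from that header; the board-specific entry names and values are illustrative, not taken from real board code:

	#include <linux/leds-lp3944.h>

	static struct lp3944_platform_data a910_lp3944_leds = {
		/* number of entries actually configured (assumption) */
		.leds_size = 1,
		.leds = {
			[0] = {
				/* name, type and status below are illustrative */
				.name   = "a910::cam-flash",
				.type   = LP3944_LED_TYPE_LED,
				.status = LP3944_LED_STATUS_OFF,
			},
			/* further LED1..LED7 outputs would follow the same pattern */
		},
	};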
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/powerpc/dts-bindings/gpio/led.txt
index 4fe14deedc0..064db928c3c 100644
--- a/Documentation/powerpc/dts-bindings/gpio/led.txt
+++ b/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -16,10 +16,17 @@ LED sub-node properties:
string defining the trigger assigned to the LED. Current triggers are:
"backlight" - LED will act as a back-light, controlled by the framebuffer
system
- "default-on" - LED will turn on
+ "default-on" - LED will turn on, but see "default-state" below
"heartbeat" - LED "double" flashes at a load average based rate
"ide-disk" - LED indicates disk activity
"timer" - LED flashes at a fixed, configurable rate
+- default-state: (optional) The initial state of the LED. Valid
+ values are "on", "off", and "keep". If the LED is already on or off
+ and the default-state property is set the to same value, then no
+ glitch should be produced where the LED momentarily turns off (or
+ on). The "keep" setting will keep the LED at whatever its current
+ state is, without producing a glitch. The default is off if this
+ property is not present.
Examples:
@@ -30,14 +37,22 @@ leds {
gpios = <&mcu_pio 0 1>; /* Active low */
linux,default-trigger = "ide-disk";
};
+
+ fault {
+ gpios = <&mcu_pio 1 0>;
+ /* Keep LED on if BIOS detected hardware fault */
+ default-state = "keep";
+ };
};
run-control {
compatible = "gpio-leds";
red {
gpios = <&mpc8572 6 0>;
+ default-state = "off";
};
green {
gpios = <&mpc8572 7 0>;
+ default-state = "on";
};
}
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 0d8d23581c4..939a3dd5814 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -240,6 +240,7 @@ AD1986A
laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
ultra 2-channel with EAPD (Samsung Ultra tablet PC)
samsung 2-channel with EAPD (Samsung R65)
+ samsung-p50 2-channel with HP-automute (Samsung P50)
AD1988/AD1988B/AD1989A/AD1989B
==============================
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index cf0e3ce0d52..c1a5aad3c75 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -99,11 +99,13 @@ void parse_opts(int argc, char *argv[])
{ "lsb", 0, 0, 'L' },
{ "cs-high", 0, 0, 'C' },
{ "3wire", 0, 0, '3' },
+ { "no-cs", 0, 0, 'N' },
+ { "ready", 0, 0, 'R' },
{ NULL, 0, 0, 0 },
};
int c;
- c = getopt_long(argc, argv, "D:s:d:b:lHOLC3", lopts, NULL);
+ c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR", lopts, NULL);
if (c == -1)
break;
@@ -139,6 +141,12 @@ void parse_opts(int argc, char *argv[])
case '3':
mode |= SPI_3WIRE;
break;
+ case 'N':
+ mode |= SPI_NO_CS;
+ break;
+ case 'R':
+ mode |= SPI_READY;
+ break;
default:
print_usage(argv[0]);
break;
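Both new options simply set additional SPI mode bits (SPI_NO_CS and SPI_READY, added to the spidev/spi headers in this series) before the test tool issues its existing SPI_IOC_WR_MODE ioctl. A self-contained userspace sketch of doing the same outside the test tool; the device node path and error handling are illustrative:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/spi/spidev.h>

	int main(void)
	{
		/* request mode 0 with the chip-select line left alone (--no-cs) */
		uint8_t mode = SPI_MODE_0 | SPI_NO_CS;
		int fd = open("/dev/spidev1.1", O_RDWR);

		if (fd < 0 || ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0) {
			perror("spidev");
			return 1;
		}
		/* later SPI_IOC_MESSAGE transfers then run without toggling CS;
		 * a controller that cannot honour the bit is expected to reject it */
		return 0;
	}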
diff --git a/MAINTAINERS b/MAINTAINERS
index fa2a16def17..381190c7949 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -867,12 +867,22 @@ M: alex@shark-linux.de
W: http://www.shark-linux.de/shark.html
S: Maintained
+ARM/SAMSUNG ARM ARCHITECTURES
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/plat-s3c/
+F: arch/arm/plat-s3c24xx/
+
ARM/S3C2410 ARM ARCHITECTURE
P: Ben Dooks
M: ben-linux@fluff.org
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
W: http://www.fluff.org/ben/linux/
S: Maintained
+F: arch/arm/mach-s3c2410/
ARM/S3C2440 ARM ARCHITECTURE
P: Ben Dooks
@@ -880,6 +890,39 @@ M: ben-linux@fluff.org
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
W: http://www.fluff.org/ben/linux/
S: Maintained
+F: arch/arm/mach-s3c2440/
+
+ARM/S3C2442 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c2442/
+
+ARM/S3C2443 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c2443/
+
+ARM/S3C6400 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c6400/
+
+ARM/S3C6410 ARM ARCHITECTURE
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://www.fluff.org/ben/linux/
+S: Maintained
+F: arch/arm/mach-s3c6410/
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
P: Lennert Buytenhek
@@ -2087,9 +2130,9 @@ F: drivers/edac/i5400_edac.c
EDAC-I82975X
P: Ranganathan Desikan
-M: rdesikan@jetzbroadband.com
+M: ravi@jetztechnologies.com
P: Arvind R.
-M: arvind@acarlab.com
+M: arvind@jetztechnologies.com
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
@@ -2808,7 +2851,9 @@ S: Maintained
IA64 (Itanium) PLATFORM
P: Tony Luck
+P: Fenghua Yu
M: tony.luck@intel.com
+M: fenghua.yu@intel.com
L: linux-ia64@vger.kernel.org
W: http://www.ia64-linux.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git
@@ -2886,7 +2931,7 @@ P: Dmitry Eremin-Solenikov
M: dbaryshkov@gmail.com
P: Sergey Lapin
M: slapin@ossfans.org
-L: linux-zigbee-devel@lists.sourceforge.net
+L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
@@ -5533,8 +5578,8 @@ F: drivers/staging/
STARFIRE/DURALAN NETWORK DRIVER
P: Ion Badulescu
-M: ionut@cs.columbia.edu
-S: Maintained
+M: ionut@badula.org
+S: Odd Fixes
F: drivers/net/starfire*
STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
@@ -5668,6 +5713,13 @@ F: drivers/misc/tifm*
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
+TI TWL4030 SERIES SOC CODEC DRIVER
+P: Peter Ujfalusi
+M: peter.ujfalusi@nokia.com
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/codecs/twl4030*
+
TIPC NETWORK LAYER
P: Per Liden
M: per.liden@ericsson.com
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 06c5c7a4afd..b663f1f10b6 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -30,7 +30,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#ifndef MODULE
#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
#else
/*
* To calculate addresses of locally defined variables, GCC uses 32-bit
@@ -49,7 +49,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
: "=&r"(__ptr), "=&r"(tmp_gp)); \
(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-#define PER_CPU_ATTRIBUTES __used
+#define PER_CPU_DEF_ATTRIBUTES __used
#endif /* MODULE */
@@ -71,7 +71,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
-#define PER_CPU_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
#endif /* SMP */
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a71fd941ade..a89e4734b8f 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -99,14 +99,6 @@ config DEBUG_CLPS711X_UART2
output to the second serial port on these devices. Saying N will
cause the debug messages to appear on the first serial port.
-config DEBUG_S3C_PORT
- depends on DEBUG_LL && PLAT_S3C
- bool "Kernel low-level debugging messages via S3C UART"
- help
- Say Y here if you want debug print routines to go to one of the
- S3C internal UARTs. The chosen UART must have been configured
- before it is used.
-
config DEBUG_S3C_UART
depends on PLAT_S3C
int "S3C UART to use for low-level debug"
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2d58b8fe59b..b49810461e4 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -260,6 +260,7 @@ CONFIG_MACH_NEXCODER_2440=y
CONFIG_SMDK2440_CPU2440=y
CONFIG_MACH_AT2440EVB=y
CONFIG_CPU_S3C2442=y
+CONFIG_MACH_MINI2440=y
#
# S3C2442 Machines
@@ -2298,7 +2299,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-CONFIG_DEBUG_S3C_PORT=y
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index 2e8fa50e9a0..32860609e05 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -816,7 +816,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-CONFIG_DEBUG_S3C_PORT=y
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 07dfb98df4f..9d32faef05f 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -857,7 +857,6 @@ CONFIG_DEBUG_ERRORS=y
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_LL=y
# CONFIG_DEBUG_ICEDCC is not set
-# CONFIG_DEBUG_S3C_PORT is not set
CONFIG_DEBUG_S3C_UART=0
#
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index be962c1349c..9c746af1bf6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -12,7 +12,7 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 096f600dc8d..b7c3490eaa2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock:
return 0;
}
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
- .handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
-};
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* We are not allocating bad_irq_desc.affinity or .pending_mask */
-#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
-#endif
-
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
- if (irq >= NR_IRQS)
- handle_bad_irq(irq, &bad_irq_desc);
- else
+ if (unlikely(irq >= NR_IRQS)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "Bad IRQ%u\n", irq);
+ ack_bad_irq(irq);
+ } else {
generic_handle_irq(irq);
+ }
/* AT91 specific workaround */
irq_finish(irq);
@@ -165,10 +157,6 @@ void __init init_IRQ(void)
for (irq = 0; irq < NR_IRQS; irq++)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-#ifdef CONFIG_SMP
- cpumask_setall(bad_irq_desc.affinity);
- bad_irq_desc.node = smp_processor_id();
-#endif
init_arch_irq();
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4340bf3d2c8..69371028a20 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/page.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -63,7 +64,7 @@ SECTIONS
usr/built-in.o(.init.ramfs)
__initramfs_end = .;
#endif
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__per_cpu_load = .;
__per_cpu_start = .;
*(.data.percpu.page_aligned)
@@ -73,7 +74,7 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
}
@@ -118,7 +119,7 @@ SECTIONS
*(.got) /* Global offset table */
}
- RODATA
+ RO_DATA(PAGE_SIZE)
_etext = .; /* End of text and rodata section */
@@ -158,17 +159,17 @@ SECTIONS
*(.data.init_task)
#ifdef CONFIG_XIP_KERNEL
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__nosave_begin = .;
*(.data.nosave)
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__nosave_end = .;
/*
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index cc270beadd5..a55398ed121 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -24,6 +24,8 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/at73c213.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
#include <linux/clk.h>
#include <mach/hardware.h>
@@ -218,6 +220,56 @@ static struct gpio_led ek_leds[] = {
}
};
+
+/*
+ * GPIO Buttons
+ */
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+static struct gpio_keys_button ek_buttons[] = {
+ {
+ .gpio = AT91_PIN_PA30,
+ .code = BTN_3,
+ .desc = "Button 3",
+ .active_low = 1,
+ .wakeup = 1,
+ },
+ {
+ .gpio = AT91_PIN_PA31,
+ .code = BTN_4,
+ .desc = "Button 4",
+ .active_low = 1,
+ .wakeup = 1,
+ }
+};
+
+static struct gpio_keys_platform_data ek_button_data = {
+ .buttons = ek_buttons,
+ .nbuttons = ARRAY_SIZE(ek_buttons),
+};
+
+static struct platform_device ek_button_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &ek_button_data,
+ }
+};
+
+static void __init ek_add_device_buttons(void)
+{
+ at91_set_gpio_input(AT91_PIN_PA30, 1); /* btn3 */
+ at91_set_deglitch(AT91_PIN_PA30, 1);
+ at91_set_gpio_input(AT91_PIN_PA31, 1); /* btn4 */
+ at91_set_deglitch(AT91_PIN_PA31, 1);
+
+ platform_device_register(&ek_button_device);
+}
+#else
+static void __init ek_add_device_buttons(void) {}
+#endif
+
+
static struct i2c_board_info __initdata ek_i2c_devices[] = {
{
I2C_BOARD_INFO("24c512", 0x50),
@@ -245,6 +297,8 @@ static void __init ek_board_init(void)
at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
/* LEDs */
at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds));
+ /* Push Buttons */
+ ek_add_device_buttons();
/* PCK0 provides MCLK to the WM8731 */
at91_set_B_periph(AT91_PIN_PC1, 0);
/* SSC (for WM8731) */
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 35e12a49d1a..f6b5672cabd 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -186,19 +186,21 @@ static struct fb_monspecs at91fb_default_monspecs = {
static void at91_lcdc_power_control(int on)
{
if (on)
- at91_set_gpio_value(AT91_PIN_PA30, 0); /* power up */
+ at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */
else
- at91_set_gpio_value(AT91_PIN_PA30, 1); /* power down */
+ at91_set_gpio_value(AT91_PIN_PC1, 1); /* power down */
}
/* Driver datas */
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+ .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9RL_DEFAULT_LCDCON2,
.default_monspecs = &at91fb_default_monspecs,
.atmel_lcdfb_power_control = at91_lcdc_power_control,
.guard_time = 1,
+ .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB,
};
#else
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index e70fc7c66bb..ed2a48a9ce7 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -36,7 +36,6 @@
#include <mach/hwa742.h>
#include <mach/lcd_mipid.h>
#include <mach/mmc.h>
-#include <mach/usb.h>
#include <mach/clock.h>
#define ADS7846_PENDOWN_GPIO 15
@@ -205,9 +204,11 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
.dma_mask = 0xffffffff,
+ .max_freq = 12000000,
.slots[0] = {
.set_power = nokia770_mmc_set_power,
.get_cover_state = nokia770_mmc_get_cover_state,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.name = "mmcblk",
},
};
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
index 0af4d6c85b4..6810b4aeb02 100644
--- a/arch/arm/mach-omap1/mailbox.c
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -203,5 +203,5 @@ module_exit(omap1_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU" <Hiroshi.DOYU@nokia.com>);
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_ALIAS("platform:omap1-mailbox");
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index da93b86234e..9a0bf6744a0 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -362,6 +362,7 @@ static struct omap_onenand_platform_data board_onenand_data = {
.gpio_irq = 65,
.parts = onenand_partitions,
.nr_parts = ARRAY_SIZE(onenand_partitions),
+ .flags = ONENAND_SYNC_READWRITE,
};
static void __init board_onenand_init(void)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 2fd22f9c5f0..54fec53a48e 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -31,6 +31,8 @@ static struct platform_device gpmc_onenand_device = {
static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
{
struct gpmc_timings t;
+ u32 reg;
+ int err;
const int t_cer = 15;
const int t_avdp = 12;
@@ -43,6 +45,11 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
const int t_wpl = 40;
const int t_wph = 30;
+ /* Ensure sync read and sync write are disabled */
+ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+
memset(&t, 0, sizeof(t));
t.sync_clk = 0;
t.cs_on = 0;
@@ -74,7 +81,16 @@ static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
GPMC_CONFIG1_DEVICESIZE_16 |
GPMC_CONFIG1_MUXADDDATA);
- return gpmc_cs_set_timings(cs, &t);
+ err = gpmc_cs_set_timings(cs, &t);
+ if (err)
+ return err;
+
+ /* Ensure sync read and sync write are disabled */
+ reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+ reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+
+ return 0;
}
static void set_onenand_cfg(void __iomem *onenand_base, int latency,
@@ -124,7 +140,8 @@ static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
} else if (cfg->flags & ONENAND_SYNC_READWRITE) {
sync_read = 1;
sync_write = 1;
- }
+ } else
+ return omap2_onenand_set_async_mode(cs, onenand_base);
if (!freq) {
/* Very first call freq is not known */
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 458990e20c6..a98201cc265 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -48,6 +48,28 @@ int omap_chip_is(struct omap_chip_id oci)
}
EXPORT_SYMBOL(omap_chip_is);
+int omap_type(void)
+{
+ u32 val = 0;
+
+ if (cpu_is_omap24xx())
+ val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
+ else if (cpu_is_omap34xx())
+ val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
+ else {
+ pr_err("Cannot detect omap type!\n");
+ goto out;
+ }
+
+ val &= OMAP2_DEVICETYPE_MASK;
+ val >>= 8;
+
+out:
+ return val;
+}
+EXPORT_SYMBOL(omap_type);
+
+
/*----------------------------------------------------------------------------*/
#define OMAP_TAP_IDCODE 0x0204
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index fd5b8a5925c..6f71f3730c9 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -282,12 +282,12 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
return -ENOMEM;
/* DSP or IVA2 IRQ */
- mbox_dsp_info.irq = platform_get_irq(pdev, 0);
- if (mbox_dsp_info.irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(&pdev->dev, "invalid irq resource\n");
- ret = -ENODEV;
goto err_dsp;
}
+ mbox_dsp_info.irq = ret;
ret = omap_mbox_register(&pdev->dev, &mbox_dsp_info);
if (ret)
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 9756a878fd9..1541fd4c8d0 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -263,8 +263,19 @@ static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
static int twl_mmc23_set_power(struct device *dev, int slot, int power_on, int vdd)
{
int ret = 0;
- struct twl_mmc_controller *c = &hsmmc[1];
+ struct twl_mmc_controller *c = NULL;
struct omap_mmc_platform_data *mmc = dev->platform_data;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(hsmmc); i++) {
+ if (mmc == hsmmc[i].mmc) {
+ c = &hsmmc[i];
+ break;
+ }
+ }
+
+ if (c == NULL)
+ return -ENODEV;
/* If we don't see a Vcc regulator, assume it's a fixed
* voltage always-on regulator.
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index 6a5bc3021bd..ec71a696578 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -48,8 +48,6 @@
#include <plat/mci.h>
#include <plat/udc.h>
-#include <plat/regs-serial.h>
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
@@ -275,6 +273,7 @@ static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = {
.nr_chips = 1,
.nr_partitions = ARRAY_SIZE(mini2440_default_nand_part),
.partitions = mini2440_default_nand_part,
+ .flash_bbt = 1, /* we use u-boot to create a BBT */
},
};
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index e23b581aa0e..0fb385bd9cd 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -433,8 +433,7 @@ static struct s3c2410_nand_set gta02_nand_sets[] = {
*/
.name = "neo1973-nand",
.nr_chips = 1,
- .use_bbt = 1,
- .force_soft_ecc = 1,
+ .flash_bbt = 1,
},
};
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index def14ec265b..7677a4a1cef 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -2457,6 +2457,19 @@ static int __init omap_init_dma(void)
setup_irq(irq, &omap24xx_dma_irq);
}
+ /* Enable smartidle idlemodes and autoidle */
+ if (cpu_is_omap34xx()) {
+ u32 v = dma_read(OCP_SYSCONFIG);
+ v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
+ DMA_SYSCONFIG_SIDLEMODE_MASK |
+ DMA_SYSCONFIG_AUTOIDLE);
+ v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
+ DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
+ DMA_SYSCONFIG_AUTOIDLE);
+ dma_write(v , OCP_SYSCONFIG);
+ }
+
+
/* FIXME: Update LCD DMA to work on 24xx */
if (cpu_class_is_omap1()) {
r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 7fd89ba8d3b..26b387c1242 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1585,6 +1585,7 @@ static int __init _omap_gpio_init(void)
__raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1);
__raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1);
__raw_writew(0x0015, bank->base + OMAP24XX_GPIO_SYSCONFIG);
+ __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_DEBOUNCE_EN);
/* Initialize interface clock ungated, module enabled */
__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index fc60c4ebcc2..285eaa3a827 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
@@ -30,6 +30,17 @@
#ifndef __ASM_ARCH_OMAP_CPU_H
#define __ASM_ARCH_OMAP_CPU_H
+/*
+ * Omap device type i.e. EMU/HS/TST/GP/BAD
+ */
+#define OMAP2_DEVICE_TYPE_TEST 0
+#define OMAP2_DEVICE_TYPE_EMU 1
+#define OMAP2_DEVICE_TYPE_SEC 2
+#define OMAP2_DEVICE_TYPE_GP 3
+#define OMAP2_DEVICE_TYPE_BAD 4
+
+int omap_type(void);
+
struct omap_chip_id {
u8 oc;
u8 type;
@@ -424,17 +435,6 @@ IS_OMAP_TYPE(3430, 0x3430)
int omap_chip_is(struct omap_chip_id oci);
-int omap_type(void);
-
-/*
- * Macro to detect device type i.e. EMU/HS/TST/GP/BAD
- */
-#define OMAP2_DEVICE_TYPE_TEST 0
-#define OMAP2_DEVICE_TYPE_EMU 1
-#define OMAP2_DEVICE_TYPE_SEC 2
-#define OMAP2_DEVICE_TYPE_GP 3
-#define OMAP2_DEVICE_TYPE_BAD 4
-
void omap2_check_revision(void);
#endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */
diff --git a/arch/arm/plat-omap/include/mach/dma.h b/arch/arm/plat-omap/include/mach/dma.h
index 8c1eae88737..7b939cc0196 100644
--- a/arch/arm/plat-omap/include/mach/dma.h
+++ b/arch/arm/plat-omap/include/mach/dma.h
@@ -389,6 +389,21 @@
#define DMA_THREAD_FIFO_25 (0x02 << 14)
#define DMA_THREAD_FIFO_50 (0x03 << 14)
+/* DMA4_OCP_SYSCONFIG bits */
+#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12)
+#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8)
+#define DMA_SYSCONFIG_EMUFREE (1 << 5)
+#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3)
+#define DMA_SYSCONFIG_SOFTRESET (1 << 2)
+#define DMA_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12)
+#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3)
+
+#define DMA_IDLEMODE_SMARTIDLE 0x2
+#define DMA_IDLEMODE_NO_IDLE 0x1
+#define DMA_IDLEMODE_FORCE_IDLE 0x0
+
/* Chaining modes*/
#ifndef CONFIG_ARCH_OMAP1
#define OMAP_DMA_STATIC_CHAIN 0x1
diff --git a/arch/arm/plat-omap/include/mach/io.h b/arch/arm/plat-omap/include/mach/io.h
index 3b281472056..73f483d56ca 100644
--- a/arch/arm/plat-omap/include/mach/io.h
+++ b/arch/arm/plat-omap/include/mach/io.h
@@ -201,7 +201,7 @@
#define OMAP2_IO_ADDRESS(pa) IOMEM(__OMAP2_IO_ADDRESS(pa))
#ifdef __ASSEMBLER__
-#define IOMEM(x) x
+#define IOMEM(x) (x)
#else
#define IOMEM(x) ((void __force __iomem *)(x))
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 4cf449fa2cb..4a030139901 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -298,7 +298,7 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
if ((start <= da) && (da < start + bytes)) {
dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
__func__, start, da, bytes);
-
+ iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
}
}
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 65006df3f1b..4ea73804d21 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -133,7 +133,12 @@ void __init omap_detect_sram(void)
if (cpu_is_omap34xx()) {
omap_sram_base = OMAP3_SRAM_PUB_VA;
omap_sram_start = OMAP3_SRAM_PUB_PA;
- omap_sram_size = 0x8000; /* 32K */
+ if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
+ (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
+ omap_sram_size = 0x7000; /* 28K */
+ } else {
+ omap_sram_size = 0x8000; /* 32K */
+ }
} else {
omap_sram_base = OMAP2_SRAM_PUB_VA;
omap_sram_start = OMAP2_SRAM_PUB_PA;
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 74bb7cb5da4..0761766b183 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
-obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o
+obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += dev-audio.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h
index b5b9c4d46e9..2e170827e0b 100644
--- a/arch/arm/plat-s3c/include/plat/devs.h
+++ b/arch/arm/plat-s3c/include/plat/devs.h
@@ -37,6 +37,7 @@ extern struct platform_device s3c_device_i2c1;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
+extern struct platform_device s3c_device_iis;
extern struct platform_device s3c_device_hwmon;
extern struct platform_device s3c_device_hsmmc0;
extern struct platform_device s3c_device_hsmmc1;
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 636cb12711d..579a165c282 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
obj-$(CONFIG_PM) += sleep.o
-obj-$(CONFIG_HAVE_PWM) += pwm.o
+obj-$(CONFIG_S3C24XX_PWM) += pwm.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_S3C24XX_ADC) += adc.o
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index 9edf7894eed..da7a61728c1 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -12,8 +12,7 @@
*/
#include <linux/kernel.h>
-
-#include <mach/hardware.h>
+#include <linux/gpio.h>
#include <mach/spi.h>
#include <mach/regs-gpio.h>
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index f34d0fc69ad..86b9edc6741 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -12,8 +12,7 @@
*/
#include <linux/kernel.h>
-
-#include <mach/hardware.h>
+#include <linux/gpio.h>
#include <mach/spi.h>
#include <mach/regs-gpio.h>
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 8a5bd7a9c6f..b86e19c9b5b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
+ select HAVE_PERF_COUNTERS
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0409d981fd3..00a57af79af 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -121,10 +121,72 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+/*
+ * 64-bit atomic ops
+ */
+typedef struct {
+ volatile long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline long long atomic64_read(atomic64_t *v)
+{
+ long long counter;
+
+ asm("ldd%I1 %M1,%0"
+ : "=e"(counter)
+ : "m"(v->counter));
+ return counter;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ asm volatile("std%I0 %1,%M0"
+ : "=m"(v->counter)
+ : "e"(i));
+}
+
+extern long long atomic64_inc_return(atomic64_t *v);
+extern long long atomic64_dec_return(atomic64_t *v);
+extern long long atomic64_add_return(long long i, atomic64_t *v);
+extern long long atomic64_sub_return(long long i, atomic64_t *v);
+
+static inline long long atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+ atomic64_add_return(i, v);
+}
+
+static inline void atomic64_sub(long long i, atomic64_t *v)
+{
+ atomic64_sub_return(i, v);
+}
+
+static inline void atomic64_inc(atomic64_t *v)
+{
+ atomic64_inc_return(v);
+}
+
+static inline void atomic64_dec(atomic64_t *v)
+{
+ atomic64_dec_return(v);
+}
+
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+
/*****************************************************************************/
/*
* exchange value with memory
*/
+extern uint64_t __xchg_64(uint64_t i, volatile void *v);
+
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define xchg(ptr, x) \
@@ -174,8 +236,10 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define tas(ptr) (xchg((ptr), 1))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
+#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
+#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
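With these additions arch/frv provides the generic 64-bit atomic API on top of out-of-line helpers. A brief sketch of typical use of the operations declared in this hunk; the counter name and the reset-on-negative logic are purely illustrative:

	#include <asm/atomic.h>

	static atomic64_t bytes_handled = ATOMIC64_INIT(0);

	static void account_bytes(long long len)
	{
		/* atomic64_add_return() returns the updated counter value */
		if (atomic64_add_return(len, &bytes_handled) < 0)
			atomic64_set(&bytes_handled, 0);	/* illustrative wrap handling */
	}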
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_counter.h
new file mode 100644
index 00000000000..ccf726e61b2
--- /dev/null
+++ b/arch/frv/include/asm/perf_counter.h
@@ -0,0 +1,17 @@
+/* FRV performance counter support
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PERF_COUNTER_H
+#define _ASM_PERF_COUNTER_H
+
+#define PERF_COUNTER_INDEX_OFFSET 0
+
+#endif /* _ASM_PERF_COUNTER_H */
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
index 7742ec000cc..efd22d9077a 100644
--- a/arch/frv/include/asm/system.h
+++ b/arch/frv/include/asm/system.h
@@ -208,6 +208,8 @@ extern void free_initmem(void);
* - if (*ptr == test) then orig = *ptr; *ptr = test;
* - if (*ptr != test) then orig = *ptr;
*/
+extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define cmpxchg(ptr, test, new) \
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 96d78d5d2c4..4a8fb427ce0 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -341,10 +341,12 @@
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
+#define __NR_rt_tgsigqueueinfo 335
+#define __NR_perf_counter_open 336
#ifdef __KERNEL__
-#define NR_syscalls 335
+#define NR_syscalls 337
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 356e0e327a8..fde1e446b44 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1524,5 +1524,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_counter_open
syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 0316b3c50ef..a89803b58b9 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -67,6 +67,10 @@ EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
+EXPORT_SYMBOL(atomic64_add_return);
+EXPORT_SYMBOL(atomic64_sub_return);
+EXPORT_SYMBOL(__xchg_64);
+EXPORT_SYMBOL(__cmpxchg_64);
EXPORT_SYMBOL(__debug_bug_printk);
EXPORT_SYMBOL(__delay_loops_MHz);
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 08be305c9f4..0a377210c89 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -4,5 +4,5 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
- checksum.o memcpy.o memset.o atomic-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index ee0ac905fb0..5e9e6ab5dd0 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -163,11 +163,10 @@ __cmpxchg_32:
ld.p @(gr11,gr0),gr8
orcr cc7,cc7,cc3
subcc gr8,gr9,gr7,icc0
- bne icc0,#0,1f
+ bnelr icc0,#0
cst.p gr10,@(gr11,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1
beq icc3,#0,0b
-1:
bralr
.size __cmpxchg_32, .-__cmpxchg_32
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
new file mode 100644
index 00000000000..b6194eeac12
--- /dev/null
+++ b/arch/frv/lib/atomic64-ops.S
@@ -0,0 +1,162 @@
+/* kernel atomic64 operations
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ * Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr-regs.h>
+
+ .text
+ .balign 4
+
+
+###############################################################################
+#
+# long long atomic64_inc_return(atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_inc_return
+ .type atomic64_inc_return,@function
+atomic64_inc_return:
+ or.p gr8,gr8,gr10
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ addicc gr9,#1,gr9,icc0
+ addxi gr8,#0,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_inc_return, .-atomic64_inc_return
+
+###############################################################################
+#
+# long long atomic64_dec_return(atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_dec_return
+ .type atomic64_dec_return,@function
+atomic64_dec_return:
+ or.p gr8,gr8,gr10
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ subicc gr9,#1,gr9,icc0
+ subxi gr8,#0,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_dec_return, .-atomic64_dec_return
+
+###############################################################################
+#
+# long long atomic64_add_return(long long i, atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_add_return
+ .type atomic64_add_return,@function
+atomic64_add_return:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ addcc gr9,gr5,gr9,icc0
+ addx gr8,gr4,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_add_return, .-atomic64_add_return
+
+###############################################################################
+#
+# long long atomic64_sub_return(long long i, atomic64_t *v)
+#
+###############################################################################
+ .globl atomic64_sub_return
+ .type atomic64_sub_return,@function
+atomic64_sub_return:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ subcc gr9,gr5,gr9,icc0
+ subx gr8,gr4,gr8,icc0
+ cstd.p gr8,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size atomic64_sub_return, .-atomic64_sub_return
+
+###############################################################################
+#
+# uint64_t __xchg_64(uint64_t i, uint64_t *v)
+#
+###############################################################################
+ .globl __xchg_64
+ .type __xchg_64,@function
+__xchg_64:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3 /* set CC3 to true */
+ cstd.p gr4,@(gr10,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size __xchg_64, .-__xchg_64
+
+###############################################################################
+#
+# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
+#
+###############################################################################
+ .globl __cmpxchg_64
+ .type __cmpxchg_64,@function
+__cmpxchg_64:
+ or.p gr8,gr8,gr4
+ or gr9,gr9,gr5
+0:
+ orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
+ ckeq icc3,cc7
+ ldd.p @(gr12,gr0),gr8 /* LDD.P/ORCR must be atomic */
+ orcr cc7,cc7,cc3
+ subcc gr8,gr4,gr0,icc0
+ subcc.p gr9,gr5,gr0,icc1
+ bnelr icc0,#0
+ bnelr icc1,#0
+ cstd.p gr10,@(gr12,gr0) ,cc3,#1
+ corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
+ beq icc3,#0,0b
+ bralr
+
+ .size __cmpxchg_64, .-__cmpxchg_64
+
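All six routines in this file share one shape: raise the CC3 "store may proceed" condition, load the 64-bit value with ldd.p (the load and the orcr must execute as an atomic pair), compute the new value, attempt the conditional 64-bit store with cstd.p, and branch back to label 0 if the store was aborted because another CPU intervened. Below is a hedged C rendering of that retry loop; the hardware load/conditional-store pair is emulated with a C11 compare-and-swap purely for illustration, so load_locked() and store_conditional() are invented names, not kernel APIs.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Emulate the FRV ldd.p/cstd.p pair: the "locked" load remembers what it
 * saw, and the conditional store only succeeds if nothing changed since. */
static int64_t load_locked(_Atomic int64_t *v, int64_t *snapshot)
{
	*snapshot = atomic_load(v);
	return *snapshot;
}

static int store_conditional(_Atomic int64_t *v, int64_t snapshot, int64_t val)
{
	return atomic_compare_exchange_strong(v, &snapshot, val);
}

static int64_t atomic64_add_return_sketch(int64_t i, _Atomic int64_t *v)
{
	int64_t seen, val;

	do {
		val = load_locked(v, &seen) + i;	/* ldd.p + addcc/addx   */
	} while (!store_conditional(v, seen, val));	/* cstd.p failed: retry */

	return val;	/* the new value, as the assembly returns in gr8/gr9 */
}

int main(void)
{
	_Atomic int64_t v = 5;

	printf("%lld\n", (long long)atomic64_add_return_sketch(3, &v)); /* 8 */
	return 0;
}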
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_counter.c
new file mode 100644
index 00000000000..2000feecd57
--- /dev/null
+++ b/arch/frv/lib/perf_counter.c
@@ -0,0 +1,19 @@
+/* Performance counter handling
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/perf_counter.h>
+
+/*
+ * mark the performance counter as pending
+ */
+void set_perf_counter_pending(void)
+{
+}
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index ebf4e988e78..d5764a3d74a 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -65,7 +65,7 @@ static int __init esi_init (void)
}
if (!esi)
- return -ENODEV;;
+ return -ENODEV;
systab = __va(esi);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index abce2468a40..f1782705b1f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only
*/
-#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
+#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 7053c55b764..e6676fca482 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -192,7 +192,7 @@ struct salinfo_platform_oemdata_parms {
static void
salinfo_work_to_do(struct salinfo_data *data)
{
- down_trylock(&data->mutex);
+ (void)(down_trylock(&data->mutex) ?: 0);
up(&data->mutex);
}
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
index a85cb611ecd..f1268b8e6f9 100644
--- a/arch/ia64/kvm/kvm_lib.c
+++ b/arch/ia64/kvm/kvm_lib.c
@@ -11,5 +11,11 @@
*
*/
#undef CONFIG_MODULES
+#include <linux/module.h>
+#undef CONFIG_KALLSYMS
+#undef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL_GPL
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index a8f84da04b4..bb862fb224f 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -130,7 +130,7 @@ static void collect_interruption(struct kvm_vcpu *vcpu)
if (vdcr & IA64_DCR_PP) {
vpsr |= IA64_PSR_PP;
} else {
- vpsr &= ~IA64_PSR_PP;;
+ vpsr &= ~IA64_PSR_PP;
}
vcpu_set_psr(vcpu, vpsr);
@@ -594,11 +594,11 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu)
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
break;
case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
break;
default:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
}
p->u.pal_data.gr28 = gr28;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15e476..46b02cbcc87 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -406,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val,
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
- unat = &regs->eml_unat;;
+ unat = &regs->eml_unat;
addr += gr_info[regnum];
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4290a429bf7..20b3852f7a6 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
u64 rid;
rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;;
+ rid = rid & RR_RID_MASK;
if (type == D_TLB) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
@@ -518,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
struct thash_cb *hcb = &v->arch.vtlb;
- cch = __vtr_lookup(v, va, is_data);;
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 76645cf6ac5..25831c47c57 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -435,7 +435,8 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
- sprintf(address, "%s^%d", address, geo_slot(geoid));
+ sprintf(address + strlen(address), "^%d",
+ geo_slot(geoid));
}
void __devinit
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fef5b434dad..fad68616af3 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -346,10 +346,12 @@
#define __NR_inotify_init1 333
#define __NR_preadv 334
#define __NR_pwritev 335
+#define __NR_rt_tgsigqueueinfo 336
+#define __NR_perf_counter_open 337
#ifdef __KERNEL__
-#define NR_syscalls 326
+#define NR_syscalls 338
/*
* specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 7408a27199f..e0d2563af4f 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -722,6 +722,8 @@ ENTRY(sys_call_table)
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 335 */
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_counter_open
nr_syscalls=(.-sys_call_table)/4
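On both FRV and MN10300, wiring the new system calls means keeping three things in sync: the __NR_* constant in unistd.h, the .long entry in sys_call_table, and the NR_syscalls count. Until libc grows wrappers, userspace reaches such calls through syscall(2). The snippet below is only an illustration: it falls back to the FRV number added above (other architectures use different values), and it deliberately passes a NULL attribute pointer, so the kernel is expected to return an error rather than open a counter.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Illustrative only: 336 is the FRV value added above; real code should
 * take __NR_perf_counter_open from the kernel headers for its own arch. */
#ifndef __NR_perf_counter_open
#define __NR_perf_counter_open 336
#endif

int main(void)
{
	/* attr = NULL, pid = self, any cpu, no group, no flags: the kernel
	 * should reject this cleanly (EFAULT/EINVAL/ENOSYS). */
	long fd = syscall(__NR_perf_counter_open, NULL, 0, -1, -1, 0);

	if (fd < 0)
		printf("perf_counter_open: %s\n", strerror(errno));
	return 0;
}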
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e15576..0ea0639fcf7 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+#define PERF_COUNTER_INDEX_OFFSET 1
+
/*
* Only override the default definitions in include/linux/perf_counter.h
* if we have hardware PMU support.
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a27d0d5a6f8..1cd02f6073a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -99,7 +99,9 @@ struct kvm_s390_sie_block {
__u8 reservedd0[48]; /* 0x00d0 */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
- __u8 reserved188[120]; /* 0x0188 */
+ __u8 reserved188[24]; /* 0x0188 */
+ __u32 fac; /* 0x01a0 */
+ __u8 reserved1a4[92]; /* 0x01a4 */
} __attribute__((packed));
struct kvm_vcpu_stat {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c18b21d6991..90d9d1ba258 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -25,6 +25,7 @@
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
+#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -69,6 +70,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static unsigned long long *facilities;
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
@@ -288,6 +290,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
vcpu->arch.sie_block->ecb = 2;
vcpu->arch.sie_block->eca = 0xC1002001U;
+ vcpu->arch.sie_block->fac = (int) (long) facilities;
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
@@ -739,11 +742,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
static int __init kvm_s390_init(void)
{
- return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ int ret;
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+ if (ret)
+ return ret;
+
+ /*
+	 * Guests can ask for up to 255+1 double words, so we need a full
+	 * page to hold the maximum number of facilities. On the other hand,
+	 * we only set facilities that are known to work in KVM.
+ */
+ facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
+ if (!facilities) {
+ kvm_exit();
+ return -ENOMEM;
+ }
+ stfle(facilities, 1);
+ facilities[0] &= 0xff00fff3f0700000ULL;
+ return 0;
}
static void __exit kvm_s390_exit(void)
{
+ free_page((unsigned long) facilities);
kvm_exit();
}
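The new module init allocates one zeroed DMA page for the facility list because STFLE can report up to 256 doublewords, then advertises to guests only the bits KVM knows how to virtualize by AND-ing the host result with a fixed mask. A hedged userspace sketch of that masking step follows; the host doubleword is invented, since STFLE itself is a privileged s390 instruction.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend this doubleword came back from stfle on the host. */
	uint64_t host_fac0  = 0xfffffff3f47cffffULL;	/* made-up value   */
	uint64_t kvm_mask0  = 0xff00fff3f0700000ULL;	/* mask from above */
	uint64_t guest_fac0 = host_fac0 & kvm_mask0;	/* what guests see */

	printf("host  %016llx\nguest %016llx\n",
	       (unsigned long long)host_fac0,
	       (unsigned long long)guest_fac0);
	return 0;
}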
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 93ecd06e1a7..d426aac8095 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -158,7 +158,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_stfl++;
/* only pass the facility bits, which we can handle */
- facility_list &= 0xfe00fff3;
+ facility_list &= 0xff00fff3;
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&facility_list, sizeof(facility_list));
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 8ece0b5bd02..39224b57c6e 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -61,10 +61,6 @@ config EARLY_PRINTK
select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
the kernel command line option to toggle back and forth.
-config DEBUG_BOOTMEM
- depends on DEBUG_KERNEL
- bool "Debug BOOTMEM initialization"
-
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
index 9c3a33210d6..180455642a4 100644
--- a/arch/sh/boards/mach-se/7206/io.c
+++ b/arch/sh/boards/mach-se/7206/io.c
@@ -50,7 +50,7 @@ unsigned char se7206_inb_p(unsigned long port)
unsigned short se7206_inw(unsigned long port)
{
- return *port2adr(port);;
+ return *port2adr(port);
}
void se7206_outb(unsigned char value, unsigned long port)
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 9cd04bd558b..c050a8d76df 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -23,6 +23,8 @@
#include <media/sh_mobile_ceu.h>
#include <asm/io.h>
#include <asm/heartbeat.h>
+#include <asm/sh_eth.h>
+#include <asm/clock.h>
#include <asm/sh_keysc.h>
#include <cpu/sh7724.h>
#include <mach-se/mach/se7724.h>
@@ -272,6 +274,34 @@ static struct platform_device keysc_device = {
},
};
+/* SH Eth */
+static struct resource sh_eth_resources[] = {
+ [0] = {
+ .start = SH_ETH_ADDR,
+ .end = SH_ETH_ADDR + 0x1FC,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 91,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+ },
+};
+
+struct sh_eth_plat_data sh_eth_plat = {
+ .phy = 0x1f, /* SMSC LAN8187 */
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+};
+
+static struct platform_device sh_eth_device = {
+ .name = "sh-eth",
+ .id = 0,
+ .dev = {
+ .platform_data = &sh_eth_plat,
+ },
+ .num_resources = ARRAY_SIZE(sh_eth_resources),
+ .resource = sh_eth_resources,
+};
+
static struct platform_device *ms7724se_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
@@ -280,8 +310,57 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
+ &sh_eth_device,
};
+#define EEPROM_OP 0xBA206000
+#define EEPROM_ADR 0xBA206004
+#define EEPROM_DATA 0xBA20600C
+#define EEPROM_STAT 0xBA206010
+#define EEPROM_STRT 0xBA206014
+static int __init sh_eth_is_eeprom_ready(void)
+{
+ int t = 10000;
+
+ while (t--) {
+ if (!ctrl_inw(EEPROM_STAT))
+ return 1;
+ cpu_relax();
+ }
+
+	printk(KERN_ERR "ms7724se cannot access the eeprom\n");
+ return 0;
+}
+
+static void __init sh_eth_init(void)
+{
+ int i;
+ u16 mac[3];
+
+ /* check EEPROM status */
+ if (!sh_eth_is_eeprom_ready())
+ return;
+
+ /* read MAC addr from EEPROM */
+ for (i = 0 ; i < 3 ; i++) {
+ ctrl_outw(0x0, EEPROM_OP); /* read */
+ ctrl_outw(i*2, EEPROM_ADR);
+ ctrl_outw(0x1, EEPROM_STRT);
+ if (!sh_eth_is_eeprom_ready())
+ return;
+
+ mac[i] = ctrl_inw(EEPROM_DATA);
+ mac[i] = ((mac[i] & 0xFF) << 8) | (mac[i] >> 8); /* swap */
+ }
+
+ /* reset sh-eth */
+ ctrl_outl(0x1, SH_ETH_ADDR + 0x0);
+
+ /* set MAC addr */
+ ctrl_outl(((mac[0] << 16) | (mac[1])), SH_ETH_MAHR);
+ ctrl_outl((mac[2]), SH_ETH_MALR);
+}
+
#define SW4140 0xBA201000
#define FPGA_OUT 0xBA200400
#define PORT_HIZA 0xA4050158
@@ -302,7 +381,8 @@ static int __init devices_setup(void)
ctrl_outw(ctrl_inw(FPGA_OUT) &
~((1 << 1) | /* LAN */
(1 << 6) | /* VIDEO DAC */
- (1 << 12)), /* USB0 */
+ (1 << 12) | /* USB0 */
+ (1 << 14)), /* RMII */
FPGA_OUT);
/* enable IRQ 0,1,2 */
@@ -374,7 +454,7 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO0_CLK, NULL);
gpio_request(GPIO_FN_VIO0_FLD, NULL);
gpio_request(GPIO_FN_VIO0_HD, NULL);
- platform_resource_setup_memory(&ceu0_device, "ceu", 4 << 20);
+ platform_resource_setup_memory(&ceu0_device, "ceu0", 4 << 20);
/* enable CEU1 */
gpio_request(GPIO_FN_VIO1_D7, NULL);
@@ -389,7 +469,7 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_VIO1_HD, NULL);
gpio_request(GPIO_FN_VIO1_VD, NULL);
gpio_request(GPIO_FN_VIO1_CLK, NULL);
- platform_resource_setup_memory(&ceu1_device, "ceu", 4 << 20);
+ platform_resource_setup_memory(&ceu1_device, "ceu1", 4 << 20);
/* KEYSC */
gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);
@@ -404,6 +484,28 @@ static int __init devices_setup(void)
gpio_request(GPIO_FN_KEYOUT1, NULL);
gpio_request(GPIO_FN_KEYOUT0, NULL);
+ /*
+ * enable SH-Eth
+ *
+	 * please remove the J33 pin from your board!
+	 *
+	 * The ms7724 board should not use the GPIO_FN_LNKSTA pin,
+	 * so PTX5 is configured as an input pin here instead.
+ */
+ gpio_request(GPIO_FN_RMII_RXD0, NULL);
+ gpio_request(GPIO_FN_RMII_RXD1, NULL);
+ gpio_request(GPIO_FN_RMII_TXD0, NULL);
+ gpio_request(GPIO_FN_RMII_TXD1, NULL);
+ gpio_request(GPIO_FN_RMII_REF_CLK, NULL);
+ gpio_request(GPIO_FN_RMII_TX_EN, NULL);
+ gpio_request(GPIO_FN_RMII_RX_ER, NULL);
+ gpio_request(GPIO_FN_RMII_CRS_DV, NULL);
+ gpio_request(GPIO_FN_MDIO, NULL);
+ gpio_request(GPIO_FN_MDC, NULL);
+ gpio_request(GPIO_PTX5, NULL);
+ gpio_direction_input(GPIO_PTX5);
+ sh_eth_init();
+
if (sw & SW41_B) {
/* SVGA */
lcdc_info.ch[0].lcd_cfg.xres = 800;
@@ -437,7 +539,7 @@ static int __init devices_setup(void)
}
return platform_add_devices(ms7724se_devices,
- ARRAY_SIZE(ms7724se_devices));
+ ARRAY_SIZE(ms7724se_devices));
}
device_initcall(devices_setup);
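sh_eth_init() reads the station address as three 16-bit words from the on-board EEPROM, byte-swaps each word, and then packs the first four bytes into MAHR and the last two into MALR. A self-contained illustration of that swap-and-pack arithmetic is sketched below; the EEPROM words are invented so the resulting MAC is 00:11:22:33:44:55.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented EEPROM contents (byte-swapped on the wire). */
	uint16_t mac[3] = { 0x1100, 0x3322, 0x5544 };
	uint32_t mahr, malr;
	int i;

	for (i = 0; i < 3; i++)		/* same swap as the board code */
		mac[i] = (uint16_t)(((mac[i] & 0xFF) << 8) | (mac[i] >> 8));

	mahr = ((uint32_t)mac[0] << 16) | mac[1];	/* high 4 bytes */
	malr = mac[2];					/* low 2 bytes  */

	printf("MAHR=%08x MALR=%08x\n", mahr, malr);	/* 00112233 00004455 */
	return 0;
}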
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index da627d22c00..b18cfd39cac 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -309,7 +309,7 @@ CONFIG_ZERO_PAGE_OFFSET=0x00001000
CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp"
+CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 earlyprintk=serial ip=on root=/dev/nfs ip=dhcp"
#
# Bus options
@@ -858,7 +858,35 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y
#
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
-# CONFIG_FB is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_SH_MOBILE_LCDC=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -870,6 +898,27 @@ CONFIG_VIDEO_SH_MOBILE_CEU=y
# Console display driver support
#
CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_LOGO_SUPERH_MONO is not set
+CONFIG_LOGO_SUPERH_VGA16=y
+# CONFIG_LOGO_SUPERH_CLUT224 is not set
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index 3840270283e..3ee783a0a07 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.30
-# Thu Jun 18 16:09:05 2009
+# Mon Jun 29 16:28:43 2009
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@@ -14,6 +14,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@@ -28,7 +29,9 @@ CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -88,10 +91,12 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
#
# Performance Counters
#
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
@@ -107,6 +112,10 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -119,7 +128,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -584,7 +593,6 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_LIBFC is not set
# CONFIG_LIBFCOE is not set
# CONFIG_SCSI_DEBUG is not set
@@ -624,7 +632,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_AX88796 is not set
# CONFIG_STNIC is not set
-# CONFIG_SH_ETH is not set
+CONFIG_SH_ETH=y
CONFIG_SMC91X=y
# CONFIG_ENC28J60 is not set
# CONFIG_ETHOC is not set
@@ -801,6 +809,11 @@ CONFIG_SPI_BITBANG=y
#
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set
@@ -851,6 +864,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@@ -1196,6 +1211,7 @@ CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1260,6 +1276,7 @@ CONFIG_FS_MBCACHE=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
CONFIG_FILE_LOCKING=y
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h
index a8153c2aa6f..61c2b40c802 100644
--- a/arch/sh/include/asm/perf_counter.h
+++ b/arch/sh/include/asm/perf_counter.h
@@ -2,6 +2,6 @@
#define __ASM_SH_PERF_COUNTER_H
/* SH only supports software counters through this interface. */
-#define set_perf_counter_pending() do { } while (0)
+static inline void set_perf_counter_pending(void) {}
#endif /* __ASM_SH_PERF_COUNTER_H */
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 5bc34681d99..6f83f2cc45c 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/err.h>
#include <asm/ptrace.h>
/* The system call number is given by the user in R3 */
diff --git a/arch/sh/include/mach-se/mach/se7724.h b/arch/sh/include/mach-se/mach/se7724.h
index 74164b60d0d..29514a39d0f 100644
--- a/arch/sh/include/mach-se/mach/se7724.h
+++ b/arch/sh/include/mach-se/mach/se7724.h
@@ -20,6 +20,11 @@
*/
#include <asm/addrspace.h>
+/* SH Eth */
+#define SH_ETH_ADDR (0xA4600000)
+#define SH_ETH_MAHR (SH_ETH_ADDR + 0x1C0)
+#define SH_ETH_MALR (SH_ETH_ADDR + 0x1C8)
+
#define PA_LED (0xba203000) /* 8bit LED */
#define IRQ_MODE (0xba200010)
#define IRQ0_SR (0xba200014)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index cc8ddbdf3d7..71925946f1e 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,12 +15,28 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/marker.h>
+#include <linux/perf_counter.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+ int ret = 0;
+
+#ifdef CONFIG_KPROBES
+ if (!user_mode(regs)) {
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ preempt_enable();
+ }
+#endif
+
+ return ret;
+}
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -87,13 +103,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
return;
}
+ mm = tsk->mm;
+
+ if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
+ return;
+
/* Only enable interrupts if they were on before the fault */
- if ((regs->sr & SR_IMASK) != SR_IMASK) {
- trace_hardirqs_on();
+ if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
- }
- mm = tsk->mm;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt or have no user
@@ -141,10 +160,15 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
tsk->min_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
up_read(&mm->mmap_sem);
return;
@@ -245,22 +269,6 @@ do_sigbus:
goto no_context;
}
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
- int ret = 0;
-
-#ifdef CONFIG_KPROBES
- if (!user_mode(regs)) {
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, trap))
- ret = 1;
- preempt_enable();
- }
-#endif
-
- return ret;
-}
-
/*
* Called with interrupts disabled.
*/
@@ -273,12 +281,7 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
pmd_t *pmd;
pte_t *pte;
pte_t entry;
- int ret = 0;
-
- if (notify_page_fault(regs, lookup_exception_vector()))
- goto out;
-
- ret = 1;
+ int ret = 1;
/*
* We don't take page faults for P1, P2, and parts of P4, these
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fcbb6e135ce..3ce40ea3482 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/perf_counter.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */
local_irq_enable();
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -195,10 +198,16 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
tsk->min_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
/* If we get here, the page fault has been handled. Do the TLB refill
now from the newly-setup PTE, to avoid having to fault again right
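With these hunks, both SH fault paths feed the software perf counters (PERF_COUNT_SW_PAGE_FAULTS plus the MIN/MAJ variants) the same way other architectures do, and the counts become visible through the counter syscall added elsewhere in this series. The sketch below uses the present-day perf_event_open() spelling of that interface; the 2009 perf_counter_attr layout was analogous, but treat the exact names here as an assumption rather than a quote from this patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;		/* software counter        */
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;	/* bumped in do_page_fault */

	/* Count faults of the calling task on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Touch some fresh memory, then read how many faults were counted. */
	volatile char *p = sbrk(1 << 20);
	for (long i = 0; i < (1 << 20); i += 4096)
		p[i] = 1;

	read(fd, &count, sizeof(count));
	printf("page faults: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}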
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d1430ef6b4f..c07f7220590 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1913,25 +1913,14 @@ config DMAR_DEFAULT_ON
recommended you say N here while the DMAR code remains
experimental.
-config DMAR_GFX_WA
- def_bool y
- prompt "Support for Graphics workaround"
- depends on DMAR
- ---help---
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA.
-
config DMAR_FLOPPY_WA
def_bool y
depends on DMAR
---help---
- Floppy disk drivers are know to bypass DMA API calls
+ Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
workaround will setup a 1:1 mapping for the first
- 16M to make floppy (an ISA device) work.
+ 16MiB to make floppy (an ISA device) work.
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 418e632d4a8..7a1065958ba 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -8,7 +8,7 @@
#ifdef __KERNEL__
-#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -16,10 +16,10 @@
& ~(CONFIG_PHYSICAL_ALIGN - 1))
/* Minimum kernel alignment, as a power of two */
-#ifdef CONFIG_x86_64
+#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
#else
-#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT+1)
+#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER)
#endif
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 927958d13c1..1ff685ca221 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -91,7 +91,7 @@ extern void pci_iommu_alloc(void);
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-#if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG)
+#if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a..103f1ddb0d8 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@
#else /* ...!ASSEMBLY */
+#include <linux/kernel.h>
#include <linux/stringify.h>
#ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do { \
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+ return NULL;
+}
+#endif
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 5fb33e160ea..fa64e401589 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET 0
+
#else
static inline void init_hw_perf_counters(void) { }
static inline void perf_counters_lapic_init(void) { }
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 49fb3ecf3bb..621f56d7312 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -22,7 +22,14 @@ extern int reboot_force;
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
-#define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
-#define round_down(x, y) ((x) & ~((y) - 1))
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x,y) ((__typeof__(x))((y)-1))
+#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
+#define round_down(x,y) ((x) & ~__round_mask(x,y))
#endif /* _ASM_X86_PROTO_H */
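As the comment above explains, the mask is forced into the type of x so the ~ in round_down stays as wide as the result (important for 64-bit arguments), and each macro argument is evaluated once. A standalone demonstration with the macros copied verbatim:

#include <stdio.h>
#include <stdint.h>

/* Copies of the macros introduced above, for demonstration only. */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y)     ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y)   ((x) & ~__round_mask(x, y))

int main(void)
{
	uint64_t addr = 0x100002345ULL;	/* above 4GiB, so mask width matters */

	printf("up   %#llx\n", (unsigned long long)round_up(addr, 0x1000));
	printf("down %#llx\n", (unsigned long long)round_down(addr, 0x1000));
	/* Already-aligned values stay put: round_up(0x2000, 0x1000) == 0x2000. */
	printf("same %#x\n", round_up(0x2000, 0x1000));
	return 0;
}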
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e5b27d8f1b4..28e5f595604 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
unsigned bits;
+ int cpu = smp_processor_id();
bits = c->x86_coreid_bits;
-
/* Low order bits define the core id (index of core in socket) */
c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
/* Convert the initial APIC ID into the socket ID */
c->phys_proc_id = c->initial_apicid >> bits;
+ /* use socket ID also for last level cache */
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b26d4deada..f1961c07af9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -848,9 +848,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
numa_add_cpu(smp_processor_id());
#endif
-
- /* Cap the iomem address space to what is addressable on all CPUs */
- iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968b..af425b83202 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1117,7 +1117,7 @@ static void mcheck_timer(unsigned long data)
*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
t->expires = jiffies + *n;
- add_timer(t);
+ add_timer_on(t, smp_processor_id());
}
static void mce_do_trigger(struct work_struct *work)
@@ -1321,7 +1321,7 @@ static void mce_init_timer(void)
return;
setup_timer(t, mcheck_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
- add_timer(t);
+ add_timer_on(t, smp_processor_id());
}
/*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 76dfef23f78..d4cf4ce19aa 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids
[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
+ [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
err = checking_wrmsrl(hwc->counter_base + idx,
(u64)(-left) & x86_pmu.counter_mask);
+ perf_counter_update_userpage(counter);
+
return ret;
}
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
if (!x86_pmu.num_counters_fixed)
return -1;
- /*
- * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86_model == 28)
- return -1;
-
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1041,6 +1036,8 @@ try_generic:
x86_perf_counter_set_period(counter, hwc, idx);
x86_pmu.enable(hwc, idx);
+ perf_counter_update_userpage(counter);
+
return 0;
}
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
x86_perf_counter_update(counter, hwc, idx);
cpuc->counters[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
+
+ perf_counter_update_userpage(counter);
}
/*
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void)
*/
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
/*
* Install the hw-cache-events table:
*/
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
perf_max_counters = x86_pmu.num_counters;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
perf_counter_mask |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = perf_counter_mask;
perf_counters_lapic_init();
register_die_notifier(&perf_counter_nmi_notifier);
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 95ea5fa7d44..c8405718a4c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,6 +22,7 @@
#include "dumpstack.h"
int panic_on_unrecovered_nmi;
+int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 7271fa33d79..c4ca89d9aaf 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1383,6 +1383,8 @@ static unsigned long ram_alignment(resource_size_t pos)
return 32*1024*1024;
}
+#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
+
void __init e820_reserve_resources_late(void)
{
int i;
@@ -1400,17 +1402,19 @@ void __init e820_reserve_resources_late(void)
* avoid stolen RAM:
*/
for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *entry = &e820_saved.map[i];
- resource_size_t start, end;
+ struct e820entry *entry = &e820.map[i];
+ u64 start, end;
if (entry->type != E820_RAM)
continue;
start = entry->addr + entry->size;
- end = round_up(start, ram_alignment(start));
- if (start == end)
+ end = round_up(start, ram_alignment(start)) - 1;
+ if (end > MAX_RESOURCE_SIZE)
+ end = MAX_RESOURCE_SIZE;
+ if (start >= end)
continue;
- reserve_region_with_split(&iomem_resource, start,
- end - 1, "RAM buffer");
+ reserve_region_with_split(&iomem_resource, start, end,
+ "RAM buffer");
}
}
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 47630479b06..1a041bcf506 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -211,11 +211,11 @@ static __init int iommu_setup(char *p)
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
+#endif
if (!strncmp(p, "pt", 2)) {
iommu_pass_through = 1;
return 1;
}
-#endif
gart_parse_options(p);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index be5ae80f897..de2cab13284 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -289,6 +289,20 @@ void * __init extend_brk(size_t size, size_t align)
return ret;
}
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+ if (direct_gbpages && cpu_has_gbpages)
+ printk(KERN_INFO "Using GB pages for direct mapping\n");
+ else
+ direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
static void __init reserve_brk(void)
{
if (_brk_end > _brk_start)
@@ -871,6 +885,8 @@ void __init setup_arch(char **cmdline_p)
reserve_brk();
+ init_gbpages();
+
/* max_pfn_mapped is updated here */
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 9c3f0823e6a..29a3eef7cf4 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
}
/*
- * Remap allocator
+ * Large page remap allocator
*
* This allocator uses PMD page as unit. A PMD page is allocated for
* each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
* better than only using 4k mappings while still being NUMA friendly.
*/
#ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+struct pcpul_ent {
+ unsigned int cpu;
+ void *ptr;
+};
+
+static size_t pcpul_size;
+static struct pcpul_ent *pcpul_map;
+static struct vm_struct pcpul_vm;
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
size_t off = (size_t)pageno << PAGE_SHIFT;
- if (off >= pcpur_size)
+ if (off >= pcpul_size)
return NULL;
- return virt_to_page(pcpur_ptrs[cpu] + off);
+ return virt_to_page(pcpul_map[cpu].ptr + off);
}
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
- static struct vm_struct vm;
- size_t ptrs_size, dyn_size;
+ size_t map_size, dyn_size;
unsigned int cpu;
+ int i, j;
ssize_t ret;
- /*
- * If large page isn't supported, there's no benefit in doing
- * this. Also, on non-NUMA, embedding is better.
- *
- * NOTE: disabled for now.
- */
- if (true || !cpu_has_pse || !pcpu_need_numa())
+ if (!chosen) {
+ size_t vm_size = VMALLOC_END - VMALLOC_START;
+ size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+ /* on non-NUMA, embedding is better */
+ if (!pcpu_need_numa())
+ return -EINVAL;
+
+ /* don't consume more than 20% of vmalloc area */
+ if (tot_size > vm_size / 5) {
+ pr_info("PERCPU: too large chunk size %zuMB for "
+ "large page remap\n", tot_size >> 20);
+ return -EINVAL;
+ }
+ }
+
+ /* need PSE */
+ if (!cpu_has_pse) {
+ pr_warning("PERCPU: lpage allocator requires PSE\n");
return -EINVAL;
+ }
/*
* Currently supports only single page. Supporting multiple
* pages won't be too difficult if it ever becomes necessary.
*/
- pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+ pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
PERCPU_DYNAMIC_RESERVE);
- if (pcpur_size > PMD_SIZE) {
+ if (pcpul_size > PMD_SIZE) {
pr_warning("PERCPU: static data is larger than large page, "
"can't use large page\n");
return -EINVAL;
}
- dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+ dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
/* allocate pointer array and alloc large pages */
- ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
- pcpur_ptrs = alloc_bootmem(ptrs_size);
+ map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+ pcpul_map = alloc_bootmem(map_size);
for_each_possible_cpu(cpu) {
- pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
- if (!pcpur_ptrs[cpu])
+ pcpul_map[cpu].cpu = cpu;
+ pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
+ PMD_SIZE);
+ if (!pcpul_map[cpu].ptr) {
+ pr_warning("PERCPU: failed to allocate large page "
+ "for cpu%u\n", cpu);
goto enomem;
+ }
/*
- * Only use pcpur_size bytes and give back the rest.
+ * Only use pcpul_size bytes and give back the rest.
*
* Ingo: The 2MB up-rounding bootmem is needed to make
* sure the partial 2MB page is still fully RAM - it's
* not well-specified to have a PAT-incompatible area
* (unmapped RAM, device memory, etc.) in that hole.
*/
- free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
- PMD_SIZE - pcpur_size);
+ free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
+ PMD_SIZE - pcpul_size);
- memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+ memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
}
/* allocate address and map */
- vm.flags = VM_ALLOC;
- vm.size = num_possible_cpus() * PMD_SIZE;
- vm_area_register_early(&vm, PMD_SIZE);
+ pcpul_vm.flags = VM_ALLOC;
+ pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+ vm_area_register_early(&pcpul_vm, PMD_SIZE);
for_each_possible_cpu(cpu) {
- pmd_t *pmd;
+ pmd_t *pmd, pmd_v;
- pmd = populate_extra_pmd((unsigned long)vm.addr
- + cpu * PMD_SIZE);
- set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
- PAGE_KERNEL_LARGE));
+ pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
+ cpu * PMD_SIZE);
+ pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
+ PAGE_KERNEL_LARGE);
+ set_pmd(pmd, pmd_v);
}
/* we're ready, commit */
pr_info("PERCPU: Remapped at %p with large pages, static data "
- "%zu bytes\n", vm.addr, static_size);
+ "%zu bytes\n", pcpul_vm.addr, static_size);
- ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+ ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
- PMD_SIZE, vm.addr, NULL);
- goto out_free_ar;
+ PMD_SIZE, pcpul_vm.addr, NULL);
+
+ /* sort pcpul_map array for pcpu_lpage_remapped() */
+ for (i = 0; i < num_possible_cpus() - 1; i++)
+ for (j = i + 1; j < num_possible_cpus(); j++)
+ if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
+ struct pcpul_ent tmp = pcpul_map[i];
+ pcpul_map[i] = pcpul_map[j];
+ pcpul_map[j] = tmp;
+ }
+
+ return ret;
enomem:
for_each_possible_cpu(cpu)
- if (pcpur_ptrs[cpu])
- free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
- ret = -ENOMEM;
-out_free_ar:
- free_bootmem(__pa(pcpur_ptrs), ptrs_size);
- return ret;
+ if (pcpul_map[cpu].ptr)
+ free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
+ free_bootmem(__pa(pcpul_map), map_size);
+ return -ENOMEM;
+}
+
+/**
+ * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
+ * @kaddr: the kernel address in question
+ *
+ * Determine whether @kaddr falls in the pcpul recycled area. This is
+ * used by pageattr to detect VM aliases and break up the pcpu PMD
+ * mapping such that the same physical page is not mapped under
+ * different attributes.
+ *
+ * The recycled area is always at the tail of a partially used PMD
+ * page.
+ *
+ * RETURNS:
+ * Address of corresponding remapped pcpu address if match is found;
+ * otherwise, NULL.
+ */
+void *pcpu_lpage_remapped(void *kaddr)
+{
+ void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
+ unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
+ int left = 0, right = num_possible_cpus() - 1;
+ int pos;
+
+ /* pcpul in use at all? */
+ if (!pcpul_map)
+ return NULL;
+
+ /* okay, perform binary search */
+ while (left <= right) {
+ pos = (left + right) / 2;
+
+ if (pcpul_map[pos].ptr < pmd_addr)
+ left = pos + 1;
+ else if (pcpul_map[pos].ptr > pmd_addr)
+ right = pos - 1;
+ else {
+ /* it shouldn't be in the area for the first chunk */
+ WARN_ON(offset < pcpul_size);
+
+ return pcpul_vm.addr +
+ pcpul_map[pos].cpu * PMD_SIZE + offset;
+ }
+ }
+
+ return NULL;
}
#else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
return -EINVAL;
}
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
* mapping so that it can use PMD mapping without additional TLB
* pressure.
*/
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
* this. Also, embedding allocation doesn't play well with
* NUMA.
*/
- if (!cpu_has_pse || pcpu_need_numa())
+ if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
return -EINVAL;
return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
void *ptr;
ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
- if (!ptr)
+ if (!ptr) {
+ pr_warning("PERCPU: failed to allocate "
+ "4k page for cpu%u\n", cpu);
goto enomem;
+ }
memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -333,6 +416,16 @@ out_free_ar:
return ret;
}
+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
+
+static int __init percpu_alloc_setup(char *str)
+{
+ strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+ return 0;
+}
+early_param("percpu_alloc", percpu_alloc_setup);
+
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu)
#endif
}
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
void __init setup_per_cpu_areas(void)
{
size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void)
* of large page mappings. Please read comments on top of
* each allocator for details.
*/
- ret = setup_pcpu_remap(static_size);
- if (ret < 0)
- ret = setup_pcpu_embed(static_size);
+ ret = -EINVAL;
+ if (strlen(pcpu_chosen_alloc)) {
+ if (strcmp(pcpu_chosen_alloc, "4k")) {
+ if (!strcmp(pcpu_chosen_alloc, "lpage"))
+ ret = setup_pcpu_lpage(static_size, true);
+ else if (!strcmp(pcpu_chosen_alloc, "embed"))
+ ret = setup_pcpu_embed(static_size, true);
+ else
+ pr_warning("PERCPU: unknown allocator %s "
+ "specified\n", pcpu_chosen_alloc);
+ if (ret < 0)
+ pr_warning("PERCPU: %s allocator failed (%zd), "
+ "falling back to 4k\n",
+ pcpu_chosen_alloc, ret);
+ }
+ } else {
+ ret = setup_pcpu_lpage(static_size, false);
+ if (ret < 0)
+ ret = setup_pcpu_embed(static_size, false);
+ }
if (ret < 0)
ret = setup_pcpu_4k(static_size);
if (ret < 0)
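pcpu_lpage_remapped() can use a binary search because the map is sorted by pointer right after the first chunk is set up (the simple O(n^2) sort above runs once at boot over at most NR_CPUS entries). A hedged standalone rendering of that lookup, with the kernel types replaced by plain C equivalents and invented addresses:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE	(2UL << 20)		/* 2MiB large page */
#define PMD_MASK	(~(PMD_SIZE - 1))

struct ent { unsigned int cpu; void *ptr; };	/* mirrors struct pcpul_ent */

/* Return the remapped alias of kaddr, or NULL if it is not in the map.
 * 'map' must be sorted by ptr; 'base' stands in for pcpul_vm.addr. */
static void *lpage_remapped(struct ent *map, int n, void *base, void *kaddr)
{
	uintptr_t pmd_addr = (uintptr_t)kaddr & PMD_MASK;
	uintptr_t offset   = (uintptr_t)kaddr & ~PMD_MASK;
	int left = 0, right = n - 1;

	while (left <= right) {
		int pos = (left + right) / 2;

		if ((uintptr_t)map[pos].ptr < pmd_addr)
			left = pos + 1;
		else if ((uintptr_t)map[pos].ptr > pmd_addr)
			right = pos - 1;
		else
			return (char *)base + map[pos].cpu * PMD_SIZE + offset;
	}
	return NULL;
}

int main(void)
{
	struct ent map[2] = { { 0, (void *)0x40000000 }, { 1, (void *)0x40400000 } };
	void *hit = lpage_remapped(map, 2, (void *)0xf0000000, (void *)0x40401234);

	printf("%p\n", hit);	/* base + 1*PMD_SIZE + 0x1234 = 0xf0201234 */
	return 0;
}

The same file also adds the percpu_alloc= early parameter, so booting with percpu_alloc=4k, percpu_alloc=embed or percpu_alloc=lpage forces a specific first-chunk allocator instead of the automatic lpage, then embed, then 4k fallback.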
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 124d40c575d..8ccabb8a2f6 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -711,7 +711,6 @@ uv_activation_descriptor_init(int node, int pnode)
unsigned long pa;
unsigned long m;
unsigned long n;
- unsigned long mmr_image;
struct bau_desc *adp;
struct bau_desc *ad2;
@@ -727,12 +726,8 @@ uv_activation_descriptor_init(int node, int pnode)
n = pa >> uv_nshift;
m = pa & uv_mmask;
- mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
- if (mmr_image) {
- uv_write_global_mmr64(pnode, (unsigned long)
- UVH_LB_BAU_SB_DESCRIPTOR_BASE,
- (n << UV_DESC_BASE_PNODE_SHIFT | m));
- }
+ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
+ (n << UV_DESC_BASE_PNODE_SHIFT | m));
/*
* initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a0f48f5671c..5204332f475 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -346,6 +346,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
+ if (panic_on_io_nmi)
+ panic("NMI IOCK error: Not continuing");
+
/* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & 0xf) | 8;
outb(reason, 0x61);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5c3d6e81a7d..7030b5f911b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2157,7 +2157,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
else
/* 32 bits PSE 4MB page */
context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT32E_ROOT_LEVEL:
context->rsvd_bits_mask[0][2] =
@@ -2170,7 +2170,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
case PT64_ROOT_LEVEL:
context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2186,7 +2186,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = ~0ull;
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[1][0];
break;
}
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 258e4591e1c..67785f63539 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -281,7 +281,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *shadow_page;
- u64 spte, *sptep;
+ u64 spte, *sptep = NULL;
int direct;
gfn_t table_gfn;
int r;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e770bf349ec..356a0ce85c6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3012,6 +3012,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3198,6 +3204,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
+ [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRST] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMRESUME] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMOFF] = handle_vmx_insn,
+ [EXIT_REASON_VMON] = handle_vmx_insn,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 249540f9851..fe5474aec41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -898,6 +898,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_VM_HSAVE_PA:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
+ case MSR_K7_EVNTSEL0:
data = 0;
break;
case MSR_MTRRcap:
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
index c1b6c232e02..616de4628d6 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/x86_emulate.c
@@ -1361,7 +1361,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
return 0;
}
-void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
+static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
/*
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f4568605d7d..ff485d36118 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -55,8 +55,10 @@ static void delay_tsc(unsigned long loops)
preempt_disable();
cpu = smp_processor_id();
+ rdtsc_barrier();
rdtscl(bclock);
for (;;) {
+ rdtsc_barrier();
rdtscl(now);
if ((now - bclock) >= loops)
break;
@@ -78,6 +80,7 @@ static void delay_tsc(unsigned long loops)
if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock);
cpu = smp_processor_id();
+ rdtsc_barrier();
rdtscl(bclock);
}
}
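
The added rdtsc_barrier() calls keep the TSC reads from being reordered around the surrounding loads; roughly, and assuming the asm/system.h definition of this era, the barrier is an alternatives-patched fence:

	static __always_inline void rdtsc_barrier(void)
	{
		/* patched to MFENCE or LFENCE depending on the CPU, NOPs otherwise */
		alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
		alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
	}

Without it, a speculatively hoisted rdtscl() can make (now - bclock) come out too small and cut the requested delay short.
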
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index f53b57e4086..47ce9a2ce5e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -177,20 +177,6 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
return nr_range;
}
-#ifdef CONFIG_X86_64
-static void __init init_gbpages(void)
-{
- if (direct_gbpages && cpu_has_gbpages)
- printk(KERN_INFO "Using GB pages for direct mapping\n");
- else
- direct_gbpages = 0;
-}
-#else
-static inline void init_gbpages(void)
-{
-}
-#endif
-
/*
* Setup the direct mapping of the physical memory at PAGE_OFFSET.
* This runs before bootmem is initialized and gets pages directly from
@@ -210,9 +196,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
- if (!after_bootmem)
- init_gbpages();
-
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c4378f4fd4a..b177652251a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -598,6 +598,8 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
+ /* clear the default setting with node 0 */
+ nodes_clear(node_states[N_NORMAL_MEMORY]);
free_area_init_nodes(max_zone_pfns);
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3cfe9ced8a4..1b734d7a896 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/pfn.h>
#include <asm/e820.h>
#include <asm/processor.h>
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
static int cpa_process_alias(struct cpa_data *cpa)
{
struct cpa_data alias_cpa;
- int ret = 0;
- unsigned long temp_cpa_vaddr, vaddr;
+ unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+ unsigned long vaddr, remapped;
+ int ret;
if (cpa->pfn >= max_pfn_mapped)
return 0;
@@ -706,42 +708,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
alias_cpa = *cpa;
- temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
- alias_cpa.vaddr = &temp_cpa_vaddr;
+ alias_cpa.vaddr = &laddr;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-
ret = __change_page_attr_set_clr(&alias_cpa, 0);
+ if (ret)
+ return ret;
}
#ifdef CONFIG_X86_64
- if (ret)
- return ret;
/*
- * No need to redo, when the primary call touched the high
- * mapping already:
- */
- if (within(vaddr, (unsigned long) _text, _brk_end))
- return 0;
-
- /*
- * If the physical address is inside the kernel map, we need
+ * If the primary call didn't touch the high mapping already
+ * and the physical address is inside the kernel map, we need
* to touch the high mapped kernel as well:
*/
- if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
- return 0;
+ if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+ within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+ unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+ __START_KERNEL_map - phys_base;
+ alias_cpa = *cpa;
+ alias_cpa.vaddr = &temp_cpa_vaddr;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
- alias_cpa = *cpa;
- temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
- alias_cpa.vaddr = &temp_cpa_vaddr;
- alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ /*
+ * The high mapping range is imprecise, so ignore the
+ * return value.
+ */
+ __change_page_attr_set_clr(&alias_cpa, 0);
+ }
+#endif
/*
- * The high mapping range is imprecise, so ignore the return value.
+ * If the PMD page was partially used for per-cpu remapping,
+ * the recycled area needs to be split and modified. Because
+ * the area is always a proper subset of a PMD page,
+ * cpa->numpages is guaranteed to be 1 for these areas, so
+ * there's no need to loop over and check for further remaps.
*/
- __change_page_attr_set_clr(&alias_cpa, 0);
-#endif
- return ret;
+ remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+ if (remapped) {
+ WARN_ON(cpa->numpages > 1);
+ alias_cpa = *cpa;
+ alias_cpa.vaddr = &remapped;
+ alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ ret = __change_page_attr_set_clr(&alias_cpa, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
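
The restructured alias handling above leans on pageattr.c's local within() range check; a minimal sketch, assuming the file's half-open helper:

	/* true if addr lies in [start, end) */
	static inline int within(unsigned long addr, unsigned long start,
				 unsigned long end)
	{
		return addr >= start && addr < end;
	}

so the high-mapping alias is only rewritten when the primary call did not already cover the _text.._brk_end range and cpa->pfn really falls inside the kernel's high map.
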
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index d277ef1eea5..b3d20b9cac6 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -244,7 +244,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
do_fpu_end();
mtrr_ap_init();
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_OLD_MCE
mcheck_init(&boot_cpu_data);
#endif
}
diff --git a/block/Makefile b/block/Makefile
index e9fa4dd690f..6c54ed0ff75 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
+ ioctl.o genhd.o scsi_ioctl.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/blk-core.c b/block/blk-core.c
index b06cf5c2a82..4b45435c6ea 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -595,8 +595,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
q->sg_reserved_size = INT_MAX;
- blk_set_cmd_filter_defaults(&q->cmd_filter);
-
/*
* all done
*/
@@ -1172,6 +1170,11 @@ static int __make_request(struct request_queue *q, struct bio *bio)
const int unplug = bio_unplug(bio);
int rw_flags;
+ if (bio_barrier(bio) && bio_has_data(bio) &&
+ (q->next_ordered == QUEUE_ORDERED_NONE)) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1472,11 +1475,6 @@ static inline void __generic_make_request(struct bio *bio)
err = -EOPNOTSUPP;
goto end_io;
}
- if (bio_barrier(bio) && bio_has_data(bio) &&
- (q->next_ordered == QUEUE_ORDERED_NONE)) {
- err = -EOPNOTSUPP;
- goto end_io;
- }
ret = q->make_request_fn(q, bio);
} while (ret);
@@ -2365,7 +2363,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
__bio_clone(bio, bio_src);
if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask))
+ bio_integrity_clone(bio, bio_src, gfp_mask, bs))
goto free_and_out;
if (bio_ctr && bio_ctr(bio, bio_src, data))
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba..e1999679a4d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (blk_integrity_rq(req) != blk_integrity_rq(next))
return 0;
+ /* don't merge requests of different failfast settings */
+ if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+ blk_failfast_transport(req) != blk_failfast_transport(next) ||
+ blk_failfast_driver(req) != blk_failfast_driver(next))
+ return 0;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
diff --git a/block/bsg.c b/block/bsg.c
index e7d47525424..5f184bb3ff9 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -186,7 +186,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
+ if (blk_verify_command(rq->cmd, has_write_perm))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 833ec18eaa6..87276eb83f7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,6 +71,51 @@ struct cfq_rb_root {
#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
/*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+ /* reference count */
+ atomic_t ref;
+ /* various state flags, see below */
+ unsigned int flags;
+ /* parent cfq_data */
+ struct cfq_data *cfqd;
+ /* service_tree member */
+ struct rb_node rb_node;
+ /* service_tree key */
+ unsigned long rb_key;
+ /* prio tree member */
+ struct rb_node p_node;
+ /* prio tree root we belong to, if any */
+ struct rb_root *p_root;
+ /* sorted list of pending requests */
+ struct rb_root sort_list;
+ /* if fifo isn't expired, next request to serve */
+ struct request *next_rq;
+ /* requests queued in sort_list */
+ int queued[2];
+ /* currently allocated requests */
+ int allocated[2];
+ /* fifo list of requests in sort_list */
+ struct list_head fifo;
+
+ unsigned long slice_end;
+ long slice_resid;
+ unsigned int slice_dispatch;
+
+ /* pending metadata requests */
+ int meta_pending;
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
+
+ /* io prio of this group */
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class, org_ioprio_class;
+
+ pid_t pid;
+};
+
+/*
* Per block device queue structure
*/
struct cfq_data {
@@ -135,51 +180,11 @@ struct cfq_data {
unsigned int cfq_slice_idle;
struct list_head cic_list;
-};
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
- /* reference count */
- atomic_t ref;
- /* various state flags, see below */
- unsigned int flags;
- /* parent cfq_data */
- struct cfq_data *cfqd;
- /* service_tree member */
- struct rb_node rb_node;
- /* service_tree key */
- unsigned long rb_key;
- /* prio tree member */
- struct rb_node p_node;
- /* prio tree root we belong to, if any */
- struct rb_root *p_root;
- /* sorted list of pending requests */
- struct rb_root sort_list;
- /* if fifo isn't expired, next request to serve */
- struct request *next_rq;
- /* requests queued in sort_list */
- int queued[2];
- /* currently allocated requests */
- int allocated[2];
- /* fifo list of requests in sort_list */
- struct list_head fifo;
- unsigned long slice_end;
- long slice_resid;
- unsigned int slice_dispatch;
-
- /* pending metadata requests */
- int meta_pending;
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
-
- /* io prio of this group */
- unsigned short ioprio, org_ioprio;
- unsigned short ioprio_class, org_ioprio_class;
-
- pid_t pid;
+ /*
+ * Fallback dummy cfqq for extreme OOM conditions
+ */
+ struct cfq_queue oom_cfqq;
};
enum cfqq_state_flags {
@@ -1641,6 +1646,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
ioc->ioprio_changed = 0;
}
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ pid_t pid, int is_sync)
+{
+ RB_CLEAR_NODE(&cfqq->rb_node);
+ RB_CLEAR_NODE(&cfqq->p_node);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ atomic_set(&cfqq->ref, 0);
+ cfqq->cfqd = cfqd;
+
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ if (is_sync) {
+ if (!cfq_class_idle(cfqq))
+ cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_sync(cfqq);
+ }
+ cfqq->pid = pid;
+}
+
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
struct io_context *ioc, gfp_t gfp_mask)
@@ -1653,56 +1678,40 @@ retry:
/* cic always exists here */
cfqq = cic_to_cfqq(cic, is_sync);
- if (!cfqq) {
+ /*
+ * Always try a new alloc if we fell back to the OOM cfqq
+ * originally, since it should just be a temporary situation.
+ */
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+ cfqq = NULL;
if (new_cfqq) {
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
- /*
- * Inform the allocator of the fact that we will
- * just repeat this allocation if it fails, to allow
- * the allocator to do whatever it needs to attempt to
- * free memory.
- */
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
- gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ gfp_mask | __GFP_ZERO,
cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
- goto retry;
+ if (new_cfqq)
+ goto retry;
} else {
cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
cfqd->queue->node);
- if (!cfqq)
- goto out;
}
- RB_CLEAR_NODE(&cfqq->rb_node);
- RB_CLEAR_NODE(&cfqq->p_node);
- INIT_LIST_HEAD(&cfqq->fifo);
-
- atomic_set(&cfqq->ref, 0);
- cfqq->cfqd = cfqd;
-
- cfq_mark_cfqq_prio_changed(cfqq);
-
- cfq_init_prio_data(cfqq, ioc);
-
- if (is_sync) {
- if (!cfq_class_idle(cfqq))
- cfq_mark_cfqq_idle_window(cfqq);
- cfq_mark_cfqq_sync(cfqq);
- }
- cfqq->pid = current->pid;
- cfq_log_cfqq(cfqd, cfqq, "alloced");
+ if (cfqq) {
+ cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+ cfq_init_prio_data(cfqq, ioc);
+ cfq_log_cfqq(cfqd, cfqq, "alloced");
+ } else
+ cfqq = &cfqd->oom_cfqq;
}
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
-out:
- WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
return cfqq;
}
@@ -1735,11 +1744,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
cfqq = *async_cfqq;
}
- if (!cfqq) {
+ if (!cfqq)
cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
- if (!cfqq)
- return NULL;
- }
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -2307,10 +2313,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq) {
cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
-
- if (!cfqq)
- goto queue_fail;
-
cic_set_cfqq(cic, cfqq, is_sync);
}
@@ -2465,6 +2467,14 @@ static void *cfq_init_queue(struct request_queue *q)
for (i = 0; i < CFQ_PRIO_LISTS; i++)
cfqd->prio_trees[i] = RB_ROOT;
+ /*
+ * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+ * Grab a permanent reference to it, so that the normal code flow
+ * will not attempt to free it.
+ */
+ cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+ atomic_inc(&cfqd->oom_cfqq.ref);
+
INIT_LIST_HEAD(&cfqd->cic_list);
cfqd->queue = q;
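
The oom_cfqq changes above are an instance of the preallocated-fallback-object pattern: embed one statically initialized instance, pin it with a reference, and hand it out whenever the real allocation fails so callers never see NULL. A generic sketch (struct widget, widget_pool and oom_widget are hypothetical names, not part of this patch):

	struct widget {
		atomic_t ref;
	};

	static struct kmem_cache *widget_pool;	/* hypothetical slab cache */
	static struct widget oom_widget;	/* embedded fallback, ref pinned at init */

	static struct widget *widget_get(gfp_t gfp)
	{
		struct widget *w = kmem_cache_alloc(widget_pool, gfp | __GFP_ZERO);

		if (!w)
			return &oom_widget;	/* never freed; callers skip NULL checks */
		atomic_set(&w->ref, 0);
		return w;
	}

This is why cfq_get_queue() and cfq_set_request() can drop their NULL checks, and why cfq_find_alloc_queue() retries a real allocation on the next request when it previously fell back to &cfqd->oom_cfqq.
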
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
deleted file mode 100644
index 572bbc2f900..00000000000
--- a/block/cmd-filter.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
- */
-
-#include <linux/list.h>
-#include <linux/genhd.h>
-#include <linux/spinlock.h>
-#include <linux/capability.h>
-#include <linux/bitops.h>
-#include <linux/blkdev.h>
-
-#include <scsi/scsi.h>
-#include <linux/cdrom.h>
-
-int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, fmode_t has_write_perm)
-{
- /* root can do any command. */
- if (capable(CAP_SYS_RAWIO))
- return 0;
-
- /* if there's no filter set, assume we're filtering everything out */
- if (!filter)
- return -EPERM;
-
- /* Anybody who can open the device can do a read-safe command */
- if (test_bit(cmd[0], filter->read_ok))
- return 0;
-
- /* Write-safe commands require a writable open */
- if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
- return 0;
-
- return -EPERM;
-}
-EXPORT_SYMBOL(blk_verify_command);
-
-#if 0
-/* and now, the sysfs stuff */
-static ssize_t rcf_cmds_show(struct blk_cmd_filter *filter, char *page,
- int rw)
-{
- char *npage = page;
- unsigned long *okbits;
- int i;
-
- if (rw == READ)
- okbits = filter->read_ok;
- else
- okbits = filter->write_ok;
-
- for (i = 0; i < BLK_SCSI_MAX_CMDS; i++) {
- if (test_bit(i, okbits)) {
- npage += sprintf(npage, "0x%02x", i);
- if (i < BLK_SCSI_MAX_CMDS - 1)
- sprintf(npage++, " ");
- }
- }
-
- if (npage != page)
- npage += sprintf(npage, "\n");
-
- return npage - page;
-}
-
-static ssize_t rcf_readcmds_show(struct blk_cmd_filter *filter, char *page)
-{
- return rcf_cmds_show(filter, page, READ);
-}
-
-static ssize_t rcf_writecmds_show(struct blk_cmd_filter *filter,
- char *page)
-{
- return rcf_cmds_show(filter, page, WRITE);
-}
-
-static ssize_t rcf_cmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count, int rw)
-{
- unsigned long okbits[BLK_SCSI_CMD_PER_LONG], *target_okbits;
- int cmd, set;
- char *p, *status;
-
- if (rw == READ) {
- memcpy(&okbits, filter->read_ok, sizeof(okbits));
- target_okbits = filter->read_ok;
- } else {
- memcpy(&okbits, filter->write_ok, sizeof(okbits));
- target_okbits = filter->write_ok;
- }
-
- while ((p = strsep((char **)&page, " ")) != NULL) {
- set = 1;
-
- if (p[0] == '+') {
- p++;
- } else if (p[0] == '-') {
- set = 0;
- p++;
- }
-
- cmd = simple_strtol(p, &status, 16);
-
- /* either of these cases means invalid input, so do nothing. */
- if ((status == p) || cmd >= BLK_SCSI_MAX_CMDS)
- return -EINVAL;
-
- if (set)
- __set_bit(cmd, okbits);
- else
- __clear_bit(cmd, okbits);
- }
-
- memcpy(target_okbits, okbits, sizeof(okbits));
- return count;
-}
-
-static ssize_t rcf_readcmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count)
-{
- return rcf_cmds_store(filter, page, count, READ);
-}
-
-static ssize_t rcf_writecmds_store(struct blk_cmd_filter *filter,
- const char *page, size_t count)
-{
- return rcf_cmds_store(filter, page, count, WRITE);
-}
-
-struct rcf_sysfs_entry {
- struct attribute attr;
- ssize_t (*show)(struct blk_cmd_filter *, char *);
- ssize_t (*store)(struct blk_cmd_filter *, const char *, size_t);
-};
-
-static struct rcf_sysfs_entry rcf_readcmds_entry = {
- .attr = { .name = "read_table", .mode = S_IRUGO | S_IWUSR },
- .show = rcf_readcmds_show,
- .store = rcf_readcmds_store,
-};
-
-static struct rcf_sysfs_entry rcf_writecmds_entry = {
- .attr = {.name = "write_table", .mode = S_IRUGO | S_IWUSR },
- .show = rcf_writecmds_show,
- .store = rcf_writecmds_store,
-};
-
-static struct attribute *default_attrs[] = {
- &rcf_readcmds_entry.attr,
- &rcf_writecmds_entry.attr,
- NULL,
-};
-
-#define to_rcf(atr) container_of((atr), struct rcf_sysfs_entry, attr)
-
-static ssize_t
-rcf_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
- struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_cmd_filter *filter;
-
- filter = container_of(kobj, struct blk_cmd_filter, kobj);
- if (entry->show)
- return entry->show(filter, page);
-
- return 0;
-}
-
-static ssize_t
-rcf_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *page, size_t length)
-{
- struct rcf_sysfs_entry *entry = to_rcf(attr);
- struct blk_cmd_filter *filter;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- if (!entry->store)
- return -EINVAL;
-
- filter = container_of(kobj, struct blk_cmd_filter, kobj);
- return entry->store(filter, page, length);
-}
-
-static struct sysfs_ops rcf_sysfs_ops = {
- .show = rcf_attr_show,
- .store = rcf_attr_store,
-};
-
-static struct kobj_type rcf_ktype = {
- .sysfs_ops = &rcf_sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-int blk_register_filter(struct gendisk *disk)
-{
- int ret;
- struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
- ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
- &disk_to_dev(disk)->kobj,
- "%s", "cmd_filter");
- if (ret < 0)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(blk_register_filter);
-
-void blk_unregister_filter(struct gendisk *disk)
-{
- struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
-
- kobject_put(&filter->kobj);
-}
-EXPORT_SYMBOL(blk_unregister_filter);
-#endif
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba4..6f2375339a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,14 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_integrity(bio) != blk_integrity_rq(rq))
return 0;
+ /*
+ * Don't merge if failfast settings don't match
+ */
+ if (bio_failfast_dev(bio) != blk_failfast_dev(rq) ||
+ bio_failfast_transport(bio) != blk_failfast_transport(rq) ||
+ bio_failfast_driver(bio) != blk_failfast_driver(rq))
+ return 0;
+
if (!elv_iosched_allow_merge(rq, bio))
return 0;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5f8e798ede4..f0e0ce0a607 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -32,6 +32,11 @@
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
+struct blk_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+} blk_default_cmd_filter;
+
/* Command group 3 is reserved and should never be used. */
const unsigned char scsi_command_size_tbl[8] =
{
@@ -105,7 +110,7 @@ static int sg_emulated_host(struct request_queue *q, int __user *p)
return put_user(1, p);
}
-void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
/* Basic read-only commands */
__set_bit(TEST_UNIT_READY, filter->read_ok);
@@ -187,14 +192,37 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}
-EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
+
+int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
+{
+ struct blk_cmd_filter *filter = &blk_default_cmd_filter;
+
+ /* root can do any command. */
+ if (capable(CAP_SYS_RAWIO))
+ return 0;
+
+ /* if there's no filter set, assume we're filtering everything out */
+ if (!filter)
+ return -EPERM;
+
+ /* Anybody who can open the device can do a read-safe command */
+ if (test_bit(cmd[0], filter->read_ok))
+ return 0;
+
+ /* Write-safe commands require a writable open */
+ if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
+ return 0;
+
+ return -EPERM;
+}
+EXPORT_SYMBOL(blk_verify_command);
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
+ if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
return -EPERM;
/*
@@ -427,7 +455,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE);
+ err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
if (err)
goto error;
@@ -645,5 +673,10 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
blk_put_queue(q);
return err;
}
-
EXPORT_SYMBOL(scsi_cmd_ioctl);
+
+int __init blk_scsi_ioctl_init(void)
+{
+ blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+ return 0;
+}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c7a527c08a0..65a0655e7fc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -226,8 +226,18 @@ static inline void addQ(struct hlist_head *list, CommandList_struct *c)
static inline void removeQ(CommandList_struct *c)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
+ /*
+ * After kexec/dump some commands might still
+ * be in flight, which the firmware will try
+ * to complete. Resetting the firmware doesn't work
+ * with old fw revisions, so we have to mark
+ * them off as 'stale' to prevent the driver from
+ * falling over.
+ */
+ if (WARN_ON(hlist_unhashed(&c->list))) {
+ c->cmd_type = CMD_MSG_STALE;
return;
+ }
hlist_del_init(&c->list);
}
@@ -4246,7 +4256,8 @@ static void fail_all_cmds(unsigned long ctlr)
while (!hlist_empty(&h->cmpQ)) {
c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
removeQ(c);
- c->err_info->CommandStatus = CMD_HARDWARE_ERR;
+ if (c->cmd_type != CMD_MSG_STALE)
+ c->err_info->CommandStatus = CMD_HARDWARE_ERR;
if (c->cmd_type == CMD_RWREQ) {
complete_command(h, c, 0);
} else if (c->cmd_type == CMD_IOCTL_PEND)
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd665b00c7c..dbaed1ea0da 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -274,6 +274,7 @@ typedef struct _ErrorInfo_struct {
#define CMD_SCSI 0x03
#define CMD_MSG_DONE 0x04
#define CMD_MSG_TIMEOUT 0x05
+#define CMD_MSG_STALE 0xff
/* This structure needs to be divisible by 8 for new
* indexing method.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c9018..91b75301378 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- LOCK_FDC(drive, 1);
+ if (lock_fdc(drive, 1)) {
+ mutex_unlock(&open_lock);
+ return -EINTR;
+ }
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index a19e935847b..913aa8d3f1c 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -867,15 +867,22 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_wait_idle(tty);
/*
- * Shutdown the current line discipline, and reset it to N_TTY.
- *
- * FIXME: this MUST get fixed for the new reflocking
+ * Now kill off the ldisc
*/
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
- tty_ldisc_reinit(tty);
/* This will need doing differently if we need to lock */
if (o_tty)
tty_ldisc_release(o_tty, NULL);
+
+ /* And the memory resources remaining (buffers, termios) will be
+ disposed of when the kref hits zero */
}
/**
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 9ffb05f4095..93c2322feab 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -161,7 +161,7 @@ static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
if (periodic)
sh_tmu_write(p, TCOR, delta);
else
- sh_tmu_write(p, TCOR, 0);
+ sh_tmu_write(p, TCOR, 0xffffffff);
sh_tmu_write(p, TCNT, delta);
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb82..871c13b4c14 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
+ MEM_DDR3, /* DDR3 RAM */
+ MEM_RDDR3, /* Registered DDR3 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942..e1d4ce08348 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR"
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "Unbuffered-DDR3",
+ [MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916..3f2ccfc6407 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_RDDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_RDDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_DDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_DDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a03..52432ee7c4b 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
#define DSC_SDTYPE_DDR 0x02000000
#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_SDTYPE_DDR3 0x07000000
#define DSC_X32_EN 0x00000020
/* Err_Int_En */
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d..4ee4c8367a3 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+ if (chip->irq_base == (unsigned) -1)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
/*
* PL061 GPIO IRQ
*/
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->chip->ack(irq);
list_for_each(ptr, chip_list) {
unsigned long pending;
- int gpio;
+ int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(gpio, &pending, PL061_GPIO_NR)
- generic_handle_irq(gpio_to_irq(gpio));
+ for_each_bit(offset, &pending, PL061_GPIO_NR)
+ generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
- static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+ static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
+ chip->gc.to_irq = pl061_to_irq;
chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
+ clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 77f79d26b26..c509c991646 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -92,6 +92,11 @@ int ide_acpi_init(void)
return 0;
}
+bool ide_port_acpi(ide_hwif_t *hwif)
+{
+ return ide_noacpi == 0 && hwif->acpidata;
+}
+
/**
* ide_get_dev_handle - finds acpi_handle and PCI device.function
* @dev: device to locate
@@ -352,9 +357,6 @@ int ide_acpi_exec_tfs(ide_drive_t *drive)
unsigned long gtf_address;
unsigned long obj_loc;
- if (ide_noacpi)
- return 0;
-
DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
@@ -389,16 +391,6 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
struct acpi_buffer output;
union acpi_object *out_obj;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Setting up output buffer for _GTM */
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
@@ -479,16 +471,6 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
struct ide_acpi_drive_link *master = &hwif->acpidata->master;
struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
- if (ide_noacpi)
- return;
-
- DEBPRINT("ENTER:\n");
-
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* Give the GTM buffer + drive Identify data to the channel via the
* _STM method: */
/* setup input parameters buffer for _STM */
@@ -527,16 +509,11 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
ide_drive_t *drive;
int i;
- if (ide_noacpi || ide_noacpi_psx)
+ if (ide_noacpi_psx)
return;
DEBPRINT("ENTER:\n");
- if (!hwif->acpidata) {
- DEBPRINT("no ACPI data for %s\n", hwif->name);
- return;
- }
-
/* channel first and then drives for power on and vice versa for power off */
if (on)
acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0);
@@ -616,7 +593,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
drive->name, err);
}
- if (!ide_acpionboot) {
+ if (ide_noacpi || ide_acpionboot == 0) {
DEBPRINT("ACPI methods disabled on boot\n");
return;
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index f0ede5953af..6a9a769bffc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -592,9 +592,19 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
} else if (!blk_pc_request(rq)) {
ide_cd_request_sense_fixup(drive, cmd);
- /* complain if we still have data left to transfer */
+
uptodate = cmd->nleft ? 0 : 1;
- if (uptodate == 0)
+
+ /*
+ * suck out the remaining bytes from the drive in an
+ * attempt to complete the data xfer. (see BZ#13399)
+ */
+ if (!(stat & ATA_ERR) && !uptodate && thislen) {
+ ide_pio_bytes(drive, cmd, write, thislen);
+ uptodate = cmd->nleft ? 0 : 1;
+ }
+
+ if (!uptodate)
rq->cmd_flags |= REQ_FAILED;
}
goto out_end;
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 5bf958e5b1d..1099bf7cf96 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -183,6 +183,6 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
err = setfunc(drive, *(int *)&rq->cmd[1]);
if (err)
rq->errors = err;
- ide_complete_rq(drive, err, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 2b914197961..e9abf2c3c33 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -149,7 +149,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
if (err <= 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, err ? err : 0, ide_rq_bytes(rq));
+ ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
}
}
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 8b3f204f7d7..fefbdfc8db0 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -293,7 +293,7 @@ out_end:
drive->failed_pc = NULL;
if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
return ide_stopped;
}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 93b7886a2d6..d5f3c77bead 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -112,16 +112,6 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
}
}
-/* obsolete, blk_rq_bytes() should be used instead */
-unsigned int ide_rq_bytes(struct request *rq)
-{
- if (blk_pc_request(rq))
- return blk_rq_bytes(rq);
- else
- return blk_rq_cur_sectors(rq) << 9;
-}
-EXPORT_SYMBOL_GPL(ide_rq_bytes);
-
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
ide_hwif_t *hwif = drive->hwif;
@@ -152,14 +142,14 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
if ((media == ide_floppy || media == ide_tape) && drv_req) {
rq->errors = 0;
- ide_complete_rq(drive, 0, blk_rq_bytes(rq));
} else {
if (media == ide_tape)
rq->errors = IDE_DRV_ERROR_GENERAL;
else if (blk_fs_request(rq) == 0 && rq->errors == 0)
rq->errors = -EIO;
- ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
}
+
+ ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 82f252c3ee6..e246d3d3fbc 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -64,7 +64,8 @@ static int ide_get_identity_ioctl(ide_drive_t *drive, unsigned int cmd,
goto out;
}
- id = kmalloc(size, GFP_KERNEL);
+ /* ata_id_to_hd_driveid() relies on 'id' to be fully allocated. */
+ id = kmalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
if (id == NULL) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index c14ca144cff..ad7be2669dc 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -10,9 +10,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
struct request_pm_state rqpm;
int ret;
- /* call ACPI _GTM only once */
- if ((drive->dn & 1) == 0 || pair == NULL)
- ide_acpi_get_timing(hwif);
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
@@ -26,9 +28,11 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
- /* call ACPI _PS3 only after both devices are suspended */
- if (ret == 0 && ((drive->dn & 1) || pair == NULL))
- ide_acpi_set_state(hwif, 0);
+ if (ret == 0 && ide_port_acpi(hwif)) {
+ /* call ACPI _PS3 only after both devices are suspended */
+ if ((drive->dn & 1) || pair == NULL)
+ ide_acpi_set_state(hwif, 0);
+ }
return ret;
}
@@ -42,13 +46,15 @@ int generic_ide_resume(struct device *dev)
struct request_pm_state rqpm;
int err;
- /* call ACPI _PS0 / _STM only once */
- if ((drive->dn & 1) == 0 || pair == NULL) {
- ide_acpi_set_state(hwif, 1);
- ide_acpi_push_timing(hwif);
- }
+ if (ide_port_acpi(hwif)) {
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
- ide_acpi_exec_tfs(drive);
+ ide_acpi_exec_tfs(drive);
+ }
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b684d..7c8e7122aaa 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@ config LEDS_ALIX2
depends on LEDS_CLASS && X86 && EXPERIMENTAL
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+ You have to set leds-alix2.force=1 for boards with Award BIOS.
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@ config LEDS_GPIO_OF
of_platform devices. For instance, LEDs which are listed in a "dts"
file.
-config LEDS_LP5521
- tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+ tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS && I2C
help
- If you say 'Y' here you get support for the National Semiconductor
- LP5521 LED driver used in n8x0 boards.
+ This option enables support for LEDs connected to the National
+ Semiconductor LP3944 Lighting Management Unit (LMU) also known as
+ Fun Light Chip.
- This driver can be built as a module by choosing 'M'. The module
- will be called leds-lp5521.
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4dcf92..e8cdcf77a4c 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd7730dfc..731d4eef342 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
static int force = 0;
module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
struct alix_led {
struct led_classdev cdev;
@@ -155,6 +155,11 @@ static int __init alix_led_init(void)
goto out;
}
+ /* enable output on GPIO for LED 1,2,3 */
+ outl(1 << 6, 0x6104);
+ outl(1 << 9, 0x6184);
+ outl(1 << 11, 0x6184);
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb3a9b..779d7f262c0 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@ struct bd2802_led {
enum led_ids led_id;
enum led_colors color;
enum led_bits state;
+
+ /* General attributes of RGB LEDs */
+ int wave_pattern;
+ int rgb_current;
};
@@ -254,7 +258,7 @@ static void bd2802_set_on(struct bd2802_led *led, enum led_ids id,
bd2802_reset_cancel(led);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@ static void bd2802_set_blink(struct bd2802_led *led, enum led_ids id,
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
- bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+ bd2802_write_byte(led->client, reg, led->wave_pattern);
bd2802_enable(led, id);
bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@ static void bd2802_enable_adv_conf(struct bd2802_led *led)
ret = device_create_file(&led->client->dev,
bd2802_addr_attributes[i]);
if (ret) {
- dev_err(&led->client->dev, "failed to sysfs file %s\n",
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
bd2802_addr_attributes[i]->attr.name);
goto failed_remove_files;
}
@@ -483,6 +487,52 @@ static struct device_attribute bd2802_adv_conf_attr = {
.store = bd2802_store_adv_conf,
};
+#define BD2802_CONTROL_ATTR(attr_name, name_str) \
+static ssize_t bd2802_show_##attr_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ ssize_t ret; \
+ down_read(&led->rwsem); \
+ ret = sprintf(buf, "0x%02x\n", led->attr_name); \
+ up_read(&led->rwsem); \
+ return ret; \
+} \
+static ssize_t bd2802_store_##attr_name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ unsigned long val; \
+ int ret; \
+ if (!count) \
+ return -EINVAL; \
+ ret = strict_strtoul(buf, 16, &val); \
+ if (ret) \
+ return ret; \
+ down_write(&led->rwsem); \
+ led->attr_name = val; \
+ up_write(&led->rwsem); \
+ return count; \
+} \
+static struct device_attribute bd2802_##attr_name##_attr = { \
+ .attr = { \
+ .name = name_str, \
+ .mode = 0644, \
+ .owner = THIS_MODULE \
+ }, \
+ .show = bd2802_show_##attr_name, \
+ .store = bd2802_store_##attr_name, \
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+ &bd2802_adv_conf_attr,
+ &bd2802_wave_pattern_attr,
+ &bd2802_rgb_current_attr,
+};
+
static void bd2802_led_work(struct work_struct *work)
{
struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1r.brightness = LED_OFF;
led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
- led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
if (ret < 0) {
@@ -551,7 +600,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1g.brightness = LED_OFF;
led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
- led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
if (ret < 0) {
@@ -564,7 +612,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led1b.brightness = LED_OFF;
led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
- led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
if (ret < 0) {
@@ -577,7 +624,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2r.brightness = LED_OFF;
led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
- led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
if (ret < 0) {
@@ -590,7 +636,6 @@ static int bd2802_register_led_classdev(struct bd2802_led *led)
led->cdev_led2g.brightness = LED_OFF;
led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
- led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
if (ret < 0) {
@@ -640,7 +685,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
{
struct bd2802_led *led;
struct bd2802_led_platform_data *pdata;
- int ret;
+ int ret, i;
led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
@@ -670,13 +715,20 @@ static int __devinit bd2802_probe(struct i2c_client *client,
/* To save the power, reset BD2802 after detecting */
gpio_set_value(led->pdata->reset_gpio, 0);
+ /* Default attributes */
+ led->wave_pattern = BD2802_PATTERN_HALF;
+ led->rgb_current = BD2802_CURRENT_032;
+
init_rwsem(&led->rwsem);
- ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
- if (ret) {
- dev_err(&client->dev, "failed to create sysfs file %s\n",
- bd2802_adv_conf_attr.attr.name);
- goto failed_free;
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+ ret = device_create_file(&led->client->dev,
+ bd2802_attributes[i]);
+ if (ret) {
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
+ bd2802_attributes[i]->attr.name);
+ goto failed_unregister_dev_file;
+ }
}
ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@ static int __devinit bd2802_probe(struct i2c_client *client,
return 0;
failed_unregister_dev_file:
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i--; i >= 0; i--)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -697,12 +750,14 @@ failed_free:
static int __exit bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
+ int i;
- bd2802_unregister_led_classdev(led);
gpio_set_value(led->pdata->reset_gpio, 0);
+ bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -723,8 +778,7 @@ static int bd2802_resume(struct i2c_client *client)
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 1);
- udelay(100);
+ bd2802_reset_cancel(led);
bd2802_restore_state(led);
}
@@ -762,4 +816,4 @@ module_exit(bd2802_exit);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d2109054de8..6b06638eb5b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, unsigned long *, unsigned long *))
{
- int ret;
+ int ret, state;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = LED_OFF;
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+ state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ else
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
@@ -129,7 +133,7 @@ static void delete_gpio_led(struct gpio_led_data *led)
}
#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
memset(&led, 0, sizeof(led));
for_each_child_of_node(np, child) {
enum of_gpio_flags flags;
+ const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep"))
+ led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+ else if(!strcmp(state, "on"))
+ led.default_state = LEDS_GPIO_DEFSTATE_ON;
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
&ofdev->dev, NULL);
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 00000000000..5946208ba26
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
+/*
+ * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * I2C driver for National Semiconductor LP3944 Funlight Chip
+ * http://www.national.com/pf/LP/LP3944.html
+ *
+ * This helper chip can drive up to 8 leds, with two programmable DIM modes;
+ * it could even be used as a gpio expander but this driver assumes it is used
+ * as a led controller.
+ *
+ * The DIM modes are used to set _blink_ patterns for leds; a pattern is
+ * specified by supplying two parameters:
+ * - period: from 0s to 1.6s
+ * - duty cycle: percentage of the period the led is on, from 0 to 100
+ *
+ * LP3944 can be found on Motorola A910 smartphone, where it drives the rgb
+ * leds, the camera flash light and the display backlights.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds-lp3944.h>
+
+/* Read Only Registers */
+#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
+#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
+
+#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
+#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
+#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
+#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
+#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
+#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
+
+/* These registers are not used to control leds in LP3944, they can store
+ * arbitrary values which the chip will ignore.
+ */
+#define LP3944_REG_REGISTER8 0x08
+#define LP3944_REG_REGISTER9 0x09
+
+#define LP3944_DIM0 0
+#define LP3944_DIM1 1
+
+/* period in ms */
+#define LP3944_PERIOD_MIN 0
+#define LP3944_PERIOD_MAX 1600
+
+/* duty cycle is a percentage */
+#define LP3944_DUTY_CYCLE_MIN 0
+#define LP3944_DUTY_CYCLE_MAX 100
+
+#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
+
+/* Saved data */
+struct lp3944_led_data {
+ u8 id;
+ enum lp3944_type type;
+ enum lp3944_status status;
+ struct led_classdev ldev;
+ struct i2c_client *client;
+ struct work_struct work;
+};
+
+struct lp3944_data {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct lp3944_led_data leds[LP3944_LEDS_MAX];
+};
+
+static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int tmp;
+
+ tmp = i2c_smbus_read_byte_data(client, reg);
+ if (tmp < 0)
+ return -EINVAL;
+
+ *value = tmp;
+
+ return 0;
+}
+
+static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+/**
+ * Set the period for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @period: period of a blink, that is a on/off cycle, expressed in ms.
+ */
+static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
+{
+ u8 psc_reg;
+ u8 psc_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ psc_reg = LP3944_REG_PSC0;
+ else if (dim == LP3944_DIM1)
+ psc_reg = LP3944_REG_PSC1;
+ else
+ return -EINVAL;
+
+ /* Convert period to Prescaler value */
+ if (period > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ psc_value = (period * 255) / LP3944_PERIOD_MAX;
+
+ err = lp3944_reg_write(client, psc_reg, psc_value);
+
+ return err;
+}
+
+/**
+ * Set the duty cycle for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @duty_cycle: percentage of a period during which a led is ON
+ */
+static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
+ u8 duty_cycle)
+{
+ u8 pwm_reg;
+ u8 pwm_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ pwm_reg = LP3944_REG_PWM0;
+ else if (dim == LP3944_DIM1)
+ pwm_reg = LP3944_REG_PWM1;
+ else
+ return -EINVAL;
+
+ /* Convert duty cycle to PWM value */
+ if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
+ return -EINVAL;
+
+ pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
+
+ err = lp3944_reg_write(client, pwm_reg, pwm_value);
+
+ return err;
+}
+
+/**
+ * Set the led status
+ *
+ * @led: a lp3944_led_data structure
+ * @status: one of LP3944_LED_STATUS_OFF
+ * LP3944_LED_STATUS_ON
+ * LP3944_LED_STATUS_DIM0
+ * LP3944_LED_STATUS_DIM1
+ */
+static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
+{
+ struct lp3944_data *data = i2c_get_clientdata(led->client);
+ u8 id = led->id;
+ u8 reg;
+ u8 val = 0;
+ int err;
+
+ dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
+ __func__, led->ldev.name, status);
+
+ switch (id) {
+ case LP3944_LED0:
+ case LP3944_LED1:
+ case LP3944_LED2:
+ case LP3944_LED3:
+ reg = LP3944_REG_LS0;
+ break;
+ case LP3944_LED4:
+ case LP3944_LED5:
+ case LP3944_LED6:
+ case LP3944_LED7:
+ id -= LP3944_LED4;
+ reg = LP3944_REG_LS1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (status > LP3944_LED_STATUS_DIM1)
+ return -EINVAL;
+
+ /* invert only 0 and 1 and leave the other values unchanged;
+ * remember we are abusing status to set blink patterns
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
+ status = 1 - status;
+
+ mutex_lock(&data->lock);
+ lp3944_reg_read(led->client, reg, &val);
+
+ val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
+ val |= (status << (id << 1));
+
+ dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
+ __func__, led->ldev.name, reg, id, status, val);
+
+ /* set led status */
+ err = lp3944_reg_write(led->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int lp3944_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+ u16 period;
+ u8 duty_cycle;
+ int err;
+
+ /* units are in ms */
+ if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ /* Special case: the leds subsystem requires a default
+ * user-friendly blink pattern for the LED. Let's blink the LED
+ * slowly (1 Hz).
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ period = (*delay_on) + (*delay_off);
+
+ /* duty_cycle is the percentage of period during which the led is ON */
+ duty_cycle = 100 * (*delay_on) / period;
+
+ /* invert the duty cycle for inverted LEDs; this has the same effect as
+ * swapping delay_on and delay_off
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED)
+ duty_cycle = 100 - duty_cycle;
+
+ /* NOTE: always using the first DIM mode; this means that all LEDs
+ * will have the same blinking pattern.
+ *
+ * We could find a way later to have two leds blinking in hardware
+ * with different patterns at the same time, falling back to software
+ * control for the other ones.
+ */
+ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
+ if (err)
+ return err;
+
+ err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
+ if (err)
+ return err;
+
+ dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
+ __func__);
+
+ led->status = LP3944_LED_STATUS_DIM0;
+ schedule_work(&led->work);
+
+ return 0;
+}
+
+static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+
+ dev_dbg(&led->client->dev, "%s: %s, %d\n",
+ __func__, led_cdev->name, brightness);
+
+ led->status = brightness;
+ schedule_work(&led->work);
+}
+
+static void lp3944_led_work(struct work_struct *work)
+{
+ struct lp3944_led_data *led;
+
+ led = container_of(work, struct lp3944_led_data, work);
+ lp3944_led_set(led, led->status);
+}
+
+static int lp3944_configure(struct i2c_client *client,
+ struct lp3944_data *data,
+ struct lp3944_platform_data *pdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pdata->leds_size; i++) {
+ struct lp3944_led *pled = &pdata->leds[i];
+ struct lp3944_led_data *led = &data->leds[i];
+ led->client = client;
+ led->id = i;
+
+ switch (pled->type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led->type = pled->type;
+ led->status = pled->status;
+ led->ldev.name = pled->name;
+ led->ldev.max_brightness = 1;
+ led->ldev.brightness_set = lp3944_led_set_brightness;
+ led->ldev.blink_set = lp3944_led_set_blink;
+ led->ldev.flags = LED_CORE_SUSPENDRESUME;
+
+ INIT_WORK(&led->work, lp3944_led_work);
+ err = led_classdev_register(&client->dev, &led->ldev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led->ldev.name);
+ goto exit;
+ }
+
+ /* to expose the default value to userspace */
+ led->ldev.brightness = led->status;
+
+ /* Set the default led status */
+ err = lp3944_led_set(led, led->status);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "%s couldn't set STATUS %d\n",
+ led->ldev.name, led->status);
+ goto exit;
+ }
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+
+ }
+ }
+ return 0;
+
+exit:
+ if (i > 0)
+ for (i = i - 1; i >= 0; i--)
+ switch (pdata->leds[i].type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int __devinit lp3944_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_data *data;
+ int err;
+
+ if (lp3944_pdata == NULL) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "lp3944 enabled\n");
+
+ err = lp3944_configure(client, data, lp3944_pdata);
+ if (err < 0) {
+ kfree(data);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit lp3944_remove(struct i2c_client *client)
+{
+ struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->leds_size; i++)
+ switch (data->leds[i].type) {
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/* lp3944 i2c driver struct */
+static const struct i2c_device_id lp3944_id[] = {
+ {"lp3944", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp3944_id);
+
+static struct i2c_driver lp3944_driver = {
+ .driver = {
+ .name = "lp3944",
+ },
+ .probe = lp3944_probe,
+ .remove = __devexit_p(lp3944_remove),
+ .id_table = lp3944_id,
+};
+
+static int __init lp3944_module_init(void)
+{
+ return i2c_add_driver(&lp3944_driver);
+}
+
+static void __exit lp3944_module_exit(void)
+{
+ i2c_del_driver(&lp3944_driver);
+}
+
+module_init(lp3944_module_init);
+module_exit(lp3944_module_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("LP3944 Fun Light Chip");
+MODULE_LICENSE("GPL");
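
For reference, the PSC/PWM encoding used by lp3944_dim_set_period() and
lp3944_dim_set_dutycycle() above is a plain linear scaling into one byte.
A minimal user-space sketch of the same arithmetic, reusing the
LP3944_PERIOD_MAX and LP3944_DUTY_CYCLE_MAX values from the driver (the
helper names below are illustrative, not part of the driver):

	#include <stdio.h>
	#include <stdint.h>

	#define LP3944_PERIOD_MAX	1600	/* ms */
	#define LP3944_DUTY_CYCLE_MAX	100	/* percent */

	/* same scaling as lp3944_dim_set_period(): 0..1600 ms -> 0..255 */
	static uint8_t period_to_psc(unsigned int period_ms)
	{
		return (period_ms * 255) / LP3944_PERIOD_MAX;
	}

	/* same scaling as lp3944_dim_set_dutycycle(): 0..100 % -> 0..255 */
	static uint8_t duty_to_pwm(unsigned int duty_cycle)
	{
		return (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
	}

	int main(void)
	{
		/* 1 Hz blink with equal on/off time, the default picked by
		 * lp3944_led_set_blink() when no pattern is requested */
		unsigned int period = 500 + 500;	/* delay_on + delay_off, ms */
		unsigned int duty = 100 * 500 / period;	/* 50% */

		printf("PSC=%u PWM=%u\n", period_to_psc(period), duty_to_pwm(duty));
		return 0;
	}
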
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244fdca..dba8921240f 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@ struct pca9532_data {
struct pca9532_led leds[16];
struct mutex update_lock;
struct input_dev *idev;
- struct work_struct work;
+ struct work_struct work;
u8 pwm[2];
u8 psc[2];
};
@@ -87,14 +87,14 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
if (b > 0xFF)
return -EINVAL;
data->pwm[pwm] = b;
- data->psc[pwm] = blink;
- return 0;
+ data->psc[pwm] = blink;
+ return 0;
}
static int pca9532_setpwm(struct i2c_client *client, int pwm)
{
- struct pca9532_data *data = i2c_get_clientdata(client);
- mutex_lock(&data->update_lock);
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
data->pwm[pwm]);
i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev,
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, 0, 0, value);
if (err)
return; /* XXX: led api doesn't allow error code? */
}
- schedule_work(&led->work);
+ schedule_work(&led->work);
}
static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
int psc;
- int err = 0;
+ int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
/* led subsystem ask us for a blink rate */
@@ -157,11 +157,11 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
/* Thecus specific: only use PSC/PWM 0 */
psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
- if (err)
- return err;
- schedule_work(&led->work);
- return 0;
+ err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ if (err)
+ return err;
+ schedule_work(&led->work);
+ return 0;
}
static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
else
data->pwm[1] = 0;
- schedule_work(&data->work);
+ schedule_work(&data->work);
- return 0;
+ return 0;
}
static void pca9532_input_work(struct work_struct *work)
{
- struct pca9532_data *data;
- data = container_of(work, struct pca9532_data, work);
+ struct pca9532_data *data;
+ data = container_of(work, struct pca9532_data, work);
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
data->pwm[1]);
@@ -195,11 +195,11 @@ static void pca9532_input_work(struct work_struct *work)
static void pca9532_led_work(struct work_struct *work)
{
- struct pca9532_led *led;
- led = container_of(work, struct pca9532_led, work);
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
- pca9532_setled(led);
+ struct pca9532_led *led;
+ led = container_of(work, struct pca9532_led, work);
+ if (led->state == PCA9532_PWM0)
+ pca9532_setpwm(led->client, 0);
+ pca9532_setled(led);
}
static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@ static int pca9532_configure(struct i2c_client *client,
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set = pca9532_set_brightness;
led->ldev.blink_set = pca9532_set_blink;
- INIT_WORK(&led->work, pca9532_led_work);
+ INIT_WORK(&led->work, pca9532_led_work);
err = led_classdev_register(&client->dev, &led->ldev);
if (err < 0) {
dev_err(&client->dev,
@@ -262,11 +262,11 @@ static int pca9532_configure(struct i2c_client *client,
BIT_MASK(SND_TONE);
data->idev->event = pca9532_event;
input_set_drvdata(data->idev, data);
- INIT_WORK(&data->work, pca9532_input_work);
+ INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
}
@@ -283,13 +283,13 @@ exit:
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
@@ -340,13 +340,13 @@ static int pca9532_remove(struct i2c_client *client)
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735c..9c3138265f8 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -82,7 +82,7 @@ struct lg_cpu {
struct lg_eventfd {
unsigned long addr;
- struct file *event;
+ struct eventfd_ctx *event;
};
struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e29712105..9f9a2953b38 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */
new->map[new->num].addr = addr;
- new->map[new->num].event = eventfd_fget(fd);
+ new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) {
kfree(new);
return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++)
- fput(lg->eventfds->map[i].event);
+ eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b1..3710ff88fc1 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store **store)
{
int r = 0;
- struct dm_exception_store_type *type;
+ struct dm_exception_store_type *type = NULL;
struct dm_exception_store *tmp_store;
char persistent;
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
persistent = toupper(*argv[1]);
- if (persistent != 'P' && persistent != 'N') {
+ if (persistent == 'P')
+ type = get_type("P");
+ else if (persistent == 'N')
+ type = get_type("N");
+ else {
ti->error = "Persistent flag is not P or N";
return -EINVAL;
}
- type = get_type(&persistent);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
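
The dm-exception-store change above is more than a cleanup: persistent is a
single char, so the old get_type(&persistent) handed the lookup a pointer that
is not a NUL-terminated string. A small user-space sketch of the difference,
assuming get_type() compares its argument against type names with strcmp()
(the stub below is illustrative, not the dm code):

	#include <stdio.h>
	#include <string.h>

	/* stand-in for get_type(): look a type up by its NUL-terminated name */
	static const char *get_type(const char *name)
	{
		if (strcmp(name, "P") == 0)
			return "persistent";
		if (strcmp(name, "N") == 0)
			return "transient";
		return NULL;
	}

	int main(void)
	{
		char persistent = 'P';

		/* old code: &persistent points at one char with no terminator,
		 * so a string comparison would read past it */
		(void)persistent;

		/* new code: pass a proper string literal instead */
		printf("%s\n", get_type("P"));
		return 0;
	}
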
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c..2cba557d9e6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (blk_stack_limits(limits, &q->limits, start) < 0)
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
DMWARN("%s: target device %s is misaligned",
dm_device_name(ti->table->md), bdevname(bdev, b));
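
The start << 9 above (and the rdev->data_offset << 9 used throughout the md
changes below) converts a count of 512-byte sectors into bytes, which is
evidently the unit the stacked limits expect here. The arithmetic, as a
trivial stand-alone check:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start_sectors = 2048;	/* example offset, in sectors */
		unsigned long long start_bytes = start_sectors << 9;	/* times 512 */

		printf("%llu sectors = %llu bytes (%llu MiB)\n",
		       start_sectors, start_bytes, start_bytes >> 20);
		return 0;
	}
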
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c6d4ee8921..9acd54a5cff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1017,7 +1017,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
@@ -1045,7 +1045,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ bio_integrity_clone(clone, bio, GFP_NOIO, bs);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 15c8b7b25a9..5810fa906af 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -166,8 +166,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
rdev->sectors = sectors * mddev->chunk_sectors;
}
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09be637d52c..0f4a70c43ff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3573,7 +3573,8 @@ suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3601,7 +3602,8 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL ||
+ mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
@@ -3844,11 +3846,9 @@ static int md_alloc(dev_t dev, char *name)
flush_scheduled_work();
mutex_lock(&disks_mutex);
- if (mddev->gendisk) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -EEXIST;
- }
+ error = -EEXIST;
+ if (mddev->gendisk)
+ goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
@@ -3860,17 +3860,15 @@ static int md_alloc(dev_t dev, char *name)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
- return -EEXIST;
+ goto abort;
}
spin_unlock(&all_mddevs_lock);
}
+ error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mddev->queue) {
- mutex_unlock(&disks_mutex);
- mddev_put(mddev);
- return -ENOMEM;
- }
+ if (!mddev->queue)
+ goto abort;
mddev->queue->queuedata = mddev;
/* Can be unlocked because the queue is new: no concurrency */
@@ -3880,11 +3878,9 @@ static int md_alloc(dev_t dev, char *name)
disk = alloc_disk(1 << shift);
if (!disk) {
- mutex_unlock(&disks_mutex);
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
- mddev_put(mddev);
- return -ENOMEM;
+ goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -3906,16 +3902,22 @@ static int md_alloc(dev_t dev, char *name)
mddev->gendisk = disk;
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
- mutex_unlock(&disks_mutex);
- if (error)
+ if (error) {
+ /* This isn't possible, but as kobject_init_and_add is marked
+ * __must_check, we must do something with the result
+ */
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
- else {
+ error = 0;
+ }
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
- return 0;
+ return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
@@ -6334,10 +6336,16 @@ void md_do_sync(mddev_t *mddev)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- if (j >= mddev->resync_max)
- wait_event(mddev->recovery_wait,
- mddev->resync_max > j
- || kthread_should_stop());
+ while (j >= mddev->resync_max && !kthread_should_stop()) {
+ /* As this condition is controlled by user-space,
+ * we can block indefinitely, so use '_interruptible'
+ * to avoid triggering warnings.
+ */
+ flush_signals(current); /* just in case */
+ wait_event_interruptible(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
if (kthread_should_stop())
goto interrupted;
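
The md_alloc() rework above folds several copies of the unlock/mddev_put/
return sequence into one 'abort' label: set the error code before each step
that can fail, and let a single exit path do the unwinding. A generic
user-space sketch of the same idiom (the names and resources are made up for
illustration, not taken from md):

	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int setup(int **out)
	{
		int error;

		pthread_mutex_lock(&lock);

		error = -EEXIST;
		if (*out)		/* already set up */
			goto abort;

		error = -ENOMEM;
		*out = malloc(sizeof(**out));
		if (!*out)
			goto abort;

		error = 0;
	abort:
		pthread_mutex_unlock(&lock);	/* common unwind path */
		return error;
	}

	int main(void)
	{
		int *p = NULL;
		return setup(&p) ? 1 : 0;
	}
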
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index cbe368fa659..237fe3fd235 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -294,7 +294,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
q = rdev->bdev->bd_disk->queue;
- blk_queue_stack_limits(mddev->queue, q);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
@@ -463,9 +464,9 @@ static int multipath_run (mddev_t *mddev)
disk = conf->multipaths + disk_idx;
disk->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ab4a489d869..335f490dcad 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -170,8 +170,8 @@ static int create_strip_zones(mddev_t *mddev)
}
dev[j] = rdev1;
- blk_queue_stack_limits(mddev->queue,
- rdev1->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev1->bdev,
+ rdev1->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -250,6 +250,11 @@ static int create_strip_zones(mddev_t *mddev)
mddev->chunk_sectors << 9);
goto abort;
}
+
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
printk(KERN_INFO "raid0: done.\n");
mddev->private = conf;
return 0;
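
The blk_queue_io_min()/blk_queue_io_opt() calls added to raid0 here (and to
raid10 and raid5 further down) export the chunk size and the full-stripe size
as I/O hints. With illustrative numbers (a 64 KiB chunk, i.e. chunk_sectors =
128, on 4 member disks) the values work out as:

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk_sectors = 128;	/* illustrative: 64 KiB chunk */
		unsigned int raid_disks = 4;		/* illustrative */

		unsigned int io_min = chunk_sectors << 9;	/* one chunk, bytes */
		unsigned int io_opt = io_min * raid_disks;	/* one full stripe */

		printf("io_min=%u io_opt=%u\n", io_min, io_opt);	/* 65536, 262144 */
		return 0;
	}
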
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89939a7aef5..0569efba0c0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1123,8 +1123,8 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (mirror = first; mirror <= last; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -1988,9 +1988,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10..7298a5e5a18 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for ( ; mirror <= last ; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
static int run(mddev_t *mddev)
{
conf_t *conf;
- int i, disk_idx;
+ int i, disk_idx, chunk_size;
mirror_info_t *disk;
mdk_rdev_t *rdev;
int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
spin_lock_init(&conf->device_lock);
mddev->queue->queue_lock = &conf->device_lock;
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->raid_disks % conf->near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ else
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks / conf->near_copies));
+
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
-
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f9f991e6e13..37835538b58 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3699,13 +3699,21 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
}
- /* FIXME what if we get a false positive because these
- * are being updated.
- */
- if (logical_sector >= mddev->suspend_lo &&
+
+ if (bio_data_dir(bi) == WRITE &&
+ logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
- schedule();
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_for_overlap,
+ &w, TASK_INTERRUPTIBLE);
+ if (logical_sector >= mddev->suspend_lo &&
+ logical_sector < mddev->suspend_hi)
+ schedule();
goto retry;
}
@@ -4452,7 +4460,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
static int run(mddev_t *mddev)
{
raid5_conf_t *conf;
- int working_disks = 0;
+ int working_disks = 0, chunk_size;
mdk_rdev_t *rdev;
if (mddev->recovery_cp != MaxSector)
@@ -4607,6 +4615,14 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
+ chunk_size = mddev->chunk_sectors << 9;
+ blk_queue_io_min(mddev->queue, chunk_size);
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->raid_disks - conf->max_degraded));
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
return 0;
abort:
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae..a461017ce5c 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_spi_host *host;
int status;
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 5011fa73f91..1479da6d3aa 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -194,7 +194,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].name = extra_mem;
extra_mem += name_len + 1;
- dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ dbg(("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
this_part,
parts[this_part].name,
parts[this_part].offset,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 59c46126a5c..ae5fe91867e 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -54,7 +54,7 @@
#define SR_SRWD 0x80 /* SR write protect */
/* Define max times to check status register before we give up. */
-#define MAX_READY_WAIT_JIFFIES (10 * HZ) /* eg. M25P128 specs 6s max sector erase */
+#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
#define CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 73f05227dc8..d8cf29c01cc 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -226,7 +226,7 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
if (!desperate && inftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
"EUNs (%d)\n", inftl->numfreeEUNs);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -281,7 +281,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
silly = MAX_LOOPS;
while (thisEUN < inftl->nb_blocks) {
for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
- if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ if ((BlockMap[block] != BLOCK_NIL) ||
+ BlockDeleted[block])
continue;
if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
@@ -525,7 +526,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
if (!silly--) {
printk(KERN_WARNING "INFTL: infinite loop in "
"Virtual Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -549,7 +550,7 @@ hitused:
* waiting to be picked up. We're going to have to fold
* a chain to make room.
*/
- thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+ thisEUN = INFTL_makefreeblock(inftl, BLOCK_NIL);
/*
* Hopefully we free something, lets try again.
@@ -631,7 +632,7 @@ hitused:
printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
"Unit Chain 0x%x\n", thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/*
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index b08a798ee25..2aac41bde8b 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -42,10 +42,8 @@
#include <mach/hardware.h>
#include <asm/system.h>
-#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
-
struct armflash_subdev_info {
- char name[SUBDEV_NAME_SIZE];
+ char *name;
struct mtd_info *mtd;
struct map_info map;
struct flash_platform_data *plat;
@@ -134,6 +132,8 @@ static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
map_destroy(subdev->mtd);
if (subdev->map.virt)
iounmap(subdev->map.virt);
+ kfree(subdev->name);
+ subdev->name = NULL;
release_mem_region(subdev->map.phys, subdev->map.size);
}
@@ -177,16 +177,22 @@ static int armflash_probe(struct platform_device *dev)
if (nr == 1)
/* No MTD concatenation, just use the default name */
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
- dev_name(&dev->dev));
+ subdev->name = kstrdup(dev_name(&dev->dev), GFP_KERNEL);
else
- snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
- dev_name(&dev->dev), i);
+ subdev->name = kasprintf(GFP_KERNEL, "%s-%d",
+ dev_name(&dev->dev), i);
+ if (!subdev->name) {
+ err = -ENOMEM;
+ break;
+ }
subdev->plat = plat;
err = armflash_subdev_probe(subdev, res);
- if (err)
+ if (err) {
+ kfree(subdev->name);
+ subdev->name = NULL;
break;
+ }
}
info->nr_subdev = i;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2802992b39d..20c828ba940 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -534,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
&num_partitions);
if ((!partitions) || (num_partitions == 0)) {
- printk(KERN_ERR "atmel_nand: No parititions defined, or unsupported device.\n");
+ printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
res = ENXIO;
goto err_no_partitions;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0cd76f89f4b..ebd07e95b81 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -11,6 +11,8 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -541,7 +543,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status, state = this->state;
+ int status = NAND_STATUS_FAIL, state = this->state;
if (state == FL_ERASING)
timeo += (HZ * 400) / 1000;
@@ -556,8 +558,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
while (time_before(jiffies, timeo)) {
status = __raw_readb(this->IO_ADDR_R);
- if (!(status & 0x40))
+ if (status & NAND_STATUS_READY)
break;
+ cond_resched();
}
return status;
}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index e3f8495a94c..fb86cacd5bd 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -208,7 +208,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
- return 0xffff;
+ return BLOCK_NIL;
}
/* Scan for a free block */
@@ -230,11 +230,11 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
printk("Argh! No free blocks found! LastFreeEUN = %d, "
"FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
- return 0xffff;
+ return BLOCK_NIL;
}
} while (pot != nftl->LastFreeEUN);
- return 0xffff;
+ return BLOCK_NIL;
}
static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
@@ -431,7 +431,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
+ oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = BLOCK_NIL;
nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
@@ -515,7 +515,7 @@ static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
if (ChainLength < 2) {
printk(KERN_WARNING "No Virtual Unit Chains available for folding. "
"Failing request\n");
- return 0xffff;
+ return BLOCK_NIL;
}
return NFTL_foldchain (nftl, LongestChain, pendingblock);
@@ -578,7 +578,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
/* Skip to next block in chain */
@@ -601,7 +601,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
//u16 startEUN = nftl->EUNtable[thisVUC];
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
- writeEUN = NFTL_makefreeblock(nftl, 0xffff);
+ writeEUN = NFTL_makefreeblock(nftl, BLOCK_NIL);
if (writeEUN == BLOCK_NIL) {
/* OK, we accept that the above comment is
@@ -673,7 +673,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n",
thisVUC);
- return 0xffff;
+ return BLOCK_NIL;
}
static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index e4afbd628c2..607007d75b6 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -281,6 +281,8 @@ static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 619c6583e1a..4003955d7a9 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -365,6 +365,8 @@ static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_PHY)
adapter->wol |= AT_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f703758f0a6..5b4bf3d2cdc 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -73,7 +73,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define BE_MAX_LRO_DESCRIPTORS 16
-#define BE_MAX_FRAGS_PER_FRAME 16
+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
struct be_dma_mem {
void *va;
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9592f22e4c8..cccc5419ad7 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -162,8 +162,8 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
return -EINVAL;
adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
- if (adapter->max_rx_coal > MAX_SKB_FRAGS)
- adapter->max_rx_coal = MAX_SKB_FRAGS - 1;
+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
/* if AIC is being turned on now, start with an EQD of 0 */
if (rx_eq->enable_aic == 0 &&
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66c10c87f51..308eb09ca56 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -666,7 +666,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd;
+ u16 rxq_idx, i, num_rcvd, j;
u32 pktsize, hdr_len, curr_frag_len;
u8 *start;
@@ -709,22 +709,33 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
/* More frags present for this completion */
pktsize -= curr_frag_len; /* account for above copied frag */
- for (i = 1; i < num_rcvd; i++) {
+ for (i = 1, j = 0; i < num_rcvd; i++) {
index_inc(&rxq_idx, rxq->len);
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(pktsize, rx_frag_size);
- skb_shinfo(skb)->frags[i].page = page_info->page;
- skb_shinfo(skb)->frags[i].page_offset = page_info->page_offset;
- skb_shinfo(skb)->frags[i].size = curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (page_info->page_offset == 0) {
+ /* Fresh page */
+ j++;
+ skb_shinfo(skb)->frags[j].page = page_info->page;
+ skb_shinfo(skb)->frags[j].page_offset =
+ page_info->page_offset;
+ skb_shinfo(skb)->frags[j].size = 0;
+ skb_shinfo(skb)->nr_frags++;
+ } else {
+ put_page(page_info->page);
+ }
+
+ skb_shinfo(skb)->frags[j].size += curr_frag_len;
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- skb_shinfo(skb)->nr_frags++;
pktsize -= curr_frag_len;
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
done:
be_rx_stats_update(adapter, pktsize, num_rcvd);
@@ -786,7 +797,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
struct be_queue_info *rxq = &adapter->rx_obj.q;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
- u16 i, rxq_idx = 0, vid;
+ u16 i, rxq_idx = 0, vid, j;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
@@ -794,20 +805,28 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
remaining = pkt_size;
- for (i = 0; i < num_rcvd; i++) {
+ for (i = 0, j = -1; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
- rx_frags[i].page = page_info->page;
- rx_frags[i].page_offset = page_info->page_offset;
- rx_frags[i].size = curr_frag_len;
- remaining -= curr_frag_len;
+ /* Coalesce all frags from the same physical page in one slot */
+ if (i == 0 || page_info->page_offset == 0) {
+ /* First frag or Fresh page */
+ j++;
+ rx_frags[j].page = page_info->page;
+ rx_frags[j].page_offset = page_info->page_offset;
+ rx_frags[j].size = 0;
+ } else {
+ put_page(page_info->page);
+ }
+ rx_frags[j].size += curr_frag_len;
+ remaining -= curr_frag_len;
index_inc(&rxq_idx, rxq->len);
-
memset(page_info, 0, sizeof(*page_info));
}
+ BUG_ON(j > MAX_SKB_FRAGS);
if (likely(!vlanf)) {
lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1..951714a7f90 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->link_vars.link_up;
+}
+
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2x_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 58afafbd3b9..fd5e32cbcb8 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1097,7 +1097,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_start_xmit = cpmac_start_xmit,
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_multicast_list = cpmac_set_multicast_list,
- .ndo_so_ioctl = cpmac_ioctl,
+ .ndo_do_ioctl = cpmac_ioctl,
.ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5..5b8cbdb4b52 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
+ if (buffer_info->dma) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
break; /* while !buffer_info->skb */
}
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b..63415bb6f48 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000e_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 3af581303ca..d167090248e 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -188,7 +188,7 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
}
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
{
struct gfar __iomem *enet_regs;
@@ -206,7 +206,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs)
#endif
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
struct device_node *np = NULL;
@@ -291,7 +291,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#ifdef CONFIG_GIANFAR
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs);
#else
err = -ENODEV;
@@ -299,7 +299,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#ifdef CONFIG_UCC_GETH
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
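
The fsl_pq_mdio change above swaps bare #ifdef CONFIG_GIANFAR /
CONFIG_UCC_GETH tests for the built-in-or-module form, since Kconfig defines
CONFIG_FOO_MODULE rather than CONFIG_FOO when an option is built as a module.
A minimal illustration of the pattern, with CONFIG_FOO as a placeholder
symbol rather than a real Kconfig option:

	/* compile the block when the option is built in or built as a module */
	#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	static int foo_supported = 1;
	#else
	static int foo_supported;
	#endif

	int main(void)
	{
		return foo_supported ? 0 : 1;
	}
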
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624a..be480292aba 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
cleaned = true;
cleaned_count++;
+ /* this is the fast path for the non-packet split case */
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len +
- NET_IP_ALIGN,
+ adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
bufsz = adapter->rx_ps_hdr_size;
else
bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
igb_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba..911c082cee5 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
return 0;
}
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
if (err)
goto err_mem_3;
- dev->hard_start_xmit = bfin_sir_hard_xmit;
- dev->open = bfin_sir_open;
- dev->stop = bfin_sir_stop;
- dev->do_ioctl = bfin_sir_ioctl;
- dev->get_stats = bfin_sir_stats;
- dev->irq = sir_port->irq;
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f2..0f7b6a3a2e6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
s32 err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
} else {
/* in this case we currently only support 10Gb/FULL */
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
(ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db3..5588ef493a3 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb = netdev_alloc_skb(adapter->netdev,
+ (rx_ring->rx_buf_len +
+ NET_IP_ALIGN));
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
skb_reserve(skb, NET_IP_ALIGN);
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
*/
err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return err;
}
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- rx_buffer_info->page_dma = 0;
+ if (rx_buffer_info->page_dma) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize "
+ "because an unsupported SFP+ module type "
+ "was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
u32 autoneg;
adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg,
&hw->mac.autoneg);
if (hw->mac.ops.setup_link_speed)
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return;
}
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index dc45e9856c3..6851bdb2ce2 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -14,6 +14,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
+MODULE_DESCRIPTION("Generic support for MDIO-compatible transceivers");
+MODULE_AUTHOR("Copyright 2006-2009 Solarflare Communications Inc.");
+MODULE_LICENSE("GPL");
+
/**
* mdio45_probe - probe for an MDIO (clause 45) device
* @mdio: MDIO interface
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 341882f959f..a2d82ddb3b4 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -865,8 +865,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 ioaddr, boguscnt = RX_RING_SIZE;
- u32 intr_status = 0;
+ u32 ioaddr, intr_status = 0;
ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
@@ -901,12 +900,6 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & cd->eesr_err_check)
sh_eth_error(ndev, intr_status);
- if (--boguscnt < 0) {
- printk(KERN_WARNING
- "%s: Too much work at interrupt, status=0x%4.4x.\n",
- ndev->name, intr_status);
- }
-
other_irq:
spin_unlock(&mdp->lock);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 7681d28c53d..daf961ab68b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2495,7 +2495,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
if (likely(status >> 16 == (status & 0xffff))) {
skb = sky2->rx_ring[sky2->rx_next].skb;
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = status & 0xffff;
+ skb->csum = le16_to_cpu(status);
} else {
printk(KERN_NOTICE PFX "%s: hardware receive "
"checksum problem (status = %#x)\n",
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3..cd35d50e46d 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return crc == crc2;
if (unlikely(crc != crc2)) {
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
dev_kfree_skb_any(skb2);
} else
usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93..1d3730d6690 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (unlikely(status & 0xbf)) {
- if (status & 0x01) dev->stats.rx_fifo_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x20) dev->stats.rx_missed_errors++;
- if (status & 0x90) dev->stats.rx_length_errors++;
+ if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+ if (status & 0x02) dev->net->stats.rx_crc_errors++;
+ if (status & 0x04) dev->net->stats.rx_frame_errors++;
+ if (status & 0x20) dev->net->stats.rx_missed_errors++;
+ if (status & 0x90) dev->net->stats.rx_length_errors++;
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6..aeb1ab03a9e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
dbg("rx framesize %d range %d..%d mtu %d", skb->len,
net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
}
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
hdr_len = le16_to_cpup(&header->hdr_len);
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("packet too big, %d", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("header too short, %d", hdr_len);
nc_ensure_sync(dev);
return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad pad");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad packet len %d (expected %d)",
skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
- dev->stats.rx_fifo_errors++;
+ dev->net->stats.rx_fifo_errors++;
dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
le16_to_cpu(header->packet_id),
le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950..2232232b798 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
|| skb->len < msg_len
|| (data_offset + data_len + 8) > msg_len)) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
le32_to_cpu(hdr->msg_type),
msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22d..fe045896406 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(header & RX_STS_ES_)) {
if (netif_msg_rx_err(dev))
devdbg(dev, "Error header=0x%08x", header);
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
if (header & RX_STS_CRC_) {
- dev->stats.rx_crc_errors++;
+ dev->net->stats.rx_crc_errors++;
} else {
if (header & (RX_STS_TL_ | RX_STS_RF_))
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
if ((header & RX_STS_LE_) &&
(!(header & RX_STS_FT_)))
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_length_errors++;
}
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a031..edfd9e10ceb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
int status;
skb->protocol = eth_type_trans (skb, dev->net);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
if (netif_msg_rx_status (dev))
devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (netif_msg_rx_err (dev))
devdbg (dev, "drop");
error:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
}
}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx length %d", skb->len);
}
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
* storm, recovering as needed.
*/
case -EPIPE:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
// FALLTHROUGH
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
case -EPROTO:
case -ETIME:
case -EILSEQ:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
- dev->stats.rx_over_errors++;
+ dev->net->stats.rx_over_errors++;
// FALLTHROUGH
default:
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx status %d", urb_status);
break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
if (netif_msg_ifdown (dev))
devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- dev->stats.rx_packets, dev->stats.tx_packets,
- dev->stats.rx_errors, dev->stats.tx_errors
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors
);
// ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += entry->length;
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
} else {
- dev->stats.tx_errors++;
+ dev->net->stats.tx_errors++;
switch (urb->status) {
case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
devdbg (dev, "drop, code %d", retval);
drop:
retval = NET_XMIT_SUCCESS;
- dev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
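
The cdc_eem, dm9601, net1080, rndis_host, smsc95xx and usbnet hunks above all apply the same change: RX/TX counters move from the driver-private copy to the net_device's own stats, reached through dev->net. Below is a minimal standalone model of that pattern; the struct layout is reduced to the fields used here and is an assumption, not the kernel API.

/* Standalone model of the usbnet stats move: counters live on the
 * generic net device, and the USB glue only keeps a pointer to it.
 * Names loosely mirror the kernel structs; compile with: cc stats.c
 */
#include <stdio.h>

struct net_device_stats {
	unsigned long rx_packets, rx_bytes, rx_errors;
};

struct net_device {
	struct net_device_stats stats;	/* owned by the networking core */
};

struct usbnet {
	struct net_device *net;		/* driver only points at it */
};

/* rx path: account against dev->net->stats, not a private copy */
static void rx_ok(struct usbnet *dev, unsigned int len)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += len;
}

static void rx_bad(struct usbnet *dev)
{
	dev->net->stats.rx_errors++;
}

int main(void)
{
	struct net_device ndev = { { 0, 0, 0 } };
	struct usbnet dev = { &ndev };

	rx_ok(&dev, 1514);
	rx_bad(&dev);
	printf("rx %lu pkts, %lu bytes, %lu errors\n",
	       ndev.stats.rx_packets, ndev.stats.rx_bytes,
	       ndev.stats.rx_errors);
	return 0;
}
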
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 87197dd9c78..1097c72e44d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -208,11 +208,14 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
- struct veth_priv *priv = netdev_priv(dev);
- struct net_device_stats *dev_stats = &dev->stats;
- unsigned int cpu;
+ struct veth_priv *priv;
+ struct net_device_stats *dev_stats;
+ int cpu;
struct veth_net_stats *stats;
+ priv = netdev_priv(dev);
+ dev_stats = &dev->stats;
+
dev_stats->rx_packets = 0;
dev_stats->tx_packets = 0;
dev_stats->rx_bytes = 0;
@@ -220,17 +223,16 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
dev_stats->tx_dropped = 0;
dev_stats->rx_dropped = 0;
- if (priv->stats)
- for_each_online_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
- dev_stats->rx_packets += stats->rx_packets;
- dev_stats->tx_packets += stats->tx_packets;
- dev_stats->rx_bytes += stats->rx_bytes;
- dev_stats->tx_bytes += stats->tx_bytes;
- dev_stats->tx_dropped += stats->tx_dropped;
- dev_stats->rx_dropped += stats->rx_dropped;
- }
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ dev_stats->rx_dropped += stats->rx_dropped;
+ }
return dev_stats;
}
@@ -257,8 +259,6 @@ static int veth_close(struct net_device *dev)
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
- free_percpu(priv->stats);
- priv->stats = NULL;
return 0;
}
@@ -289,6 +289,15 @@ static int veth_dev_init(struct net_device *dev)
return 0;
}
+static void veth_dev_free(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ free_percpu(priv->stats);
+ free_netdev(dev);
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -306,7 +315,7 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
- dev->destructor = free_netdev;
+ dev->destructor = veth_dev_free;
}
/*
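
The veth changes do two related things: get_stats now sums the per-CPU counters unconditionally, and the per-CPU block is freed by a dedicated destructor (veth_dev_free) instead of on close, so the counters stay valid while the interface is down. A standalone sketch of that split follows; the per-CPU area is modelled with a plain array, and NR_CPUS and the struct layout are assumptions.

/* Sketch: sum per-CPU counters on demand, free them only in the
 * destructor. Compiles as ordinary user-space C.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct veth_net_stats { unsigned long rx_packets, tx_packets; };

struct veth_priv { struct veth_net_stats *stats; /* NR_CPUS entries */ };

static void veth_get_stats(struct veth_priv *priv,
			   struct veth_net_stats *sum)
{
	int cpu;

	sum->rx_packets = sum->tx_packets = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		sum->rx_packets += priv->stats[cpu].rx_packets;
		sum->tx_packets += priv->stats[cpu].tx_packets;
	}
}

/* destructor: the only place the per-CPU block is released */
static void veth_dev_free(struct veth_priv *priv)
{
	free(priv->stats);
	priv->stats = NULL;
}

int main(void)
{
	struct veth_priv priv;
	struct veth_net_stats sum;
	int cpu;

	priv.stats = calloc(NR_CPUS, sizeof(*priv.stats));
	if (!priv.stats)
		return 1;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		priv.stats[cpu].rx_packets = cpu + 1;	/* 1+2+3+4 = 10 */

	veth_get_stats(&priv, &sum);
	printf("rx_packets = %lu\n", sum.rx_packets);
	veth_dev_free(&priv);
	return 0;
}
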
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd4..2597145a066 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ netmos_9901,
quatech_sppxp100,
};
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
-
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e53eacd75c8..53075424a43 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,7 +39,6 @@
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include <asm/e820.h>
#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -57,14 +56,32 @@
#define MAX_AGAW_WIDTH 64
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
+#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
-#ifndef PHYSICAL_PAGE_MASK
-#define PHYSICAL_PAGE_MASK PAGE_MASK
-#endif
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
+{
+ return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+
+static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+{
+ return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+ return mm_to_dma_pfn(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+ return page_to_dma_pfn(virt_to_page(p));
+}
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
@@ -205,12 +222,17 @@ static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
- return (pte->val & VTD_PAGE_MASK);
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK;
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
}
-static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
- pte->val |= (addr & VTD_PAGE_MASK);
+ pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}
static inline bool dma_pte_present(struct dma_pte *pte)
@@ -218,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
/*
* This domain is a static identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -245,7 +272,6 @@ struct dmar_domain {
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
- spinlock_t mapping_lock; /* page table lock */
int gaw; /* max guest address width */
/* adjusted guest address width, 0 is level 2 30-bit */
@@ -649,80 +675,78 @@ static inline int width_to_agaw(int width)
static inline unsigned int level_to_offset_bits(int level)
{
- return (12 + (level - 1) * LEVEL_STRIDE);
+ return (level - 1) * LEVEL_STRIDE;
}
-static inline int address_level_offset(u64 addr, int level)
+static inline int pfn_level_offset(unsigned long pfn, int level)
{
- return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline u64 level_mask(int level)
+static inline unsigned long level_mask(int level)
{
- return ((u64)-1 << level_to_offset_bits(level));
+ return -1UL << level_to_offset_bits(level);
}
-static inline u64 level_size(int level)
+static inline unsigned long level_size(int level)
{
- return ((u64)1 << level_to_offset_bits(level));
+ return 1UL << level_to_offset_bits(level);
}
-static inline u64 align_to_level(u64 addr, int level)
+static inline unsigned long align_to_level(unsigned long pfn, int level)
{
- return ((addr + level_size(level) - 1) & level_mask(level));
+ return (pfn + level_size(level) - 1) & level_mask(level);
}
-static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
+static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
+ unsigned long pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;
- unsigned long flags;
BUG_ON(!domain->pgd);
-
- addr &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
- spin_lock_irqsave(&domain->mapping_lock, flags);
while (level > 0) {
void *tmp_page;
- offset = address_level_offset(addr, level);
+ offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (level == 1)
break;
if (!dma_pte_present(pte)) {
+ uint64_t pteval;
+
tmp_page = alloc_pgtable_page();
- if (!tmp_page) {
- spin_unlock_irqrestore(&domain->mapping_lock,
- flags);
+ if (!tmp_page)
return NULL;
+
+ domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
+ pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ if (cmpxchg64(&pte->val, 0ULL, pteval)) {
+ /* Someone else set it while we were thinking; use theirs. */
+ free_pgtable_page(tmp_page);
+ } else {
+ dma_pte_addr(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
}
- domain_flush_cache(domain, tmp_page, PAGE_SIZE);
- dma_set_pte_addr(pte, virt_to_phys(tmp_page));
- /*
- * high level table always sets r/w, last level page
- * table control read/write
- */
- dma_set_pte_readable(pte);
- dma_set_pte_writable(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
}
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
- spin_unlock_irqrestore(&domain->mapping_lock, flags);
return pte;
}
/* return address's pte at specific level */
-static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
- int level)
+static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
+ unsigned long pfn,
+ int level)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -730,7 +754,7 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
parent = domain->pgd;
while (level <= total) {
- offset = address_level_offset(addr, total);
+ offset = pfn_level_offset(pfn, total);
pte = &parent[offset];
if (level == total)
return pte;
@@ -743,74 +767,82 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
return NULL;
}
-/* clear one page's page table */
-static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
-{
- struct dma_pte *pte = NULL;
-
- /* get last level pte */
- pte = dma_addr_level_pte(domain, addr, 1);
-
- if (pte) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
-}
-
/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
+static void dma_pte_clear_range(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- int npages;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
- /* in case it's partial page */
- start &= PAGE_MASK;
- end = PAGE_ALIGN(end);
- npages = (end - start) / VTD_PAGE_SIZE;
+ /* we don't need lock here; nobody else touches the iova range */
+ while (start_pfn <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ if (!pte) {
+ start_pfn = align_to_level(start_pfn + 1, 2);
+ continue;
+ }
+ do {
+ dma_clear_pte(pte);
+ start_pfn++;
+ pte++;
+ } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
- /* we don't need lock here, nobody else touches the iova range */
- while (npages--) {
- dma_pte_clear_one(domain, start);
- start += VTD_PAGE_SIZE;
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
}
}
/* free page table pages. last level pte should already be cleared */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
- u64 start, u64 end)
+ unsigned long start_pfn,
+ unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw);
- struct dma_pte *pte;
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ struct dma_pte *first_pte, *pte;
int total = agaw_to_level(domain->agaw);
int level;
- u64 tmp;
+ unsigned long tmp;
- start &= (((u64)1) << addr_width) - 1;
- end &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
- /* we don't need lock here, nobody else touches the iova range */
+ /* We don't need lock here; nobody else touches the iova range */
level = 2;
while (level <= total) {
- tmp = align_to_level(start, level);
- if (tmp >= end || (tmp + level_size(level) > end))
+ tmp = align_to_level(start_pfn, level);
+
+ /* If we can't even clear one PTE at this level, we're done */
+ if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp < end) {
- pte = dma_addr_level_pte(domain, tmp, level);
- if (pte) {
- free_pgtable_page(
- phys_to_virt(dma_pte_addr(pte)));
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
+ while (tmp + level_size(level) - 1 <= last_pfn) {
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ if (!pte) {
+ tmp = align_to_level(tmp + 1, level + 1);
+ continue;
}
- tmp += level_size(level);
+ do {
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+ dma_clear_pte(pte);
+ }
+ pte++;
+ tmp += level_size(level);
+ } while (!first_pte_in_page(pte) &&
+ tmp + level_size(level) - 1 <= last_pfn);
+
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+
}
level++;
}
/* free pgd */
- if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
free_pgtable_page(domain->pgd);
domain->pgd = NULL;
}
@@ -1036,11 +1068,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages)
+ unsigned long pfn, unsigned int pages)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
+ uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
- BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
/*
@@ -1055,7 +1087,12 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
else
iommu->flush.flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH);
- if (did)
+
+ /*
+ * In caching mode, domain ID 0 is reserved for non-present to present
+ * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ */
+ if (!cap_caching_mode(iommu->cap) || did)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
@@ -1280,7 +1317,6 @@ static void dmar_init_reserved_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *iova;
int i;
- u64 addr, size;
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
@@ -1303,12 +1339,9 @@ static void dmar_init_reserved_ranges(void)
r = &pdev->resource[i];
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
- addr = r->start;
- addr &= PHYSICAL_PAGE_MASK;
- size = r->end - addr;
- size = PAGE_ALIGN(size);
- iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
- IOVA_PFN(size + addr) - 1);
+ iova = reserve_iova(&reserved_iova_list,
+ IOVA_PFN(r->start),
+ IOVA_PFN(r->end));
if (!iova)
printk(KERN_ERR "Reserve iova failed\n");
}
@@ -1342,7 +1375,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -1389,7 +1421,6 @@ static void domain_exit(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- u64 end;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@@ -1398,14 +1429,12 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
for_each_active_iommu(iommu, drhd)
if (test_bit(iommu->seq_id, &domain->iommu_bmp))
@@ -1619,42 +1648,86 @@ static int domain_context_mapped(struct pci_dev *pdev)
tmp->devfn);
}
-static int
-domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
- u64 hpa, size_t size, int prot)
+static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
{
- u64 start_pfn, end_pfn;
- struct dma_pte *pte;
- int index;
- int addr_width = agaw_to_width(domain->agaw);
+ struct dma_pte *first_pte = NULL, *pte = NULL;
+ phys_addr_t uninitialized_var(pteval);
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned long sg_res;
- hpa &= (((u64)1) << addr_width) - 1;
+ BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
- iova &= PAGE_MASK;
- start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
- end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
- index = 0;
- while (start_pfn < end_pfn) {
- pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
- if (!pte)
- return -ENOMEM;
+
+ prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
+
+ if (sg)
+ sg_res = 0;
+ else {
+ sg_res = nr_pages + 1;
+ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
+ }
+
+ while (nr_pages--) {
+ uint64_t tmp;
+
+ if (!sg_res) {
+ sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+ sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+ sg->dma_length = sg->length;
+ pteval = page_to_phys(sg_page(sg)) | prot;
+ }
+ if (!pte) {
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ if (!pte)
+ return -ENOMEM;
+ }
/* We don't need lock here, nobody else
* touches the iova range
*/
- BUG_ON(dma_pte_addr(pte));
- dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
- dma_set_pte_prot(pte, prot);
- if (prot & DMA_PTE_SNP)
- dma_set_pte_snp(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- start_pfn++;
- index++;
+ tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
+ if (tmp) {
+ static int dumps = 5;
+ printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+ iov_pfn, tmp, (unsigned long long)pteval);
+ if (dumps) {
+ dumps--;
+ debug_dma_dump_mappings(NULL);
+ }
+ WARN_ON(1);
+ }
+ pte++;
+ if (!nr_pages || first_pte_in_page(pte)) {
+ domain_flush_cache(domain, first_pte,
+ (void *)pte - (void *)first_pte);
+ pte = NULL;
+ }
+ iov_pfn++;
+ pteval += VTD_PAGE_SIZE;
+ sg_res--;
+ if (!sg_res)
+ sg = sg_next(sg);
}
return 0;
}
+static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+}
+
+static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phys_pfn, unsigned long nr_pages,
+ int prot)
+{
+ return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+}
+
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu)
@@ -1845,58 +1918,61 @@ error:
static int iommu_identity_mapping;
+static int iommu_domain_identity_map(struct dmar_domain *domain,
+ unsigned long long start,
+ unsigned long long end)
+{
+ unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
+ unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
+
+ if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
+ dma_to_mm_pfn(last_vpfn))) {
+ printk(KERN_ERR "IOMMU: reserve iova failed\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
+ start, end, domain->id);
+ /*
+ * RMRR range might have overlap with physical memory range,
+ * clear it first
+ */
+ dma_pte_clear_range(domain, first_vpfn, last_vpfn);
+
+ return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
+ last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
+}
+
static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start,
unsigned long long end)
{
struct dmar_domain *domain;
- unsigned long size;
- unsigned long long base;
int ret;
printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
- if (iommu_identity_mapping)
- domain = si_domain;
- else
- /* page table init */
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
- /* The address might not be aligned */
- base = start & PAGE_MASK;
- size = end - base;
- size = PAGE_ALIGN(size);
- if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
- IOVA_PFN(base + size) - 1)) {
- printk(KERN_ERR "IOMMU: reserve iova failed\n");
- ret = -ENOMEM;
- goto error;
- }
-
- pr_debug("Mapping reserved region %lx@%llx for %s\n",
- size, base, pci_name(pdev));
- /*
- * RMRR range might have overlap with physical memory range,
- * clear it first
- */
- dma_pte_clear_range(domain, base, base + size);
-
- ret = domain_page_mapping(domain, base, base, size,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ ret = iommu_domain_identity_map(domain, start, end);
if (ret)
goto error;
/* context entry init */
ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- if (!ret)
- return 0;
-error:
+ if (ret)
+ goto error;
+
+ return 0;
+
+ error:
domain_exit(domain);
return ret;
-
}
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
@@ -1908,64 +1984,6 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
rmrr->end_address + 1);
}
-#ifdef CONFIG_DMAR_GFX_WA
-struct iommu_prepare_data {
- struct pci_dev *pdev;
- int ret;
-};
-
-static int __init iommu_prepare_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- struct iommu_prepare_data *data;
-
- data = (struct iommu_prepare_data *)datax;
-
- data->ret = iommu_prepare_identity_map(data->pdev,
- start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
- return data->ret;
-
-}
-
-static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
-{
- int nid;
- struct iommu_prepare_data data;
-
- data.pdev = pdev;
- data.ret = 0;
-
- for_each_online_node(nid) {
- work_with_active_regions(nid, iommu_prepare_work_fn, &data);
- if (data.ret)
- return data.ret;
- }
- return data.ret;
-}
-
-static void __init iommu_prepare_gfx_mapping(void)
-{
- struct pci_dev *pdev = NULL;
- int ret;
-
- for_each_pci_dev(pdev) {
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
- !IS_GFX_DEVICE(pdev))
- continue;
- printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
- pci_name(pdev));
- ret = iommu_prepare_with_active_regions(pdev);
- if (ret)
- printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
- }
-}
-#else /* !CONFIG_DMAR_GFX_WA */
-static inline void iommu_prepare_gfx_mapping(void)
-{
- return;
-}
-#endif
-
#ifdef CONFIG_DMAR_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
@@ -1976,12 +1994,12 @@ static inline void iommu_prepare_isa(void)
if (!pdev)
return;
- printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
+ printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
if (ret)
- printk(KERN_ERR "IOMMU: Failed to create 0-64M identity map, "
- "floppy might not work\n");
+ printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
+ "floppy might not work\n");
}
#else
@@ -2009,16 +2027,30 @@ static int __init init_context_pass_through(void)
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
+static int __init si_domain_work_fn(unsigned long start_pfn,
+ unsigned long end_pfn, void *datax)
+{
+ int *ret = datax;
+
+ *ret = iommu_domain_identity_map(si_domain,
+ (uint64_t)start_pfn << PAGE_SHIFT,
+ (uint64_t)end_pfn << PAGE_SHIFT);
+ return *ret;
+
+}
+
static int si_domain_init(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- int ret = 0;
+ int nid, ret = 0;
si_domain = alloc_domain();
if (!si_domain)
return -EFAULT;
+ pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
@@ -2035,6 +2067,12 @@ static int si_domain_init(void)
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ for_each_online_node(nid) {
+ work_with_active_regions(nid, si_domain_work_fn, &ret);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -2081,7 +2119,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
static int iommu_prepare_static_identity_mapping(void)
{
- int i;
struct pci_dev *pdev = NULL;
int ret;
@@ -2089,20 +2126,14 @@ static int iommu_prepare_static_identity_mapping(void)
if (ret)
return -EFAULT;
- printk(KERN_INFO "IOMMU: Setting identity map:\n");
for_each_pci_dev(pdev) {
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
-
- if (ei->type == E820_RAM) {
- ret = iommu_prepare_identity_map(pdev,
- ei->addr, ei->addr + ei->size);
- if (ret) {
- printk(KERN_INFO "1:1 mapping to one domain failed.\n");
- return -EFAULT;
- }
- }
- }
+ printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
+ pci_name(pdev));
+
+ ret = domain_context_mapping(si_domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
+ if (ret)
+ return ret;
ret = domain_add_dev_info(si_domain, pdev);
if (ret)
return ret;
@@ -2293,8 +2324,6 @@ int __init init_dmars(void)
}
}
- iommu_prepare_gfx_mapping();
-
iommu_prepare_isa();
}
@@ -2339,50 +2368,40 @@ error:
return ret;
}
-static inline u64 aligned_size(u64 host_addr, size_t size)
-{
- u64 addr;
- addr = (host_addr & (~PAGE_MASK)) + size;
- return PAGE_ALIGN(addr);
-}
-
-struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+ size_t size)
{
- struct iova *piova;
+ host_addr &= ~PAGE_MASK;
+ host_addr += size + PAGE_SIZE - 1;
- /* Make sure it's in range */
- end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
- if (!size || (IOVA_START_ADDR + size > end))
- return NULL;
-
- piova = alloc_iova(&domain->iovad,
- size >> PAGE_SHIFT, IOVA_PFN(end), 1);
- return piova;
+ return host_addr >> VTD_PAGE_SHIFT;
}
-static struct iova *
-__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
- size_t size, u64 dma_mask)
+static struct iova *intel_alloc_iova(struct device *dev,
+ struct dmar_domain *domain,
+ unsigned long nrpages, uint64_t dma_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
- if (dma_mask <= DMA_BIT_MASK(32) || dmar_forcedac)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- else {
+ /* Restrict dma_mask to the width that the iommu can handle */
+ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+
+ if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
* First try to allocate an io virtual address in
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = iommu_alloc_iova(domain, size, DMA_BIT_MASK(32));
- if (!iova)
- iova = iommu_alloc_iova(domain, size, dma_mask);
- }
-
- if (!iova) {
- printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
+ iova = alloc_iova(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)), 1);
+ if (iova)
+ return iova;
+ }
+ iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
+ if (unlikely(!iova)) {
+ printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+ nrpages, pci_name(pdev));
return NULL;
}
@@ -2485,14 +2504,12 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
return 0;
iommu = domain_get_iommu(domain);
- size = aligned_size((u64)paddr, size);
+ size = aligned_nrpages(paddr, size);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
if (!iova)
goto error;
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
-
/*
* Check if DMAR supports zero-length reads on write only
* mappings..
@@ -2508,20 +2525,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_page_mapping(domain, start_paddr,
- ((u64)paddr) & PHYSICAL_PAGE_MASK,
- size, prot);
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ paddr >> VTD_PAGE_SHIFT, size, prot);
if (ret)
goto error;
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_paddr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
else
iommu_flush_write_buffer(iommu);
- return start_paddr + ((u64)paddr & (~PAGE_MASK));
+ start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr += paddr & ~PAGE_MASK;
+ return start_paddr;
error:
if (iova)
@@ -2614,7 +2631,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
@@ -2627,22 +2644,25 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
+ (unsigned long long)dev_addr))
return;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- size = aligned_size((u64)dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
- pr_debug("Device %s unmapping: %zx@%llx\n",
- pci_name(pdev), size, (unsigned long long)start_addr);
+ pr_debug("Device %s unmapping: pfn %lx-%lx\n",
+ pci_name(pdev), start_pfn, last_pfn);
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+
if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2700,14 +2720,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- unsigned long start_addr;
+ unsigned long start_pfn, last_pfn;
struct iova *iova;
- size_t size = 0;
- phys_addr_t addr;
- struct scatterlist *sg;
struct intel_iommu *iommu;
if (iommu_no_mapping(pdev))
@@ -2719,22 +2735,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
- if (!iova)
+ if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
+ (unsigned long long)sglist[0].dma_address))
return;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
- start_addr = iova->pfn_lo << PAGE_SHIFT;
+ start_pfn = mm_to_dma_pfn(iova->pfn_lo);
+ last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
/* clear the whole page */
- dma_pte_clear_range(domain, start_addr, start_addr + size);
+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* free page tables */
- dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+ dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ (last_pfn - start_pfn + 1));
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2757,17 +2772,16 @@ static int intel_nontranslate_map_sg(struct device *hddev,
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- phys_addr_t addr;
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset = 0;
+ size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
- unsigned long start_addr;
+ unsigned long start_vpfn;
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
@@ -2780,12 +2794,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
iommu = domain_get_iommu(domain);
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size += aligned_size((u64)addr, sg->length);
- }
+ for_each_sg(sglist, sg, nelems, i)
+ size += aligned_nrpages(sg->offset, sg->length);
- iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -2801,35 +2813,24 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_addr = iova->pfn_lo << PAGE_SHIFT;
- offset = 0;
- for_each_sg(sglist, sg, nelems, i) {
- addr = page_to_phys(sg_page(sg)) + sg->offset;
- size = aligned_size((u64)addr, sg->length);
- ret = domain_page_mapping(domain, start_addr + offset,
- ((u64)addr) & PHYSICAL_PAGE_MASK,
- size, prot);
- if (ret) {
- /* clear the page */
- dma_pte_clear_range(domain, start_addr,
- start_addr + offset);
- /* free page tables */
- dma_pte_free_pagetable(domain, start_addr,
- start_addr + offset);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- return 0;
- }
- sg->dma_address = start_addr + offset +
- ((u64)addr & (~PAGE_MASK));
- sg->dma_length = sg->length;
- offset += size;
+ start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+
+ ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+ if (unlikely(ret)) {
+ /* clear the page */
+ dma_pte_clear_range(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_vpfn,
+ start_vpfn + size - 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ return 0;
}
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_addr,
- offset >> VTD_PAGE_SHIFT);
+ iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
else
iommu_flush_write_buffer(iommu);
@@ -3334,7 +3335,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
- spin_lock_init(&domain->mapping_lock);
spin_lock_init(&domain->iommu_lock);
domain_reserve_special_ranges(domain);
@@ -3388,8 +3388,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
static void vm_domain_exit(struct dmar_domain *domain)
{
- u64 end;
-
/* Domain 0 is reserved, so dont process it */
if (!domain)
return;
@@ -3397,14 +3395,12 @@ static void vm_domain_exit(struct dmar_domain *domain)
vm_domain_remove_all_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
- end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~VTD_PAGE_MASK);
/* clear ptes */
- dma_pte_clear_range(domain, 0, end);
+ dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
/* free page tables */
- dma_pte_free_pagetable(domain, 0, end);
+ dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
iommu_free_vm_domain(domain);
free_domain_mem(domain);
@@ -3513,7 +3509,7 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+ max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
int min_agaw;
u64 end;
@@ -3531,8 +3527,11 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
}
dmar_domain->max_addr = max_addr;
}
-
- ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+ /* Round up size to next multiple of PAGE_SIZE, if it and
+ the low bits of hpa would take us onto the next page */
+ size = aligned_nrpages(hpa, size);
+ ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ hpa >> VTD_PAGE_SHIFT, size, prot);
return ret;
}
@@ -3540,15 +3539,12 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- dma_addr_t base;
- /* The address might not be aligned */
- base = iova & VTD_PAGE_MASK;
- size = VTD_PAGE_ALIGN(size);
- dma_pte_clear_range(dmar_domain, base, base + size);
+ dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ (iova + size - 1) >> VTD_PAGE_SHIFT);
- if (dmar_domain->max_addr == base + size)
- dmar_domain->max_addr = base;
+ if (dmar_domain->max_addr == iova + size)
+ dmar_domain->max_addr = iova;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3558,7 +3554,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = addr_to_dma_pte(dmar_domain, iova);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
if (pte)
phys = dma_pte_addr(pte);
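
Most of the intel-iommu rework above replaces byte addresses with page frame numbers; dma_to_mm_pfn()/mm_to_dma_pfn() convert between CPU and VT-d page frames, and aligned_nrpages() computes how many VT-d pages a buffer spans. The arithmetic can be checked in isolation; PAGE_SHIFT is assumed equal to VTD_PAGE_SHIFT (4KiB pages) here, which is only one possible configuration.

/* Arithmetic-only sketch of the address-to-PFN conversions above. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define VTD_PAGE_SHIFT	12

/* CPU pfn <-> DMA (VT-d) pfn; only valid while VT-d pages are not
 * larger than MM pages, as the driver comment above requires. */
static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

/* how many VT-d pages does a buffer of 'size' bytes at 'host_addr' span? */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;		/* offset within the page */
	host_addr += size + PAGE_SIZE - 1;	/* round the end up */
	return host_addr >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* a 5000-byte buffer starting 100 bytes into a page spans 2 pages */
	printf("nrpages = %lu\n", aligned_nrpages(0x1000 + 100, 5000));
	printf("dma pfn of mm pfn 7 = %lu\n", mm_to_dma_pfn(7));
	printf("mm pfn of dma pfn 7 = %lu\n", dma_to_mm_pfn(7));
	return 0;
}
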
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 2287116e982..46dd440e231 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -1,9 +1,19 @@
/*
- * Copyright (c) 2006, Intel Corporation.
+ * Copyright © 2006-2009, Intel Corporation.
*
- * This file is released under the GPLv2.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
*
- * Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
*/
@@ -123,7 +133,15 @@ move_left:
/* Insert the new_iova into domain rbtree by holding writer lock */
/* Add new node and rebalance tree. */
{
- struct rb_node **entry = &((prev)), *parent = NULL;
+ struct rb_node **entry, *parent = NULL;
+
+ /* If we have 'prev', it's a valid place to start the
+ insertion. Otherwise, start from the root. */
+ if (prev)
+ entry = &prev;
+ else
+ entry = &iovad->rbroot.rb_node;
+
/* Figure out where to put new node */
while (*entry) {
struct iova *this = container_of(*entry,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index fee6a4022bc..46dad12f952 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -355,10 +355,9 @@ config EEEPC_LAPTOP
depends on INPUT
depends on EXPERIMENTAL
depends on RFKILL || RFKILL = n
+ depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
- select HOTPLUG
- select HOTPLUG_PCI if PCI
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0..a118eb0f1e6 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
+ * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
int ret = 0;
- unsigned long timeout;
+ unsigned long timeout = jiffies + HZ;
dev_dbg_stamp(dev);
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
+ THIS_MODULE);
+ if (unlikely(IS_ERR(rtc->rtc_dev))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err;
+ }
+
/* Grab the IRQ and init the hardware */
ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- goto err;
+ goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
* enabled, so let's just do a quick timeout here since the IRQ will not fire ...
*/
- timeout = jiffies + HZ;
while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
if (time_after(jiffies, timeout))
break;
bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- /* Register our RTC with the RTC framework */
- rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev))) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_irq;
- }
-
return 0;
- err_irq:
- free_irq(IRQ_RTC, dev);
- err:
+err_reg:
+ rtc_device_unregister(rtc->rtc_dev);
+err:
kfree(rtc);
return ret;
}
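
The rtc-bfin change reorders probe so the RTC device is registered before the IRQ is requested, and relabels the error path so it unwinds in reverse order. A generic sketch of that ordering with stubbed-out calls; none of these functions are the real RTC API.

/* Probe-ordering sketch: create the framework object first, enable the
 * IRQ second, undo in reverse on failure. The stubs force the error path.
 */
#include <stdio.h>

static int rtc_device_register(void)	{ puts("register rtc"); return 0; }
static void rtc_device_unregister(void)	{ puts("unregister rtc"); }
static int request_irq(void)		{ puts("request irq"); return -1; }

static int probe(void)
{
	int ret;

	ret = rtc_device_register();	/* step 1: create the device */
	if (ret)
		goto err;

	ret = request_irq();		/* step 2: only now enable the IRQ */
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	rtc_device_unregister();	/* undo step 1 */
err:
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
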
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 74369a3f963..c399f485aa7 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -13,6 +13,7 @@
#include <linux/inet.h>
#include <linux/crypto.h>
+#include <linux/if_vlan.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
@@ -184,6 +185,9 @@ static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
struct cxgb3i_adapter *snic;
int i;
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(ndev);
+
read_lock(&cxgb3i_snic_rwlock);
list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
for (i = 0; i < snic->hba_cnt; i++) {
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index a84072865fc..2c266c01dc5 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -473,16 +473,16 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
* limitation for the device. Try 40-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 32-bit DMA "
@@ -490,7 +490,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 40-bit DMA "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index eabf3650285..bfc996971b8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -245,7 +245,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct vnic_wq_copy *wq,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc,
- u32 sg_count)
+ int sg_count)
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -260,9 +260,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
char msg[2];
if (sg_count) {
- BUG_ON(sg_count < 0);
- BUG_ON(sg_count > FNIC_MAX_SG_DESC_CNT);
-
/* For each SGE, create a device desc entry */
desc = io_req->sgl_list;
for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
@@ -344,7 +341,7 @@ int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
struct fnic *fnic;
struct vnic_wq_copy *wq;
int ret;
- u32 sg_count;
+ int sg_count;
unsigned long flags;
unsigned long ptr;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 869a11bdccb..9928704e235 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1095,9 +1095,14 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
+
+ if (hostdata->madapter_info.os_type == 3) {
+ enable_fast_fail(hostdata);
+ return;
+ }
}
- enable_fast_fail(hostdata);
+ send_srp_login(hostdata);
}
/**
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2eee9e6e4fe..292c02f810d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3670,13 +3670,14 @@ static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
int flagset;
+ unsigned long flags;
if (!rport->rqst_q)
return;
get_device(&rport->dev);
- spin_lock(rport->rqst_q->queue_lock);
+ spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
@@ -3684,7 +3685,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
__blk_run_queue(rport->rqst_q);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock(rport->rqst_q->queue_lock);
+ spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
put_device(&rport->dev);
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8201387b4da..ef142fd47a8 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -210,13 +210,11 @@ static void sg_put_dev(Sg_device *sdp);
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
- struct request_queue *q = sfp->parentdp->device->request_queue;
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(&q->cmd_filter,
- cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
static int
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97f3158fa7b..27e84e4b1fa 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -134,7 +134,7 @@ zalon_probe(struct parisc_device *dev)
host = ncr_attach(&zalon7xx_template, unit, &device);
if (!host)
- goto fail;
+ return -ENODEV;
if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
dev_printk(KERN_ERR, &dev->dev, "irq problem with %d, detaching\n ",
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a07015d646d..6160e03f410 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -759,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
@@ -3557,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb3706..8980a5640bd 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e85..f1db395dd88 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ int do_setup = -1;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
+
+ setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
tmp = 0;
cs_change = 1;
status = 0;
- setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
- /* override or restore speed and wordsize */
- if (t->speed_hz || t->bits_per_word) {
- setup_transfer = bitbang->setup_transfer;
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
- }
- if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize */
- if (setup_transfer)
+ /* restore speed and wordsize if it was overridden */
+ if (do_setup == 1)
setup_transfer(spi, NULL);
+ do_setup = 0;
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
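
The spi_bitbang rework tracks per-transfer setup with a tri-state do_setup flag instead of re-deriving it for every message. A standalone model of that state machine follows; the transfer structure and setup hook are simplified stand-ins, not the spi_bitbang API.

/* do_setup states:
 *   -1  first pass, program the device even without per-transfer overrides
 *    1  a transfer overrode speed/word size, restore defaults afterwards
 *    0  defaults already programmed, reprogram only on a new override
 */
#include <stdio.h>

struct xfer { unsigned speed_hz, bits_per_word; };

static void setup_transfer(const struct xfer *t)
{
	if (t)
		printf("setup: %u Hz, %u bits\n", t->speed_hz, t->bits_per_word);
	else
		puts("setup: restore defaults");
}

static void run_message(const struct xfer *t, int n, int *do_setup)
{
	int i;

	for (i = 0; i < n; i++) {
		if (t[i].speed_hz || t[i].bits_per_word)
			*do_setup = 1;		/* override requested */
		if (*do_setup != 0)		/* init (-1) or override (1) */
			setup_transfer(&t[i]);
		/* ... shift the bits out here ... */
	}
	if (*do_setup == 1)			/* undo the override */
		setup_transfer(NULL);
	*do_setup = 0;
}

int main(void)
{
	struct xfer msg1[] = { { 0, 0 }, { 500000, 16 } };
	struct xfer msg2[] = { { 0, 0 } };
	int do_setup = -1;

	run_message(msg1, 2, &do_setup);
	run_message(msg2, 1, &do_setup);	/* no setup calls at all */
	return 0;
}
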
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb..606e7a40a8d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
/* Bit masks for spi_device.mode management. Note that incorrect
- * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
- * devices on a shared bus: CS_HIGH, because this device will be
- * active when it shouldn't be; 3WIRE, because when active it won't
- * behave as it should.
+ * settings of some mode bits can cause *lots* of trouble for other
+ * devices on a shared bus:
*
- * REVISIT should changing those two modes be privileged?
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
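
The spidev hunk widens SPI_MODE_MASK so user space may also toggle SPI_NO_CS and SPI_READY; anything outside the mask is still rejected. A sketch of that check; the flag values below are illustrative, the real ones live in <linux/spi/spi.h>.

#include <stdio.h>
#include <errno.h>

#define SPI_CPHA	0x01
#define SPI_CPOL	0x02
#define SPI_CS_HIGH	0x04
#define SPI_LSB_FIRST	0x08
#define SPI_3WIRE	0x10
#define SPI_LOOP	0x20
#define SPI_NO_CS	0x40
#define SPI_READY	0x80

#define SPI_MODE_MASK	(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
			 | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
			 | SPI_NO_CS | SPI_READY)

static int set_mode(unsigned int requested)
{
	if (requested & ~SPI_MODE_MASK)
		return -EINVAL;	/* unknown or unsupported bit */
	printf("mode 0x%02x accepted\n", requested);
	return 0;
}

int main(void)
{
	set_mode(SPI_CPOL | SPI_CPHA);			/* accepted */
	printf("unknown bit -> %d\n", set_mode(0x100));	/* rejected */
	return 0;
}
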
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c..497ff8af03e 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
if (err)
return err;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- return fbhw->encode_fix(fix, &par);
+ mutex_lock(&info->mm_lock);
+ err = fbhw->encode_fix(fix, &par);
+ mutex_unlock(&info->mm_lock);
+ return err;
}
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info)
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
+ mutex_lock(&info->mm_lock);
fbhw->encode_fix(&info->fix, par);
+ mutex_unlock(&info->mm_lock);
/* Set new videomode */
ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f5..cb88394ba99 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
smem_len = (var->xres_virtual * var->yres_virtual
* ((var->bits_per_pixel + 7) / 8));
+ mutex_lock(&info->mm_lock);
info->fix.smem_len = max(smem_len, sinfo->smem_len);
+ mutex_unlock(&info->mm_lock);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d..1f39a62f899 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
int mtrr_reg;
#endif
u32 mem_cntl;
+ struct crtc saved_crtc;
+ union aty_pll saved_pll;
};
/*
@@ -217,6 +219,7 @@ struct atyfb_par {
#define M64F_XL_DLL 0x00080000
#define M64F_MFB_FORCE_4 0x00100000
#define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
/*
* Register access
*/
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30..63d3739d43a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
static int store_video_par(char *videopar, unsigned char m64_num);
#endif
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
static int read_aty_sense(const struct atyfb_par *par);
#endif
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
/*
* Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
+ ram_sdram, ram_sgram, ram_wram, ram_resv
+};
+static char *aty_xl_ram[8] __devinitdata = {
+ ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
+static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
+{
+ u32 line_length = vxres * bpp / 8;
+
+ if (par->ram_type == SGRAM ||
+ (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
+ line_length = (line_length + 63) & ~63;
+
+ return line_length;
+}
+
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var, struct crtc *crtc)
{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
+ u32 line_length;
/* input */
- xres = var->xres;
+ xres = (var->xres + 7) & ~7;
yres = var->yres;
- vxres = var->xres_virtual;
+ vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
- xoffset = var->xoffset;
+ xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
} else
FAIL("invalid bpp");
- if (vxres * vyres * bpp / 8 > info->fix.smem_len)
+ line_length = calc_line_length(par, vxres, bpp);
+
+ if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
- crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
+ crtc->off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp<<16);
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
}
aty_st_8(DAC_MASK, 0xff, par);
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
+ info->fix.line_length = calc_line_length(par, var->xres_virtual,
+ var->bits_per_pixel);
+
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
- u32 vxres = par->crtc.vxres;
+ u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
- par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
+ par->crtc.off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
}
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
const int *refresh_tbl;
int i, size;
- if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
+ if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
- ramname = aty_ct_ram[par->ram_type];
+ if (M64_HAS(XL_MEM))
+ ramname = aty_xl_ram[par->ram_type];
+ else
+ ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
- aty_get_crtc(par, &saved_crtc);
+ aty_get_crtc(par, &par->saved_crtc);
if(par->pll_ops->get_pll)
- par->pll_ops->get_pll(info, &saved_pll);
+ par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
aty_init_exit:
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
+ mutex_lock(&reboot_lock);
+ if (!reboot_info)
+ reboot_info = info;
+ mutex_unlock(&reboot_lock);
+
return 0;
err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
+ mutex_lock(&reboot_lock);
+ if (reboot_info == info)
+ reboot_info = NULL;
+ mutex_unlock(&reboot_lock);
+
atyfb_remove(info);
}
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
+static int atyfb_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct atyfb_par *par;
+
+ if (code != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ mutex_lock(&reboot_lock);
+
+ if (!reboot_info)
+ goto out;
+
+ if (!lock_fb_info(reboot_info))
+ goto out;
+
+ par = reboot_info->par;
+
+ /*
+ * HP OmniBook 500's BIOS doesn't like the state of the
+ * hardware after atyfb has been used. Restore the hardware
+ * to the original state to allow successful reboots.
+ */
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(reboot_info, &par->saved_pll);
+
+ unlock_fb_info(reboot_info);
+ out:
+ mutex_unlock(&reboot_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block atyfb_reboot_notifier = {
+ .notifier_call = atyfb_reboot_notify,
+};
+
+static const struct dmi_system_id atyfb_reboot_ids[] = {
+ {
+ .ident = "HP OmniBook 500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
+ },
+ },
+
+ { }
+};
+
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
err2 = atyfb_atari_probe();
#endif
- return (err1 && err2) ? -ENODEV : 0;
+ if (err1 && err2)
+ return -ENODEV;
+
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
+
+ return 0;
}
static void __exit atyfb_exit(void)
{
+ if (dmi_check_system(atyfb_reboot_ids))
+ unregister_reboot_notifier(&atyfb_reboot_notifier);
+
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
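The off_pitch rework above packs two values: the byte offset of the visible origin in 8-byte units, and a pitch derived from line_length (which calc_line_length() may round up to a 64-byte multiple on SGRAM/WRAM) rather than from the raw virtual width. A standalone arithmetic sketch with made-up mode numbers, mirroring the expression used in aty_var_to_crtc() and set_off_pitch():

#include <stdint.h>
#include <stdio.h>

/* offset = visible origin in 64-bit units, pitch = line_length / bpp
 * (which equals xres_virtual / 8 when no padding was added) */
static uint32_t pack_off_pitch(uint32_t line_length, uint32_t bpp,
			       uint32_t xoffset, uint32_t yoffset)
{
	uint32_t offset = (yoffset * line_length + xoffset * bpp / 8) / 8;
	uint32_t pitch = line_length / bpp;

	return offset | (pitch << 22);
}

int main(void)
{
	/* example: 1024 pixels/line at 16 bpp -> 2048-byte lines, panned down 8 lines */
	printf("off_pitch = 0x%08x\n", pack_off_pitch(2048, 16, 0, 8));
	return 0;
}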
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a..51fcc0a2c94 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
{
u32 pitch_value;
+ u32 vxres;
/* determine modal information from global mode structure */
- pitch_value = info->var.xres_virtual;
+ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
+ vxres = info->var.xres_virtual;
if (info->var.bits_per_pixel == 24) {
/* In 24 bpp, the engine is in 8 bpp - this requires that all */
/* horizontal coordinates and widths must be adjusted */
pitch_value *= 3;
+ vxres *= 3;
}
/* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
aty_st_le32(SC_LEFT, 0, par);
aty_st_le32(SC_TOP, 0, par);
aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
- aty_st_le32(SC_RIGHT, pitch_value - 1, par);
+ aty_st_le32(SC_RIGHT, vxres - 1, par);
/* set background color to minimum value (usually BLACK) */
aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 1dae7f8f3c6..51422fc4f60 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -356,7 +356,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, sizeof(GFP_KERNEL));
+ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
if (lcd->buf == NULL) {
kfree(lcd);
return -ENOMEM;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0c..53ea05645ff 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
-__acquires(&info->lock)
-__releases(&info->lock)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
off = vma->vm_pgoff << PAGE_SHIFT;
if (!fb)
return -ENODEV;
+ mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
- mutex_lock(&info->lock);
res = fb->fb_mmap(info, vma);
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return res;
}
- mutex_lock(&info->lock);
-
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
mutex_init(&fb_info->lock);
+ mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd..0bf2190928d 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
phys_addr_t phys;
+ u32 smem_len = info->fix.line_length * info->var.yres_virtual;
pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
- info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
- info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+ info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
printk(KERN_ERR "Unable to allocate fb memory\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (unsigned long) phys;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start,
- info->fix.smem_len);
+ info->fix.smem_start, info->fix.smem_len);
pr_debug("screen base %p\n", info->screen_base);
return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
fsl_diu_free(info->screen_base, info->fix.smem_len);
+ mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
+ mutex_unlock(&info->mm_lock);
}
/*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc8..71960672d72 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "I810");
+ mutex_lock(&info->mm_lock);
fix->smem_start = par->fb.physical;
fix->smem_len = par->fb.size;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->xpanstep = 8;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50..59c3a2e1491 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
DBG(__func__)
+ mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
+ mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
spin_lock_init(&ACCESS_FBINFO(lock.accel));
init_rwsem(&ACCESS_FBINFO(crtc2.lock));
init_rwsem(&ACCESS_FBINFO(altout.lock));
+ mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
ACCESS_FBINFO(irq_flags) = 0;
init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145..909e10a1189 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
#undef m2info
}
-static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
+static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
+{
struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
strcpy(fix->id, "MATROX DH");
+ mutex_lock(&m2info->fbcon.mm_lock);
fix->smem_start = m2info->video.base;
fix->smem_len = m2info->video.len_usable;
+ mutex_unlock(&m2info->fbcon.mm_lock);
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->xpanstep = 8; /* TBD */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e88..567fb944bd2 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
}
static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi);
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
if (fbi->fix.smem_start)
mx3fb_unmap_video_memory(fbi);
- fbi->fix.smem_len = mem_len;
- if (mx3fb_map_video_memory(fbi) < 0) {
+ if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
mutex_unlock(&mx3_fbi->mutex);
return -ENOMEM;
}
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
/**
* mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
* @fbi: framebuffer information pointer
+ * @mem_len: length of mapped memory
* @return: Error code indicating success or failure
*
* This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
* area is remapped, all virtual memory access to the video memory should occur
* at the new region.
*/
-static int mx3fb_map_video_memory(struct fb_info *fbi)
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
int retval = 0;
dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device,
- fbi->fix.smem_len,
+ mem_len,
&addr, GFP_DMA);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- fbi->fix.smem_len);
+ mem_len);
retval = -EBUSY;
goto err0;
}
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = addr;
+ fbi->fix.smem_len = mem_len;
+ mutex_unlock(&fbi->mm_lock);
dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
(uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
fbi->screen_base, fbi->fix.smem_start);
fbi->screen_base = 0;
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
return 0;
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57c..4ea99bfc37b 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
rg = &plane->fbdev->mem_desc.region[plane->idx];
fbi->screen_base = rg->vaddr;
+ mutex_lock(&fbi->mm_lock);
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
+ mutex_unlock(&fbi->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
* plane memory is dealloce'd, the other
* screen parameters in var / fix are invalid.
*/
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
}
}
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a..bacfabd9ce1 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
+ mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c328..6506117c134 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
ofb->video_mem_size = size;
+ mutex_lock(&ofb->fb.mm_lock);
ofb->fb.fix.smem_start = ofb->video_mem_phys;
ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
+ mutex_unlock(&ofb->fb.mm_lock);
ofb->fb.screen_base = ofb->video_mem;
return 0;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee305..9f6d6e61f0c 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
return 0;
}
-static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
- unsigned long stride)
-{
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "sh7760-lcdc");
-
- fix->smem_start = (unsigned long)info->screen_base;
- fix->smem_len = info->screen_size;
-
- fix->line_length = stride;
-}
-
static int sh7760fb_get_color_info(struct device *dev,
u16 lddfr, int *bpp, int *gray)
{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
- encode_fix(&info->fix, info, stride);
+ info->fix.line_length = stride;
+
sh7760fb_check_var(&info->var, info);
sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
info->screen_base = fbmem;
info->screen_size = vram;
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
return 0;
}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
+ strcpy(info->fix.id, "sh7760-lcdc");
+
/* set the DON2 bit now, before cmap allocation, as it will randomize
* palette memory.
*/
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index f10d2fbeda0..da983b720f0 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/atomic.h>
@@ -33,6 +34,7 @@ struct sh_mobile_lcdc_chan {
struct fb_info info;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
+ struct scatterlist *sglist;
unsigned long frame_end;
wait_queue_head_t frame_end_wait;
};
@@ -206,16 +208,38 @@ static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv) {}
static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv) {}
#endif
+static int sh_mobile_lcdc_sginit(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages_max = info->fix.smem_len >> PAGE_SHIFT;
+ struct page *page;
+ int nr_pages = 0;
+
+ sg_init_table(ch->sglist, nr_pages_max);
+
+ list_for_each_entry(page, pagelist, lru)
+ sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0);
+
+ return nr_pages;
+}
+
static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
+ unsigned int nr_pages;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
+ nr_pages = sh_mobile_lcdc_sginit(info, pagelist);
+ dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+
/* trigger panel update */
lcdc_write_chan(ch, LDSM2R, 1);
+
+ dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -846,21 +870,31 @@ static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
}
for (i = 0; i < j; i++) {
- error = register_framebuffer(&priv->ch[i].info);
+ struct sh_mobile_lcdc_chan *ch = priv->ch + i;
+
+ info = &ch->info;
+
+ if (info->fbdefio) {
+ priv->ch->sglist = vmalloc(sizeof(struct scatterlist) *
+ info->fix.smem_len >> PAGE_SHIFT);
+ if (!priv->ch->sglist) {
+ dev_err(&pdev->dev, "cannot allocate sglist\n");
+ goto err1;
+ }
+ }
+
+ error = register_framebuffer(info);
if (error < 0)
goto err1;
- }
- for (i = 0; i < j; i++) {
- info = &priv->ch[i].info;
dev_info(info->dev,
"registered %s/%s as %dx%d %dbpp.\n",
pdev->name,
- (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ?
+ (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
"mainlcd" : "sublcd",
- (int) priv->ch[i].cfg.lcd_cfg.xres,
- (int) priv->ch[i].cfg.lcd_cfg.yres,
- priv->ch[i].cfg.bpp);
+ (int) ch->cfg.lcd_cfg.xres,
+ (int) ch->cfg.lcd_cfg.yres,
+ ch->cfg.bpp);
/* deferred io mode: disable clock to save power */
if (info->fbdefio)
@@ -892,6 +926,9 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
if (!info->device)
continue;
+ if (priv->ch[i].sglist)
+ vfree(priv->ch[i].sglist);
+
dma_free_coherent(&pdev->dev, info->fix.smem_len,
info->screen_base, priv->ch[i].dma_handle);
fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d..fd33455389b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
strcpy(fix->id, ivideo->myid);
+ mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a0670..98f24f0ec00 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+ unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
- if (fbi && ptr < fbi->fix.smem_len)
+ if (fbi && ptr < smem_len)
return -ENOMEM;
break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
- ptr = fbi ? fbi->fix.smem_len : 0;
+ ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
+ unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
- info->fix.smem_len = info->fix.line_length * var->yres_virtual;
+ smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
- if (sm501_alloc_mem(fbi, &par->screen, mem_type,
- info->fix.smem_len)) {
+ if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
- sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0);
+ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
+ info->fix.smem_len);
goto out_update;
}
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
par->cursor_regs = info->regs + reg_base;
- ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
+ ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
+ fbi->fix.smem_len);
if (ret < 0)
return ret;
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f1..8a141c2c637 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
info->fix.ywrapstep = 0;
info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
+ mutex_lock(&info->mm_lock);
if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
par->extmem_active = 1;
info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
par->extmem_active = 0;
info->fix.smem_len = MEM_INT_SIZE+1;
}
+ mutex_unlock(&info->mm_lock);
w100fb_activate_var(par);
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 210acafe4a9..3ff8bdd18fb 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -432,7 +432,6 @@ vfs_rejected_lock:
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
goto abort_attempt;
}
diff --git a/fs/aio.c b/fs/aio.c
index 76da1253795..d065b2c3273 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
+ if (req->ki_eventfd != NULL)
+ eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
/* Complete the fput(s) */
if (req->ki_filp != NULL)
__fput(req->ki_filp);
- if (req->ki_eventfd != NULL)
- __fput(req->ki_eventfd);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
- int schedule_putreq = 0;
-
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* we would not be holding the last reference to the file*, so
* this function will be executed w/out any aio kthread wakeup.
*/
- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
- schedule_putreq++;
- else
- req->ki_filp = NULL;
- if (req->ki_eventfd != NULL) {
- if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
- schedule_putreq++;
- else
- req->ki_eventfd = NULL;
- }
- if (unlikely(schedule_putreq)) {
+ if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
queue_work(aio_wq, &fput_work);
- } else
+ } else {
+ req->ki_filp = NULL;
really_put_req(ctx, req);
+ }
return 1;
}
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
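The aio hunks above replace the struct file reference on the completion eventfd with an eventfd context (eventfd_ctx_fdget()/eventfd_ctx_put()), so signalling a completion no longer has to drop a file reference from the fput work queue. The user-space interface is unchanged; a minimal sketch of submitting one read whose completion is announced through an eventfd, using raw syscalls with error handling omitted (the file path is only an example):

#include <linux/aio_abi.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdint.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	static char buf[4096];
	int efd = eventfd(0, 0);
	int fd = open("/etc/hostname", O_RDONLY);   /* example file */
	uint64_t count;

	syscall(SYS_io_setup, 8, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (uint64_t)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_flags = IOCB_FLAG_RESFD;   /* completion signalled via the eventfd */
	cb.aio_resfd = efd;

	syscall(SYS_io_submit, ctx, 1, cbs);

	/* blocks until the kernel signals the eventfd for the completed iocb */
	read(efd, &count, sizeof(count));

	syscall(SYS_io_destroy, ctx);
	return 0;
}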
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9fa212b014a..b7c1603cd4b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1522,11 +1522,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
-
if (psinfo == NULL)
return 0;
+ fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+
/*
* Figure out how many notes we're going to need for each thread.
*/
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
-
+ /*
+ * The number of segs is recorded in the ELF header as a 16-bit value.
+ * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
+ */
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
segs += ELF_CORE_EXTRA_PHDRS;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 31c46a241ba..49a34e7f730 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -1,7 +1,7 @@
/*
* bio-integrity.c - bio data integrity extensions
*
- * Copyright (C) 2007, 2008 Oracle Corporation
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
@@ -25,63 +25,121 @@
#include <linux/bio.h>
#include <linux/workqueue.h>
-static struct kmem_cache *bio_integrity_slab __read_mostly;
-static mempool_t *bio_integrity_pool;
-static struct bio_set *integrity_bio_set;
+struct integrity_slab {
+ struct kmem_cache *slab;
+ unsigned short nr_vecs;
+ char name[8];
+};
+
+#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
+struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
+ IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
+};
+#undef IS
+
static struct workqueue_struct *kintegrityd_wq;
+static inline unsigned int vecs_to_idx(unsigned int nr)
+{
+ switch (nr) {
+ case 1:
+ return 0;
+ case 2 ... 4:
+ return 1;
+ case 5 ... 16:
+ return 2;
+ case 17 ... 64:
+ return 3;
+ case 65 ... 128:
+ return 4;
+ case 129 ... BIO_MAX_PAGES:
+ return 5;
+ default:
+ BUG();
+ }
+}
+
+static inline int use_bip_pool(unsigned int idx)
+{
+ if (idx == BIOVEC_NR_POOLS)
+ return 1;
+
+ return 0;
+}
+
/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
+ * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
+struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs,
+ struct bio_set *bs)
{
struct bio_integrity_payload *bip;
- struct bio_vec *iv;
- unsigned long idx;
+ unsigned int idx = vecs_to_idx(nr_vecs);
BUG_ON(bio == NULL);
+ bip = NULL;
- bip = mempool_alloc(bio_integrity_pool, gfp_mask);
- if (unlikely(bip == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip\n", __func__);
- return NULL;
- }
+ /* Lower order allocations come straight from slab */
+ if (!use_bip_pool(idx))
+ bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
- memset(bip, 0, sizeof(*bip));
+ /* Use mempool if lower order alloc failed or max vecs were requested */
+ if (bip == NULL) {
+ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
- iv = bvec_alloc_bs(gfp_mask, nr_vecs, &idx, integrity_bio_set);
- if (unlikely(iv == NULL)) {
- printk(KERN_ERR "%s: could not alloc bip_vec\n", __func__);
- mempool_free(bip, bio_integrity_pool);
- return NULL;
+ if (unlikely(bip == NULL)) {
+ printk(KERN_ERR "%s: could not alloc bip\n", __func__);
+ return NULL;
+ }
}
- bip->bip_pool = idx;
- bip->bip_vec = iv;
+ memset(bip, 0, sizeof(*bip));
+
+ bip->bip_slab = idx;
bip->bip_bio = bio;
bio->bi_integrity = bip;
return bip;
}
+EXPORT_SYMBOL(bio_integrity_alloc_bioset);
+
+/**
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
+ * @bio: bio to attach integrity metadata to
+ * @gfp_mask: Memory allocation mask
+ * @nr_vecs: Number of integrity metadata scatter-gather elements
+ *
+ * Description: This function prepares a bio for attaching integrity
+ * metadata. nr_vecs specifies the maximum number of pages containing
+ * integrity metadata that can be attached.
+ */
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
+{
+ return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
+}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
+ * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio)
+void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
@@ -92,8 +150,10 @@ void bio_integrity_free(struct bio *bio)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
- bvec_free_bs(integrity_bio_set, bip->bip_vec, bip->bip_pool);
- mempool_free(bip, bio_integrity_pool);
+ if (use_bip_pool(bip->bip_slab))
+ mempool_free(bip, bs->bio_integrity_pool);
+ else
+ kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
bio->bi_integrity = NULL;
}
@@ -114,7 +174,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_pool)) {
+ if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -647,8 +707,8 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
bp->iv1 = bip->bip_vec[0];
bp->iv2 = bip->bip_vec[0];
- bp->bip1.bip_vec = &bp->iv1;
- bp->bip2.bip_vec = &bp->iv2;
+ bp->bip1.bip_vec[0] = bp->iv1;
+ bp->bip2.bip_vec[0] = bp->iv2;
bp->iv1.bv_len = sectors * bi->tuple_size;
bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -667,17 +727,19 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
+ * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
-int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask, struct bio_set *bs)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
+ bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
if (bip == NULL)
return -EIO;
@@ -693,25 +755,43 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask)
}
EXPORT_SYMBOL(bio_integrity_clone);
-static int __init bio_integrity_init(void)
+int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
- kintegrityd_wq = create_workqueue("kintegrityd");
+ unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
+
+ bs->bio_integrity_pool =
+ mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
+ if (!bs->bio_integrity_pool)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL(bioset_integrity_create);
+
+void bioset_integrity_free(struct bio_set *bs)
+{
+ if (bs->bio_integrity_pool)
+ mempool_destroy(bs->bio_integrity_pool);
+}
+EXPORT_SYMBOL(bioset_integrity_free);
+
+void __init bio_integrity_init(void)
+{
+ unsigned int i;
+
+ kintegrityd_wq = create_workqueue("kintegrityd");
if (!kintegrityd_wq)
panic("Failed to create kintegrityd\n");
- bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
+ unsigned int size;
- bio_integrity_pool = mempool_create_slab_pool(BIO_POOL_SIZE,
- bio_integrity_slab);
- if (!bio_integrity_pool)
- panic("bio_integrity: can't allocate bip pool\n");
+ size = sizeof(struct bio_integrity_payload)
+ + bip_slab[i].nr_vecs * sizeof(struct bio_vec);
- integrity_bio_set = bioset_create(BIO_POOL_SIZE, 0);
- if (!integrity_bio_set)
- panic("bio_integrity: can't allocate bio_set\n");
-
- return 0;
+ bip_slab[i].slab =
+ kmem_cache_create(bip_slab[i].name, size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ }
}
-subsys_initcall(bio_integrity_init);
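The slab selection above buckets a requested integrity vector count into one of the BIOVEC_NR_POOLS size classes declared in bip_slab[] (1, 4, 16, 64, 128, BIO_MAX_PAGES); only allocations in the largest class fall back to the per-bio_set mempool. A standalone sketch of the same bucketing, with 256 standing in for BIO_MAX_PAGES:

#include <stdio.h>

/* table-driven rewrite of vecs_to_idx(): map nr_vecs to a size class index */
static unsigned int vecs_to_idx(unsigned int nr)
{
	static const unsigned int class_size[] = { 1, 4, 16, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(class_size) / sizeof(class_size[0]); i++)
		if (nr <= class_size[i])
			return i;
	return (unsigned int)-1;   /* more vectors than any class provides */
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 256; n *= 2)
		printf("nr_vecs %3u -> slab index %u\n", n, vecs_to_idx(n));
	return 0;
}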
diff --git a/fs/bio.c b/fs/bio.c
index 24c91404353..1486b19fc43 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -238,7 +238,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, bs);
/*
* If we have front padding, adjust the bio pointer before freeing
@@ -341,7 +341,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
static void bio_kmalloc_destructor(struct bio *bio)
{
if (bio_integrity(bio))
- bio_integrity_free(bio);
+ bio_integrity_free(bio, fs_bio_set);
kfree(bio);
}
@@ -472,7 +472,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask);
+ ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
if (ret < 0) {
bio_put(b);
@@ -1539,6 +1539,7 @@ void bioset_free(struct bio_set *bs)
if (bs->bio_pool)
mempool_destroy(bs->bio_pool);
+ bioset_integrity_free(bs);
biovec_free_pools(bs);
bio_put_slab(bs);
@@ -1579,6 +1580,9 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
+ if (bioset_integrity_create(bs, pool_size))
+ goto bad;
+
if (!biovec_create_pools(bs, pool_size))
return bs;
@@ -1616,6 +1620,7 @@ static int __init init_bio(void)
if (!bio_slabs)
panic("bio: can't allocate bios\n");
+ bio_integrity_init();
biovec_init_slabs();
fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7f88628a1a7..6e4f6c50a12 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -299,8 +299,8 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
if (IS_ERR(worker->task)) {
- kfree(worker);
ret = PTR_ERR(worker->task);
+ kfree(worker);
goto fail;
}
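The btrfs-async-thread fix above is only a reordering: PTR_ERR(worker->task) has to be evaluated before kfree(worker) releases the structure that holds the pointer, otherwise the error path reads freed memory. The same rule in a stripped-down, hypothetical user-space form:

#include <stdio.h>
#include <stdlib.h>

struct worker { long task_err; };   /* stand-in for worker->task holding an error code */

static long start_worker_failed(struct worker *w)
{
	long ret = w->task_err;   /* read while the object is still valid ... */
	free(w);                  /* ... and only then release it */
	return ret;
}

int main(void)
{
	struct worker *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->task_err = -12;   /* pretend kthread_create() returned -ENOMEM */
	printf("ret = %ld\n", start_worker_failed(w));
	return 0;
}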
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2779c2f5360..98a87383871 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2074,8 +2074,7 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root);
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index edc7d208c5c..a5aca3997d4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -990,15 +990,13 @@ static inline int extent_ref_type(u64 parent, u64 owner)
return type;
}
-static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
+static int find_next_key(struct btrfs_path *path, int level,
+ struct btrfs_key *key)
{
- int level;
- BUG_ON(!path->keep_locks);
- for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+ for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path->nodes[level])
break;
- btrfs_assert_tree_locked(path->nodes[level]);
if (path->slots[level] + 1 >=
btrfs_header_nritems(path->nodes[level]))
continue;
@@ -1158,7 +1156,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
* For simplicity, we just do not add new inline back
* ref if there is any kind of item for this block
*/
- if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
+ if (find_next_key(path, 0, &key) == 0 &&
+ key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
err = -EAGAIN;
goto out;
@@ -2697,7 +2696,7 @@ again:
printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
", %llu bytes_used, %llu bytes_reserved, "
- "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
+ "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
"%llu total\n", (unsigned long long)bytes,
(unsigned long long)data_sinfo->bytes_delalloc,
(unsigned long long)data_sinfo->bytes_used,
@@ -4128,6 +4127,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return buf;
}
+#if 0
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *leaf)
{
@@ -4171,8 +4171,6 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
return 0;
}
-#if 0
-
static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_leaf_ref *ref)
@@ -4553,262 +4551,471 @@ out:
}
#endif
+struct walk_control {
+ u64 refs[BTRFS_MAX_LEVEL];
+ u64 flags[BTRFS_MAX_LEVEL];
+ struct btrfs_key update_progress;
+ int stage;
+ int level;
+ int shared_level;
+ int update_ref;
+ int keep_locks;
+};
+
+#define DROP_REFERENCE 1
+#define UPDATE_BACKREF 2
+
/*
- * helper function for drop_subtree, this function is similar to
- * walk_down_tree. The main difference is that it checks reference
- * counts while tree blocks are locked.
+ * helper to process a tree block while walking down the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function checks the
+ * reference count of the block. if the block is shared and we
+ * need to update back refs for the subtree rooted at the block,
+ * this function changes wc->stage to UPDATE_BACKREF
+ *
+ * when wc->stage == UPDATE_BACKREF, this function updates
+ * back refs for pointers in the block.
+ *
+ * NOTE: return value 1 means we should stop walking down.
*/
-static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_path *path, int *level)
+ struct btrfs_path *path,
+ struct walk_control *wc)
{
- struct extent_buffer *next;
- struct extent_buffer *cur;
- struct extent_buffer *parent;
- u64 bytenr;
- u64 ptr_gen;
- u64 refs;
- u64 flags;
- u32 blocksize;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ struct btrfs_key key;
+ u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
- cur = path->nodes[*level];
- ret = btrfs_lookup_extent_info(trans, root, cur->start, cur->len,
- &refs, &flags);
- BUG_ON(ret);
- if (refs > 1)
- goto out;
+ if (wc->stage == UPDATE_BACKREF &&
+ btrfs_header_owner(eb) != root->root_key.objectid)
+ return 1;
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /*
+ * when reference count of tree block is 1, it won't increase
+ * again. once full backref flag is set, we never clear it.
+ */
+ if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+ }
- while (*level >= 0) {
- cur = path->nodes[*level];
- if (*level == 0) {
- ret = btrfs_drop_leaf_ref(trans, root, cur);
- BUG_ON(ret);
- clean_tree_block(trans, root, cur);
- break;
- }
- if (path->slots[*level] >= btrfs_header_nritems(cur)) {
- clean_tree_block(trans, root, cur);
- break;
+ if (wc->stage == DROP_REFERENCE &&
+ wc->update_ref && wc->refs[level] > 1) {
+ BUG_ON(eb == root->node);
+ BUG_ON(path->slots[level] > 0);
+ if (level == 0)
+ btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
+ else
+ btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
+ if (btrfs_header_owner(eb) == root->root_key.objectid &&
+ btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
+ wc->stage = UPDATE_BACKREF;
+ wc->shared_level = level;
}
+ }
- bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
- blocksize = btrfs_level_size(root, *level - 1);
- ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
+ if (wc->stage == DROP_REFERENCE) {
+ if (wc->refs[level] > 1)
+ return 1;
- next = read_tree_block(root, bytenr, blocksize, ptr_gen);
- btrfs_tree_lock(next);
- btrfs_set_lock_blocking(next);
+ if (path->locks[level] && !wc->keep_locks) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+ }
- ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
- &refs, &flags);
+ /* wc->stage == UPDATE_BACKREF */
+ if (!(wc->flags[level] & flag)) {
+ BUG_ON(!path->locks[level]);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret);
- if (refs > 1) {
- parent = path->nodes[*level];
- ret = btrfs_free_extent(trans, root, bytenr,
- blocksize, parent->start,
- btrfs_header_owner(parent),
- *level - 1, 0);
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+ eb->len, flag, 0);
+ BUG_ON(ret);
+ wc->flags[level] |= flag;
+ }
+
+ /*
+ * the block is shared by multiple trees, so it's not good to
+ * keep the tree lock
+ */
+ if (path->locks[level] && level > 0) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ }
+ return 0;
+}
+
+/*
+ * helper to process a tree block while walking up the tree.
+ *
+ * when wc->stage == DROP_REFERENCE, this function drops the
+ * reference count on the block.
+ *
+ * when wc->stage == UPDATE_BACKREF, this function changes
+ * wc->stage back to DROP_REFERENCE if we changed wc->stage
+ * to UPDATE_BACKREF previously while processing the block.
+ *
+ * NOTE: return value 1 means we should stop walking up.
+ */
+static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ int ret = 0;
+ int level = wc->level;
+ struct extent_buffer *eb = path->nodes[level];
+ u64 parent = 0;
+
+ if (wc->stage == UPDATE_BACKREF) {
+ BUG_ON(wc->shared_level < level);
+ if (level < wc->shared_level)
+ goto out;
+
+ BUG_ON(wc->refs[level] <= 1);
+ ret = find_next_key(path, level + 1, &wc->update_progress);
+ if (ret > 0)
+ wc->update_ref = 0;
+
+ wc->stage = DROP_REFERENCE;
+ wc->shared_level = -1;
+ path->slots[level] = 0;
+
+ /*
+ * check reference count again if the block isn't locked.
+ * we should start walking down the tree again if reference
+ * count is one.
+ */
+ if (!path->locks[level]) {
+ BUG_ON(level == 0);
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ eb->start, eb->len,
+ &wc->refs[level],
+ &wc->flags[level]);
BUG_ON(ret);
- path->slots[*level]++;
- btrfs_tree_unlock(next);
- free_extent_buffer(next);
- continue;
+ BUG_ON(wc->refs[level] == 0);
+ if (wc->refs[level] == 1) {
+ btrfs_tree_unlock(eb);
+ path->locks[level] = 0;
+ return 1;
+ }
+ } else {
+ BUG_ON(level != 0);
}
+ }
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
+ /* wc->stage == DROP_REFERENCE */
+ BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
- *level = btrfs_header_level(next);
- path->nodes[*level] = next;
- path->slots[*level] = 0;
- path->locks[*level] = 1;
- cond_resched();
+ if (wc->refs[level] == 1) {
+ if (level == 0) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ ret = btrfs_dec_ref(trans, root, eb, 1);
+ else
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret);
+ }
+ /* make block locked assertion in clean_tree_block happy */
+ if (!path->locks[level] &&
+ btrfs_header_generation(eb) == trans->transid) {
+ btrfs_tree_lock(eb);
+ btrfs_set_lock_blocking(eb);
+ path->locks[level] = 1;
+ }
+ clean_tree_block(trans, root, eb);
+ }
+
+ if (eb == root->node) {
+ if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = eb->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(eb));
+ } else {
+ if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ parent = path->nodes[level + 1]->start;
+ else
+ BUG_ON(root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]));
}
-out:
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
- bytenr = path->nodes[*level]->start;
- blocksize = path->nodes[*level]->len;
- ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent->start,
- btrfs_header_owner(parent), *level, 0);
+ ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
+ root->root_key.objectid, level, 0);
BUG_ON(ret);
+out:
+ wc->refs[level] = 0;
+ wc->flags[level] = 0;
+ return ret;
+}
+
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct walk_control *wc)
+{
+ struct extent_buffer *next;
+ struct extent_buffer *cur;
+ u64 bytenr;
+ u64 ptr_gen;
+ u32 blocksize;
+ int level = wc->level;
+ int ret;
+
+ while (level >= 0) {
+ cur = path->nodes[level];
+ BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[*level]);
- path->locks[*level] = 0;
+ ret = walk_down_proc(trans, root, path, wc);
+ if (ret > 0)
+ break;
+
+ if (level == 0)
+ break;
+
+ bytenr = btrfs_node_blockptr(cur, path->slots[level]);
+ blocksize = btrfs_level_size(root, level - 1);
+ ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
+
+ next = read_tree_block(root, bytenr, blocksize, ptr_gen);
+ btrfs_tree_lock(next);
+ btrfs_set_lock_blocking(next);
+
+ level--;
+ BUG_ON(level != btrfs_header_level(next));
+ path->nodes[level] = next;
+ path->slots[level] = 0;
+ path->locks[level] = 1;
+ wc->level = level;
}
- free_extent_buffer(path->nodes[*level]);
- path->nodes[*level] = NULL;
- *level += 1;
- cond_resched();
return 0;
}
-/*
- * helper for dropping snapshots. This walks back up the tree in the path
- * to find the first node higher up where we haven't yet gone through
- * all the slots
- */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
- int *level, int max_level)
+ struct walk_control *wc, int max_level)
{
- struct btrfs_root_item *root_item = &root->root_item;
- int i;
- int slot;
+ int level = wc->level;
int ret;
- for (i = *level; i < max_level && path->nodes[i]; i++) {
- slot = path->slots[i];
- if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
- /*
- * there is more work to do in this level.
- * Update the drop_progress marker to reflect
- * the work we've done so far, and then bump
- * the slot number
- */
- path->slots[i]++;
- WARN_ON(*level == 0);
- if (max_level == BTRFS_MAX_LEVEL) {
- btrfs_node_key(path->nodes[i],
- &root_item->drop_progress,
- path->slots[i]);
- root_item->drop_level = i;
- }
- *level = i;
+ path->slots[level] = btrfs_header_nritems(path->nodes[level]);
+ while (level < max_level && path->nodes[level]) {
+ wc->level = level;
+ if (path->slots[level] + 1 <
+ btrfs_header_nritems(path->nodes[level])) {
+ path->slots[level]++;
return 0;
} else {
- struct extent_buffer *parent;
-
- /*
- * this whole node is done, free our reference
- * on it and go up one level
- */
- if (path->nodes[*level] == root->node)
- parent = path->nodes[*level];
- else
- parent = path->nodes[*level + 1];
+ ret = walk_up_proc(trans, root, path, wc);
+ if (ret > 0)
+ return 0;
- clean_tree_block(trans, root, path->nodes[i]);
- ret = btrfs_free_extent(trans, root,
- path->nodes[i]->start,
- path->nodes[i]->len,
- parent->start,
- btrfs_header_owner(parent),
- *level, 0);
- BUG_ON(ret);
- if (path->locks[*level]) {
- btrfs_tree_unlock(path->nodes[i]);
- path->locks[i] = 0;
+ if (path->locks[level]) {
+ btrfs_tree_unlock(path->nodes[level]);
+ path->locks[level] = 0;
}
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- *level = i + 1;
+ free_extent_buffer(path->nodes[level]);
+ path->nodes[level] = NULL;
+ level++;
}
}
return 1;
}
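The rewritten walk_down_tree()/walk_up_tree() pair above implements an iterative depth-first traversal over an explicit path->nodes[]/path->slots[] stack, with the per-level decisions delegated to walk_down_proc() and walk_up_proc(). A toy, self-contained sketch of that stack-based walking pattern on a made-up node type (nothing btrfs-specific; levels are numbered from the root here, unlike btrfs where level 0 is the leaf):

#include <stdio.h>

#define MAX_LEVEL 8

struct node {
	const char *name;
	struct node *child[4];
	int nritems;
};

int main(void)
{
	struct node l1 = { "leaf-1", { 0 }, 0 };
	struct node l2 = { "leaf-2", { 0 }, 0 };
	struct node l3 = { "leaf-3", { 0 }, 0 };
	struct node mid = { "node-A", { &l1, &l2 }, 2 };
	struct node root = { "root", { &mid, &l3 }, 2 };

	struct node *path[MAX_LEVEL] = { &root };
	int slots[MAX_LEVEL] = { 0 };
	int level = 0;

	while (level >= 0) {
		struct node *cur = path[level];

		if (slots[level] < cur->nritems) {
			/* "walk down": push the child at the current slot */
			path[level + 1] = cur->child[slots[level]];
			slots[level + 1] = 0;
			level++;
			continue;
		}

		/* "walk up": every child is done, process the node and pop */
		printf("done with %s\n", cur->name);
		level--;
		if (level >= 0)
			slots[level]++;
	}
	return 0;
}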
/*
- * drop the reference count on the tree rooted at 'snap'. This traverses
- * the tree freeing any blocks that have a ref count of zero after being
- * decremented.
+ * drop a subvolume tree.
+ *
+ * this function traverses the tree freeing any blocks that are
+ * only referenced by the tree.
+ *
+ * when a shared tree block is found, this function decreases its
+ * reference count by one. if update_ref is true, this function
+ * also makes sure backrefs for the shared block and all lower level
+ * blocks are properly updated.
*/
-int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
- *root)
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
{
- int ret = 0;
- int wret;
- int level;
struct btrfs_path *path;
- int update_count;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *tree_root = root->fs_info->tree_root;
struct btrfs_root_item *root_item = &root->root_item;
+ struct walk_control *wc;
+ struct btrfs_key key;
+ int err = 0;
+ int ret;
+ int level;
path = btrfs_alloc_path();
BUG_ON(!path);
- level = btrfs_header_level(root->node);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
+ trans = btrfs_start_transaction(tree_root, 1);
+
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
+ level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
btrfs_set_lock_blocking(path->nodes[level]);
path->slots[level] = 0;
path->locks[level] = 1;
+ memset(&wc->update_progress, 0,
+ sizeof(wc->update_progress));
} else {
- struct btrfs_key key;
- struct btrfs_disk_key found_key;
- struct extent_buffer *node;
-
btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
+ memcpy(&wc->update_progress, &key,
+ sizeof(wc->update_progress));
+
level = root_item->drop_level;
+ BUG_ON(level == 0);
path->lowest_level = level;
- wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (wret < 0) {
- ret = wret;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ path->lowest_level = 0;
+ if (ret < 0) {
+ err = ret;
goto out;
}
- node = path->nodes[level];
- btrfs_node_key(node, &found_key, path->slots[level]);
- WARN_ON(memcmp(&found_key, &root_item->drop_progress,
- sizeof(found_key)));
+ btrfs_node_key_to_cpu(path->nodes[level], &key,
+ path->slots[level]);
+ WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
+
/*
* unlock our path, this is safe because only this
* function is allowed to delete this snapshot
*/
btrfs_unlock_up_safe(path, 0);
+
+ level = btrfs_header_level(root->node);
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+ path->nodes[level]->len,
+ &wc->refs[level],
+ &wc->flags[level]);
+ BUG_ON(ret);
+ BUG_ON(wc->refs[level] == 0);
+
+ if (level == root_item->drop_level)
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
}
+
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = update_ref;
+ wc->keep_locks = 0;
+
while (1) {
- unsigned long update;
- wret = walk_down_tree(trans, root, path, &level);
- if (wret > 0)
+ ret = walk_down_tree(trans, root, path, wc);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
+ }
- wret = walk_up_tree(trans, root, path, &level,
- BTRFS_MAX_LEVEL);
- if (wret > 0)
+ ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
+ if (ret < 0) {
+ err = ret;
break;
- if (wret < 0)
- ret = wret;
- if (trans->transaction->in_commit ||
- trans->transaction->delayed_refs.flushing) {
- ret = -EAGAIN;
+ }
+
+ if (ret > 0) {
+ BUG_ON(wc->stage != DROP_REFERENCE);
break;
}
- for (update_count = 0; update_count < 16; update_count++) {
+
+ if (wc->stage == DROP_REFERENCE) {
+ level = wc->level;
+ btrfs_node_key(path->nodes[level],
+ &root_item->drop_progress,
+ path->slots[level]);
+ root_item->drop_level = level;
+ }
+
+ BUG_ON(wc->level == 0);
+ if (trans->transaction->in_commit ||
+ trans->transaction->delayed_refs.flushing) {
+ ret = btrfs_update_root(trans, tree_root,
+ &root->root_key,
+ root_item);
+ BUG_ON(ret);
+
+ btrfs_end_transaction(trans, tree_root);
+ trans = btrfs_start_transaction(tree_root, 1);
+ } else {
+ unsigned long update;
update = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (update)
- btrfs_run_delayed_refs(trans, root, update);
- else
- break;
+ btrfs_run_delayed_refs(trans, tree_root,
+ update);
}
}
+ btrfs_release_path(root, path);
+ BUG_ON(err);
+
+ ret = btrfs_del_root(trans, tree_root, &root->root_key);
+ BUG_ON(ret);
+
+ free_extent_buffer(root->node);
+ free_extent_buffer(root->commit_root);
+ kfree(root);
out:
+ btrfs_end_transaction(trans, tree_root);
+ kfree(wc);
btrfs_free_path(path);
- return ret;
+ return err;
}
+/*
+ * drop subtree rooted at tree block 'node'.
+ *
+ * NOTE: this function will unlock and release tree block 'node'
+ */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *node,
struct extent_buffer *parent)
{
struct btrfs_path *path;
+ struct walk_control *wc;
int level;
int parent_level;
int ret = 0;
int wret;
+ BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+
path = btrfs_alloc_path();
BUG_ON(!path);
+ wc = kzalloc(sizeof(*wc), GFP_NOFS);
+ BUG_ON(!wc);
+
btrfs_assert_tree_locked(parent);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
@@ -4817,24 +5024,33 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(node);
level = btrfs_header_level(node);
- extent_buffer_get(node);
path->nodes[level] = node;
path->slots[level] = 0;
+ path->locks[level] = 1;
+
+ wc->refs[parent_level] = 1;
+ wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ wc->level = level;
+ wc->shared_level = -1;
+ wc->stage = DROP_REFERENCE;
+ wc->update_ref = 0;
+ wc->keep_locks = 1;
while (1) {
- wret = walk_down_tree(trans, root, path, &level);
- if (wret < 0)
+ wret = walk_down_tree(trans, root, path, wc);
+ if (wret < 0) {
ret = wret;
- if (wret != 0)
break;
+ }
- wret = walk_up_tree(trans, root, path, &level, parent_level);
+ wret = walk_up_tree(trans, root, path, wc, parent_level);
if (wret < 0)
ret = wret;
if (wret != 0)
break;
}
+ kfree(wc);
btrfs_free_path(path);
return ret;
}
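For reference, the callers touched later in this series (relocation.c and transaction.c) switch from btrfs_drop_dead_root() to the new two-argument entry point. A minimal caller sketch under the new prototype, with update_ref left at zero because dead relocation and snapshot roots need no backref fixup:

	/* sketch only: drop a dead subvolume without updating backrefs */
	btrfs_drop_snapshot(root, 0);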
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 126477eaecf..7c3cd248d8d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -151,7 +151,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
}
if (end_pos > isize) {
i_size_write(inode, end_pos);
- btrfs_update_inode(trans, root, inode);
+ /* we've only changed i_size in ram, and we haven't updated
+ * the disk i_size. There is no need to log the inode
+ * at this time.
+ */
}
err = btrfs_end_transaction(trans, root);
out_unlock:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dbe1aabf96c..7ffa3d34ea1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3580,12 +3580,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
owner = 1;
BTRFS_I(inode)->block_group =
btrfs_find_block_group(root, 0, alloc_hint, owner);
- if ((mode & S_IFREG)) {
- if (btrfs_test_opt(root, NODATASUM))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
- }
key[0].objectid = objectid;
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -3640,6 +3634,13 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
+ if ((mode & S_IFREG)) {
+ if (btrfs_test_opt(root, NODATASUM))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
+ if (btrfs_test_opt(root, NODATACOW))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ }
+
insert_inode_hash(inode);
inode_tree_add(inode);
return inode;
@@ -5082,6 +5083,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
struct btrfs_trans_handle *trans;
+ struct btrfs_root *root;
int ret;
alloc_start = offset & ~mask;
@@ -5100,6 +5102,13 @@ static long btrfs_fallocate(struct inode *inode, int mode,
goto out;
}
+ root = BTRFS_I(inode)->root;
+
+ ret = btrfs_check_data_free_space(root, inode,
+ alloc_end - alloc_start);
+ if (ret)
+ goto out;
+
locked_end = alloc_end - 1;
while (1) {
struct btrfs_ordered_extent *ordered;
@@ -5107,7 +5116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
if (!trans) {
ret = -EIO;
- goto out;
+ goto out_free;
}
/* the extent lock is ordered inside the running
@@ -5168,6 +5177,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
GFP_NOFS);
btrfs_end_transaction(trans, BTRFS_I(inode)->root);
+out_free:
+ btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
out:
mutex_unlock(&inode->i_mutex);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index eff18f5b536..9f4db848db1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1028,7 +1028,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
struct btrfs_file_extent_item);
comp = btrfs_file_extent_compression(leaf, extent);
type = btrfs_file_extent_type(leaf, extent);
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
disko = btrfs_file_extent_disk_bytenr(leaf,
extent);
diskl = btrfs_file_extent_disk_num_bytes(leaf,
@@ -1051,7 +1052,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.objectid = inode->i_ino;
new_key.offset = key.offset + destoff - off;
- if (type == BTRFS_FILE_EXTENT_REG) {
+ if (type == BTRFS_FILE_EXTENT_REG ||
+ type == BTRFS_FILE_EXTENT_PREALLOC) {
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b23dc209ae1..00839793477 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1788,7 +1788,7 @@ static void merge_func(struct btrfs_work *work)
btrfs_end_transaction(trans, root);
}
- btrfs_drop_dead_root(reloc_root);
+ btrfs_drop_snapshot(reloc_root, 0);
if (atomic_dec_and_test(async->num_pending))
complete(async->done);
@@ -2075,9 +2075,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
BUG_ON(ret);
-
- btrfs_tree_unlock(eb);
- free_extent_buffer(eb);
}
if (!lowest) {
btrfs_tree_unlock(upper->eb);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4e83457ea25..2dbf1c1f56e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -593,6 +593,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
return 0;
}
+#if 0
/*
* when dropping snapshots, we generate a ton of delayed refs, and it makes
* sense not to join the transaction while it is trying to flush the current
@@ -681,6 +682,7 @@ int btrfs_drop_dead_root(struct btrfs_root *root)
btrfs_btree_balance_dirty(tree_root, nr);
return ret;
}
+#endif
/*
* new snapshots need to be created at a very specific time in the
@@ -1081,7 +1083,7 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
while (!list_empty(&list)) {
root = list_entry(list.next, struct btrfs_root, root_list);
list_del_init(&root->root_list);
- btrfs_drop_dead_root(root);
+ btrfs_drop_snapshot(root, 0);
}
return 0;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ebdbe62a829..97ce4bf89d1 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -493,9 +493,9 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
return -EBADF;
xid = GetXid();
- mutex_unlock(&pCifsFile->fh_mutex);
+ mutex_lock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
rc = 0;
FreeXid(xid);
return rc;
@@ -527,7 +527,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
if (full_path == NULL) {
rc = -ENOMEM;
reopen_error_exit:
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
FreeXid(xid);
return rc;
}
@@ -569,14 +569,14 @@ reopen_error_exit:
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
cFYI(1, ("cifs_open returned 0x%x", rc));
cFYI(1, ("oplock: %d", oplock));
} else {
reopen_success:
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = false;
- mutex_lock(&pCifsFile->fh_mutex);
+ mutex_unlock(&pCifsFile->fh_mutex);
pCifsInode = CIFS_I(inode);
if (pCifsInode) {
if (can_flush) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3f0e1974abd..31d12de83a2 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -14,35 +14,44 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
-#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/eventfd.h>
struct eventfd_ctx {
+ struct kref kref;
wait_queue_head_t wqh;
/*
* Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a
* wakeup is performed on "wqh". A read(2) will return the "count"
* value to userspace, and will reset "count" to zero. The kernel
- * size eventfd_signal() also, adds to the "count" counter and
+ * side eventfd_signal() also adds to the "count" counter and
* issue a wakeup.
*/
__u64 count;
unsigned int flags;
};
-/*
- * Adds "n" to the eventfd counter "count". Returns "n" in case of
- * success, or a value lower then "n" in case of coutner overflow.
- * This function is supposed to be called by the kernel in paths
- * that do not allow sleeping. In this function we allow the counter
- * to reach the ULLONG_MAX value, and we signal this as overflow
- * condition by returining a POLLERR to poll(2).
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ * The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as an overflow condition by returning a POLLERR
+ * to poll(2).
+ *
+ * Returns @n in case of success, a non-negative number lower than @n in case
+ * of overflow, or the following error codes:
+ *
+ * -EINVAL : The value of @n is negative.
*/
-int eventfd_signal(struct file *file, int n)
+int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
- struct eventfd_ctx *ctx = file->private_data;
unsigned long flags;
if (n < 0)
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
}
EXPORT_SYMBOL_GPL(eventfd_signal);
+static void eventfd_free(struct kref *kref)
+{
+ struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
+
+ kfree(ctx);
+}
+
+/**
+ * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to the eventfd context.
+ *
+ * Returns: In case of success, returns a pointer to the eventfd context.
+ */
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
+{
+ kref_get(&ctx->kref);
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_get);
+
+/**
+ * eventfd_ctx_put - Releases a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to eventfd context.
+ *
+ * The eventfd context reference must have been previously acquired either
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ */
+void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+ kref_put(&ctx->kref, eventfd_free);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_put);
+
static int eventfd_release(struct inode *inode, struct file *file)
{
- kfree(file->private_data);
+ struct eventfd_ctx *ctx = file->private_data;
+
+ wake_up_poll(&ctx->wqh, POLLHUP);
+ eventfd_ctx_put(ctx);
return 0;
}
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
.write = eventfd_write,
};
+/**
+ * eventfd_fget - Acquire a reference of an eventfd file descriptor.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the eventfd file structure in case of success, or the
+ * following error pointer:
+ *
+ * -EBADF : Invalid @fd file descriptor.
+ * -EINVAL : The @fd file descriptor is not an eventfd file.
+ */
struct file *eventfd_fget(int fd)
{
struct file *file;
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
}
EXPORT_SYMBOL_GPL(eventfd_fget);
+/**
+ * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointers returned by the following functions:
+ *
+ * eventfd_fget
+ */
+struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ struct file *file;
+ struct eventfd_ctx *ctx;
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file))
+ return (struct eventfd_ctx *) file;
+ ctx = eventfd_ctx_get(file->private_data);
+ fput(file);
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
+
+/**
+ * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
+ * @file: [in] Eventfd file pointer.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointer:
+ *
+ * -EINVAL : The @file file pointer does not refer to an eventfd file.
+ */
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
+{
+ if (file->f_op != &eventfd_fops)
+ return ERR_PTR(-EINVAL);
+
+ return eventfd_ctx_get(file->private_data);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
+
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
int fd;
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
if (!ctx)
return -ENOMEM;
+ kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
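The refcounted context API introduced above replaces the old file-based eventfd_signal(). A minimal in-kernel usage sketch, assuming a consumer module (the my_* names are hypothetical and not part of this diff): the fd is resolved to an eventfd_ctx once, signalled later from non-sleeping paths, and released when no longer needed.

	static struct eventfd_ctx *my_ctx;	/* hypothetical consumer state */

	static int my_bind(int fd)
	{
		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
		my_ctx = ctx;			/* keep our own kref */
		return 0;
	}

	static void my_notify(void)
	{
		eventfd_signal(my_ctx, 1);	/* bump the counter, wake pollers */
	}

	static void my_unbind(void)
	{
		eventfd_ctx_put(my_ctx);	/* balances eventfd_ctx_fdget() */
	}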
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 6524ecaebb7..e1dedb0f787 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
inode = NULL;
if (ino) {
inode = ext2_iget(dir->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
+ if (unlikely(IS_ERR(inode))) {
+ if (PTR_ERR(inode) == -ESTALE) {
+ ext2_error(dir->i_sb, __func__,
+ "deleted inode referenced: %lu",
+ ino);
+ return ERR_PTR(-EIO);
+ } else {
+ return ERR_CAST(inode);
+ }
+ }
}
return d_splice_alias(inode, dentry);
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8fed2ed12f3..f58ecbc416c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -849,6 +849,81 @@ err:
return err;
}
+static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_inode_out outarg;
+ int err = -EINVAL;
+
+ if (size != sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
+static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_inval_entry_out outarg;
+ int err = -EINVAL;
+ char buf[FUSE_NAME_MAX+1];
+ struct qstr name;
+
+ if (size < sizeof(outarg))
+ goto err;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto err;
+
+ err = -ENAMETOOLONG;
+ if (outarg.namelen > FUSE_NAME_MAX)
+ goto err;
+
+ name.name = buf;
+ name.len = outarg.namelen;
+ err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+ if (err)
+ goto err;
+ fuse_copy_finish(cs);
+ buf[outarg.namelen] = 0;
+ name.hash = full_name_hash(name.name, name.len);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (!fc->sb)
+ goto err_unlock;
+
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
+
+err_unlock:
+ up_read(&fc->killsb);
+ return err;
+
+err:
+ fuse_copy_finish(cs);
+ return err;
+}
+
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
@@ -856,6 +931,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_POLL:
return fuse_notify_poll(fc, size, cs);
+ case FUSE_NOTIFY_INVAL_INODE:
+ return fuse_notify_inval_inode(fc, size, cs);
+
+ case FUSE_NOTIFY_INVAL_ENTRY:
+ return fuse_notify_inval_entry(fc, size, cs);
+
default:
fuse_copy_finish(cs);
return -EINVAL;
@@ -910,7 +991,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
int err;
- unsigned nbytes = iov_length(iov, nr_segs);
+ size_t nbytes = iov_length(iov, nr_segs);
struct fuse_req *req;
struct fuse_out_header oh;
struct fuse_copy_state cs;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b3089a083d3..e703654e7f4 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -375,7 +375,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
struct fuse_conn *fc = get_fuse_conn(dir);
struct fuse_req *req;
struct fuse_req *forget_req;
- struct fuse_open_in inarg;
+ struct fuse_create_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
struct fuse_file *ff;
@@ -399,15 +399,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (!ff)
goto out_put_request;
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
flags &= ~O_NOCTTY;
memset(&inarg, 0, sizeof(inarg));
memset(&outentry, 0, sizeof(outentry));
inarg.flags = flags;
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_CREATE;
req->in.h.nodeid = get_node_id(dir);
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -546,12 +551,17 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
inarg.rdev = new_encode_dev(rdev);
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKNOD;
req->in.numargs = 2;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
+ sizeof(inarg);
req->in.args[0].value = &inarg;
req->in.args[1].size = entry->d_name.len + 1;
req->in.args[1].value = entry->d_name.name;
@@ -578,8 +588,12 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, int mode)
if (IS_ERR(req))
return PTR_ERR(req);
+ if (!fc->dont_mask)
+ mode &= ~current_umask();
+
memset(&inarg, 0, sizeof(inarg));
inarg.mode = mode;
+ inarg.umask = current_umask();
req->in.h.opcode = FUSE_MKDIR;
req->in.numargs = 2;
req->in.args[0].size = sizeof(inarg);
@@ -845,6 +859,43 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
return err;
}
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name)
+{
+ int err = -ENOTDIR;
+ struct inode *parent;
+ struct dentry *dir;
+ struct dentry *entry;
+
+ parent = ilookup5(sb, parent_nodeid, fuse_inode_eq, &parent_nodeid);
+ if (!parent)
+ return -ENOENT;
+
+ mutex_lock(&parent->i_mutex);
+ if (!S_ISDIR(parent->i_mode))
+ goto unlock;
+
+ err = -ENOENT;
+ dir = d_find_alias(parent);
+ if (!dir)
+ goto unlock;
+
+ entry = d_lookup(dir, name);
+ dput(dir);
+ if (!entry)
+ goto unlock;
+
+ fuse_invalidate_attr(parent);
+ fuse_invalidate_entry(entry);
+ dput(entry);
+ err = 0;
+
+ unlock:
+ mutex_unlock(&parent->i_mutex);
+ iput(parent);
+ return err;
+}
+
/*
* Calling into a user-controlled filesystem gives the filesystem
* daemon ptrace-like capabilities over the requester process. This
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fce6ce694fd..cbc464043b6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1922,7 +1922,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
req = fuse_get_req(fc);
if (IS_ERR(req))
- return PTR_ERR(req);
+ return POLLERR;
req->in.h.opcode = FUSE_POLL;
req->in.h.nodeid = ff->nodeid;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index aaf2f9ff970..52b641fc0fa 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -446,6 +446,9 @@ struct fuse_conn {
/** Do multi-page cached writes */
unsigned big_writes:1;
+ /** Don't apply umask to creation modes */
+ unsigned dont_mask:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -481,6 +484,12 @@ struct fuse_conn {
/** Called on final put */
void (*release)(struct fuse_conn *);
+
+ /** Super block for this connection. */
+ struct super_block *sb;
+
+ /** Read/write semaphore to hold when accessing sb. */
+ struct rw_semaphore killsb;
};
static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -509,6 +518,11 @@ extern const struct file_operations fuse_dev_operations;
extern const struct dentry_operations fuse_dentry_operations;
/**
+ * Inode to nodeid comparison.
+ */
+int fuse_inode_eq(struct inode *inode, void *_nodeidp);
+
+/**
* Get a filled in inode
*/
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
@@ -708,6 +722,19 @@ void fuse_release_nowrite(struct inode *inode);
u64 fuse_get_attr_version(struct fuse_conn *fc);
+/**
+ * File-system tells the kernel to invalidate cache for the given node id.
+ */
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len);
+
+/**
+ * File-system tells the kernel to invalidate parent attributes and
+ * the dentry matching parent/name.
+ */
+int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
+ struct qstr *name);
+
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir);
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index d8673ccf90b..f91ccc4a189 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -206,7 +206,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
BUG();
}
-static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
+int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
u64 nodeid = *(u64 *) _nodeidp;
if (get_node_id(inode) == nodeid)
@@ -257,6 +257,31 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
return inode;
}
+int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
+ loff_t offset, loff_t len)
+{
+ struct inode *inode;
+ pgoff_t pg_start;
+ pgoff_t pg_end;
+
+ inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
+ if (!inode)
+ return -ENOENT;
+
+ fuse_invalidate_attr(inode);
+ if (offset >= 0) {
+ pg_start = offset >> PAGE_CACHE_SHIFT;
+ if (len <= 0)
+ pg_end = -1;
+ else
+ pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+ invalidate_inode_pages2_range(inode->i_mapping,
+ pg_start, pg_end);
+ }
+ iput(inode);
+ return 0;
+}
+
static void fuse_umount_begin(struct super_block *sb)
{
fuse_abort_conn(get_fuse_conn_super(sb));
@@ -480,6 +505,7 @@ void fuse_conn_init(struct fuse_conn *fc)
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
+ init_rwsem(&fc->killsb);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
init_waitqueue_head(&fc->blocked_waitq);
@@ -725,6 +751,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
}
if (arg->flags & FUSE_BIG_WRITES)
fc->big_writes = 1;
+ if (arg->flags & FUSE_DONT_MASK)
+ fc->dont_mask = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -748,7 +776,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->minor = FUSE_KERNEL_MINOR_VERSION;
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
- FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
+ FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
@@ -860,10 +888,16 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fuse_conn_init(fc);
fc->dev = sb->s_dev;
+ fc->sb = sb;
err = fuse_bdi_init(fc, sb);
if (err)
goto err_put_conn;
+ /* Handle umasking inside the fuse code */
+ if (sb->s_flags & MS_POSIXACL)
+ fc->dont_mask = 1;
+ sb->s_flags |= MS_POSIXACL;
+
fc->release = fuse_free_conn;
fc->flags = d.flags;
fc->user_id = d.user_id;
@@ -941,12 +975,25 @@ static int fuse_get_sb(struct file_system_type *fs_type,
return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_anon_super(sb);
+}
+
static struct file_system_type fuse_fs_type = {
.owner = THIS_MODULE,
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE,
.get_sb = fuse_get_sb,
- .kill_sb = kill_anon_super,
+ .kill_sb = fuse_kill_sb_anon,
};
#ifdef CONFIG_BLOCK
@@ -958,11 +1005,24 @@ static int fuse_get_sb_blk(struct file_system_type *fs_type,
mnt);
}
+static void fuse_kill_sb_blk(struct super_block *sb)
+{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+ if (fc) {
+ down_write(&fc->killsb);
+ fc->sb = NULL;
+ up_write(&fc->killsb);
+ }
+
+ kill_block_super(sb);
+}
+
static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
.get_sb = fuse_get_sb_blk,
- .kill_sb = kill_block_super,
+ .kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe02ad4740e..032604e5ef2 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* NULL is printed as <NULL> by sprintf: avoid that. */
if (req_root == NULL)
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7515e73e2bf..696686cc206 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -130,9 +130,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (jffs2_sum_active()) {
s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
if (!s) {
- kfree(flashbuf);
JFFS2_WARNING("Can't allocate memory for summary\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
}
diff --git a/fs/namei.c b/fs/namei.c
index 5b961eb71cb..f3c5b278895 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1761,6 +1761,10 @@ do_last:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE |
+ MAY_EXEC));
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
@@ -1817,6 +1821,9 @@ ok:
goto exit;
}
filp = nameidata_to_filp(&nd, open_flag);
+ if (IS_ERR(filp))
+ ima_counts_put(&nd.path,
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
/*
* It is now safe to drop the mnt write
* because the filp has had a write taken
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index ff231ad2389..ff27a296584 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -296,12 +296,15 @@ static int inotify_fasync(int fd, struct file *file, int on)
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
+ struct user_struct *user = group->inotify_data.user;
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
+ atomic_dec(&user->inotify_devs);
+
return 0;
}
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d7d50d7ee51..aa00800adac 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 720af4c7220..a553f1041cf 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -441,7 +441,8 @@
}
#ifdef CONFIG_CONSTRUCTORS
-#define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \
+#define KERNEL_CTORS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b16a957030f..47f7d932a01 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -121,9 +121,9 @@ struct kiocb {
/*
* If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying file* to deliver event to.
+ * this is the underlying eventfd context to deliver events to.
*/
- struct file *ki_eventfd;
+ struct eventfd_ctx *ki_eventfd;
};
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2a04eb54c0d..2892b710771 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -319,7 +319,6 @@ static inline int bio_has_allocated_vec(struct bio *bio)
*/
struct bio_integrity_payload {
struct bio *bip_bio; /* parent bio */
- struct bio_vec *bip_vec; /* integrity data vector */
sector_t bip_sector; /* virtual start sector */
@@ -328,11 +327,12 @@ struct bio_integrity_payload {
unsigned int bip_size;
- unsigned short bip_pool; /* pool the ivec came from */
+ unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_idx; /* current bip_vec index */
struct work_struct bip_work; /* I/O completion */
+ struct bio_vec bip_vec[0]; /* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -430,6 +430,9 @@ struct bio_set {
unsigned int front_pad;
mempool_t *bio_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ mempool_t *bio_integrity_pool;
+#endif
mempool_t *bvec_pool;
};
@@ -634,8 +637,9 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
+extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *);
+extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -645,21 +649,27 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
+extern int bioset_integrity_create(struct bio_set *, int);
+extern void bioset_integrity_free(struct bio_set *);
+extern void bio_integrity_init(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
+#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
-#define bio_integrity_clone(a, b, c) (0)
-#define bio_integrity_free(a) do { } while (0)
+#define bio_integrity_clone(a, b, c, d) (0)
+#define bioset_integrity_free(a) do { } while (0)
+#define bio_integrity_free(a, b) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
+#define bio_integrity_init(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8963d9149b5..49ae07951d5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -301,12 +301,6 @@ struct blk_queue_tag {
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-struct blk_cmd_filter {
- unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
- unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
- struct kobject kobj;
-};
-
struct queue_limits {
unsigned long bounce_pfn;
unsigned long seg_boundary_mask;
@@ -445,7 +439,6 @@ struct request_queue
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
#endif
- struct blk_cmd_filter cmd_filter;
};
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
@@ -998,13 +991,7 @@ static inline int sb_issue_discard(struct super_block *sb,
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}
-/*
-* command filter functions
-*/
-extern int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index f45a8ae5f82..3b85ba6479f 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -8,10 +8,8 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#ifdef CONFIG_EVENTFD
-
-/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
+#include <linux/file.h>
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
@@ -27,16 +25,37 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+#ifdef CONFIG_EVENTFD
+
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
+void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
-int eventfd_signal(struct file *file, int n);
+struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+int eventfd_signal(struct eventfd_ctx *ctx, int n);
#else /* CONFIG_EVENTFD */
-#define eventfd_fget(fd) ERR_PTR(-ENOSYS)
-static inline int eventfd_signal(struct file *file, int n)
-{ return 0; }
+/*
+ * Ugly ugly ugly error layer to support modules that use eventfd but
+ * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
+ */
+static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+{
+ return -ENOSYS;
+}
+
+static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+
+}
-#endif /* CONFIG_EVENTFD */
+#endif
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dd68358996b..f847df9e99b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -819,6 +819,7 @@ struct fb_info {
int node;
int flags;
struct mutex lock; /* Lock for open/release/ioctl funcs */
+ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
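A hedged sketch of the intended use of the new mm_lock, assumed from the field comment (the new_* values are placeholders): a driver that relocates its framebuffer serializes the smem_* update against fb_mmap().

	mutex_lock(&info->mm_lock);
	info->fix.smem_start = new_smem_start;	/* hypothetical new base address */
	info->fix.smem_len   = new_smem_len;
	mutex_unlock(&info->mm_lock);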
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 44848aa830d..6c3de999fb3 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -280,7 +280,7 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
assert_spin_locked(&dentry->d_lock);
parent = dentry->d_parent;
- if (fsnotify_inode_watches_children(parent->d_inode))
+ if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
else
dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index d41ed593f79..cf593bf9fd3 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -25,6 +25,11 @@
* - add IOCTL message
* - add unsolicited notification support
* - add POLL message and NOTIFY_POLL notification
+ *
+ * 7.12
+ * - add umask flag to input argument of open, mknod and mkdir
+ * - add notification messages for invalidation of inodes and
+ * directory entries
*/
#ifndef _LINUX_FUSE_H
@@ -36,7 +41,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 11
+#define FUSE_KERNEL_MINOR_VERSION 12
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -112,6 +117,7 @@ struct fuse_file_lock {
* INIT request/reply flags
*
* FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_DONT_MASK: don't apply umask to file mode on create operations
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -119,6 +125,7 @@ struct fuse_file_lock {
#define FUSE_ATOMIC_O_TRUNC (1 << 3)
#define FUSE_EXPORT_SUPPORT (1 << 4)
#define FUSE_BIG_WRITES (1 << 5)
+#define FUSE_DONT_MASK (1 << 6)
/**
* CUSE INIT request/reply flags
@@ -224,6 +231,8 @@ enum fuse_opcode {
enum fuse_notify_code {
FUSE_NOTIFY_POLL = 1,
+ FUSE_NOTIFY_INVAL_INODE = 2,
+ FUSE_NOTIFY_INVAL_ENTRY = 3,
FUSE_NOTIFY_CODE_MAX,
};
@@ -262,14 +271,18 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+#define FUSE_COMPAT_MKNOD_IN_SIZE 8
+
struct fuse_mknod_in {
__u32 mode;
__u32 rdev;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_mkdir_in {
__u32 mode;
- __u32 padding;
+ __u32 umask;
};
struct fuse_rename_in {
@@ -301,7 +314,14 @@ struct fuse_setattr_in {
struct fuse_open_in {
__u32 flags;
+ __u32 unused;
+};
+
+struct fuse_create_in {
+ __u32 flags;
__u32 mode;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_open_out {
@@ -508,4 +528,16 @@ struct fuse_dirent {
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
+struct fuse_notify_inval_inode_out {
+ __u64 ino;
+ __s64 off;
+ __s64 len;
+};
+
+struct fuse_notify_inval_entry_out {
+ __u64 parent;
+ __u32 namelen;
+ __u32 padding;
+};
+
#endif /* _LINUX_FUSE_H */
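The two notification structs added here are consumed by the fuse_notify_inval_* handlers earlier in this diff. A hedged userspace-daemon sketch, assuming the fuse_dev_write() dispatch convention (a write with fuse_out_header.unique == 0 carries the notify code in the error field, followed by the struct and, for entry invalidation, the NUL-terminated name); fuse_dev_fd and the name are illustrative:

	struct fuse_out_header oh;
	struct fuse_notify_inval_entry_out arg;
	const char name[] = "stale-entry";
	struct iovec iov[3];

	oh.unique  = 0;				/* unsolicited notification */
	oh.error   = FUSE_NOTIFY_INVAL_ENTRY;
	oh.len     = sizeof(oh) + sizeof(arg) + sizeof(name);

	arg.parent  = FUSE_ROOT_ID;		/* nodeid of the parent directory */
	arg.namelen = sizeof(name) - 1;
	arg.padding = 0;

	iov[0] = (struct iovec){ &oh, sizeof(oh) };
	iov[1] = (struct iovec){ &arg, sizeof(arg) };
	iov[2] = (struct iovec){ (void *)name, sizeof(name) };
	writev(fuse_dev_fd, iov, 3);		/* fd assumed open on /dev/fuse */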
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7400900de94..54648e625ef 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
+#include <linux/timer.h>
struct hrtimer_clock_base;
@@ -447,6 +448,8 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
+ if (likely(!timer->start_pid))
+ return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);
}
@@ -456,6 +459,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/ide.h b/include/linux/ide.h
index cf1f3888067..edc93a6d931 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1062,7 +1062,6 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
extern int ide_vlb_clk;
extern int ide_pci_clk;
-unsigned int ide_rq_bytes(struct request *);
int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
@@ -1420,6 +1419,7 @@ static inline void ide_dma_unmap_sg(ide_drive_t *drive,
#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
+bool ide_port_acpi(ide_hwif_t *hwif);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
@@ -1428,6 +1428,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
+static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
diff --git a/include/linux/ima.h b/include/linux/ima.h
index b1b827d091a..0e3f2a4c25f 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -24,6 +24,7 @@ extern int ima_path_check(struct path *path, int mask, int update_counts);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
+extern void ima_counts_put(struct path *path, int mask);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -60,5 +61,10 @@ static inline void ima_counts_get(struct file *file)
{
return;
}
+
+static inline void ima_counts_put(struct path *path, int mask)
+{
+ return;
+}
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fac104e7186..d6320a3e8de 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -303,6 +303,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
extern const char *print_tainted(void);
extern void add_taint(unsigned flag);
extern int test_taint(unsigned flag);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index aacc5449f58..16713dc672e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry {
struct kvm {
struct mutex lock; /* protects the vcpus array and APIC accesses */
spinlock_t mmu_lock;
+ spinlock_t requests_lock;
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
new file mode 100644
index 00000000000..afc9f9fd70f
--- /dev/null
+++ b/include/linux/leds-lp3944.h
@@ -0,0 +1,53 @@
+/*
+ * leds-lp3944.h - platform data structure for lp3944 led controller
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_LP3944_H
+#define __LINUX_LEDS_LP3944_H
+
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#define LP3944_LED0 0
+#define LP3944_LED1 1
+#define LP3944_LED2 2
+#define LP3944_LED3 3
+#define LP3944_LED4 4
+#define LP3944_LED5 5
+#define LP3944_LED6 6
+#define LP3944_LED7 7
+#define LP3944_LEDS_MAX 8
+
+#define LP3944_LED_STATUS_MASK 0x03
+enum lp3944_status {
+ LP3944_LED_STATUS_OFF = 0x0,
+ LP3944_LED_STATUS_ON = 0x1,
+ LP3944_LED_STATUS_DIM0 = 0x2,
+ LP3944_LED_STATUS_DIM1 = 0x3
+};
+
+enum lp3944_type {
+ LP3944_LED_TYPE_NONE,
+ LP3944_LED_TYPE_LED,
+ LP3944_LED_TYPE_LED_INVERTED,
+};
+
+struct lp3944_led {
+ char *name;
+ enum lp3944_type type;
+ enum lp3944_status status;
+};
+
+struct lp3944_platform_data {
+ struct lp3944_led leds[LP3944_LEDS_MAX];
+ u8 leds_size;
+};
+
+#endif /* __LINUX_LEDS_LP3944_H */
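A hedged board-code sketch using the new platform data (the board name and LED assignment are made up): slots left out stay LP3944_LED_TYPE_NONE because the array is zero-initialized.

	static struct lp3944_platform_data board_lp3944_pdata = {
		.leds = {
			[LP3944_LED0] = {
				.name   = "board:red:alert",	/* hypothetical LED name */
				.type   = LP3944_LED_TYPE_LED,
				.status = LP3944_LED_STATUS_OFF,
			},
		},
		.leds_size = LP3944_LEDS_MAX,
	};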
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 376fe07732e..d8bf9665e70 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -45,7 +45,10 @@ struct led_classdev {
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
- /* Activate hardware accelerated blink */
+ /* Activate hardware accelerated blink; delays are in
+ * milliseconds and if none is provided then a sensible default
+ * should be chosen. The call can adjust the timings if it can't
+ * match the values specified exactly. */
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -141,9 +144,14 @@ struct gpio_led {
const char *name;
const char *default_trigger;
unsigned gpio;
- u8 active_low : 1;
- u8 retain_state_suspended : 1;
+ unsigned active_low : 1;
+ unsigned retain_state_suspended : 1;
+ unsigned default_state : 2;
+ /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
};
+#define LEDS_GPIO_DEFSTATE_OFF 0
+#define LEDS_GPIO_DEFSTATE_ON 1
+#define LEDS_GPIO_DEFSTATE_KEEP 2
struct gpio_led_platform_data {
int num_leds;
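A hedged example of the widened gpio_led bitfields and the new default_state member (GPIO number and names are placeholders):

	static struct gpio_led board_leds[] = {
		{
			.name          = "board:green:power",
			.gpio          = 42,				/* hypothetical GPIO */
			.active_low    = 1,
			.default_state = LEDS_GPIO_DEFSTATE_KEEP,	/* keep bootloader state */
		},
	};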
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 3430c775194..7ae05338e94 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 {
__u8 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo2 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __be16 origsrc_port, origdst_port;
+ __be16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+};
+
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h
index fd2272e0959..18afa495f97 100644
--- a/include/linux/netfilter/xt_osf.h
+++ b/include/linux/netfilter/xt_osf.h
@@ -20,6 +20,8 @@
#ifndef _XT_OSF_H
#define _XT_OSF_H
+#include <linux/types.h>
+
#define MAXGENRELEN 32
#define XT_OSF_GENRE (1<<0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a3b00036579..73b46b6b904 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2645,6 +2645,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f921d74f49..68438e18fff 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -24,7 +24,8 @@
#define DEFINE_PER_CPU_SECTION(type, name, section) \
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+ PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \
+ __typeof__(type) per_cpu__##name
/*
* Variant on the per-CPU variable declaration/definition theme used for
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 89698d8aba5..5e970c7d3fd 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -178,8 +178,10 @@ struct perf_counter_attr {
mmap : 1, /* include mmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
- __reserved_1 : 53;
+ __reserved_1 : 51;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -232,6 +234,14 @@ struct perf_counter_mmap_page {
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware counter identifier */
__s64 offset; /* add to hardware counter value */
+ __u64 time_enabled; /* time counter active */
+ __u64 time_running; /* time counter on cpu */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u64 __reserved[123]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -253,7 +263,6 @@ struct perf_counter_mmap_page {
#define PERF_EVENT_MISC_KERNEL (1 << 0)
#define PERF_EVENT_MISC_USER (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
@@ -327,9 +336,18 @@ enum perf_event_type {
PERF_EVENT_FORK = 7,
/*
- * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_SAMPLE_*
- *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ * u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 parent_id; } && PERF_FORMAT_ID
+ * };
+ */
+ PERF_EVENT_READ = 8,
+
+ /*
* struct {
* struct perf_event_header header;
*
@@ -337,8 +355,9 @@ enum perf_event_type {
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u64 id; } && PERF_SAMPLE_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
@@ -347,6 +366,9 @@ enum perf_event_type {
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
+ PERF_EVENT_SAMPLE = 9,
+
+ PERF_EVENT_MAX, /* non-ABI */
};
enum perf_callchain_context {
@@ -582,6 +604,7 @@ struct perf_counter_context {
int nr_counters;
int nr_active;
int is_active;
+ int nr_stat;
atomic_t refcount;
struct task_struct *task;
@@ -669,7 +692,16 @@ static inline int is_software_counter(struct perf_counter *counter)
(counter->attr.type != PERF_TYPE_HW_CACHE);
}
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ if (atomic_read(&perf_swcounter_enabled[event]))
+ __perf_swcounter_event(event, nr, nmi, regs, addr);
+}
extern void __perf_counter_mmap(struct vm_area_struct *vma);
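With the wrapper above, a software-counter call site pays only an atomic_read() in the common case where no counter of that type is enabled. An illustrative call site, not taken from this diff:

	/* e.g. from an architecture's page-fault handler */
	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);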
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d075426988..0085d758d64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -349,8 +349,20 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;
-/* Maximum number of active map areas.. This is a random (large) number */
-#define DEFAULT_MAX_MAP_COUNT 65536
+/*
+ * Default maximum number of active map areas; this limits the number of vmas
+ * per mm struct. Users can overwrite this number via sysctl, but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, one section is created
+ * per vma. In ELF, the number of sections is stored as an unsigned short, so
+ * it must be smaller than 65535 at coredump time. Because the kernel adds some
+ * informative sections to the program image when generating the coredump, we
+ * need some margin. The number of extra sections is currently 1-3 and depends
+ * on the arch, so we use "5" as a safe margin here.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
extern int sysctl_max_map_count;
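With USHORT_MAX == 65535 and the five-section margin described above, the new default evaluates to

	DEFAULT_MAX_MAP_COUNT = 65535 - 5 = 65530

replacing the old fixed 65536 and keeping the ELF section count of a coredump within the 16-bit e_shnum field.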
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 9c4cd27f468..c47c4b4da97 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -80,6 +80,8 @@ struct spi_device {
#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
#define SPI_3WIRE 0x10 /* SI/SO signals shared */
#define SPI_LOOP 0x20 /* loopback mode */
+#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define SPI_READY 0x80 /* slave pulls low to pause */
u8 bits_per_word;
int irq;
void *controller_state;
@@ -248,6 +250,10 @@ struct spi_master {
/* spi_device.mode flags understood by this controller driver */
u16 mode_bits;
+ /* other constraints relevant to this driver */
+ u16 flags;
+#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
+
/* Setup mode and clock, etc (spi driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h
index 95251ccd5a0..bf0570a84f7 100644
--- a/include/linux/spi/spidev.h
+++ b/include/linux/spi/spidev.h
@@ -40,6 +40,8 @@
#define SPI_LSB_FIRST 0x08
#define SPI_3WIRE 0x10
#define SPI_LOOP 0x20
+#define SPI_NO_CS 0x40
+#define SPI_READY 0x80
/*---------------------------------------------------------------------------*/
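A hedged protocol-driver sketch for the two new mode bits (illustrative only; the assumption is that a controller which does not advertise the bit in mode_bits will have spi_setup() reject the request):

	int ret;

	spi->mode |= SPI_READY;		/* slave may pause transfers via its ready line */
	ret = spi_setup(spi);
	if (ret)
		dev_warn(&spi->dev, "READY handshake not supported\n");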
diff --git a/include/linux/timer.h b/include/linux/timer.h
index ccf882eed8f..be62ec2ebea 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,6 +190,8 @@ extern unsigned long get_next_timer_interrupt(unsigned long now);
*/
#ifdef CONFIG_TIMER_STATS
+extern int timer_stats_active;
+
#define TIMER_STATS_FLAG_DEFERRABLE 0x1
extern void init_timer_stats(void);
@@ -203,6 +205,8 @@ extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 5d44059f6d6..310e18a880f 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,7 +42,6 @@ struct usbnet {
/* protocol/interface state */
struct net_device *net;
- struct net_device_stats stats;
int msg_enable;
unsigned long data [5];
u32 xid;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index a632689b61b..cbdd6284996 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -258,8 +258,8 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
/* Update TCP window tracking data when NAT mangles the packet */
extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir);
+ struct nf_conn *ct, int dir,
+ s16 offset);
/* Fake conntrack entry for untracked connections */
extern struct nf_conn nf_conntrack_untracked;
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 5054dc5ea2c..29d12673661 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -45,6 +45,7 @@ int phonet_address_add(struct net_device *dev, u8 addr);
int phonet_address_del(struct net_device *dev, u8 addr);
u8 phonet_address_get(struct net_device *dev, u8 addr);
int phonet_address_lookup(struct net *net, u8 addr);
+void phonet_address_notify(int event, struct net_device *dev, u8 addr);
#define PN_NO_ADDR 0xff
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e35ba2c3a8d..c5e68adc673 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -32,6 +32,7 @@
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
+#include <linux/ima.h>
#include <net/sock.h>
#include "util.h"
@@ -733,6 +734,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
error = PTR_ERR(filp);
goto out_putfd;
}
+ ima_counts_get(filp);
fd_install(fd, filp);
goto out_upsem;
diff --git a/kernel/Makefile b/kernel/Makefile
index 780c8dcf451..2093a691f1c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 7afa3156416..9f3391090b3 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
static int acct_on(char *name)
{
struct file *file;
+ struct vfsmount *mnt;
int error;
struct pid_namespace *ns;
struct bsd_acct_struct *acct = NULL;
@@ -256,11 +257,12 @@ static int acct_on(char *name)
acct = NULL;
}
- mnt_pin(file->f_path.mnt);
+ mnt = file->f_path.mnt;
+ mnt_pin(mnt);
acct_file_reopen(ns->bacct, file, ns);
spin_unlock(&acct_lock);
- mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
+ mntput(mnt); /* it's pinned, now give up active reference */
kfree(acct);
return 0;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea..d55a50da234 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
list_add_rcu(&counter->event_entry, &ctx->event_list);
ctx->nr_counters++;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat++;
}
/*
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
if (list_empty(&counter->list_entry))
return;
ctx->nr_counters--;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat--;
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1,
&& !ctx1->pin_count && !ctx2->pin_count;
}
+static void __perf_counter_read(void *counter);
+
+static void __perf_counter_sync_stat(struct perf_counter *counter,
+ struct perf_counter *next_counter)
+{
+ u64 value;
+
+ if (!counter->attr.inherit_stat)
+ return;
+
+ /*
+ * Update the counter value, we cannot use perf_counter_read()
+ * because we're in the middle of a context switch and have IRQs
+ * disabled, which upsets smp_call_function_single(), however
+ * we know the counter must be on the current CPU, therefore we
+ * don't need to use it.
+ */
+ switch (counter->state) {
+ case PERF_COUNTER_STATE_ACTIVE:
+ __perf_counter_read(counter);
+ break;
+
+ case PERF_COUNTER_STATE_INACTIVE:
+ update_counter_times(counter);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * In order to keep per-task stats reliable we need to flip the counter
+ * values when we flip the contexts.
+ */
+ value = atomic64_read(&next_counter->count);
+ value = atomic64_xchg(&counter->count, value);
+ atomic64_set(&next_counter->count, value);
+
+ swap(counter->total_time_enabled, next_counter->total_time_enabled);
+ swap(counter->total_time_running, next_counter->total_time_running);
+
+ /*
+ * Since we swizzled the values, update the user visible data too.
+ */
+ perf_counter_update_userpage(counter);
+ perf_counter_update_userpage(next_counter);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+
+static void perf_counter_sync_stat(struct perf_counter_context *ctx,
+ struct perf_counter_context *next_ctx)
+{
+ struct perf_counter *counter, *next_counter;
+
+ if (!ctx->nr_stat)
+ return;
+
+ counter = list_first_entry(&ctx->event_list,
+ struct perf_counter, event_entry);
+
+ next_counter = list_first_entry(&next_ctx->event_list,
+ struct perf_counter, event_entry);
+
+ while (&counter->event_entry != &ctx->event_list &&
+ &next_counter->event_entry != &next_ctx->event_list) {
+
+ __perf_counter_sync_stat(counter, next_counter);
+
+ counter = list_next_entry(counter, event_entry);
+ next_counter = list_next_entry(next_counter, event_entry);
+ }
+}
+
/*
* Called from scheduler to remove the counters of the current task,
* with interrupts disabled.
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
+
+ perf_counter_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
@@ -1348,9 +1429,56 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
}
/*
+ * Enable all of a task's counters that have been marked enable-on-exec.
+ * This expects task == current.
+ */
+static void perf_counter_enable_on_exec(struct task_struct *task)
+{
+ struct perf_counter_context *ctx;
+ struct perf_counter *counter;
+ unsigned long flags;
+ int enabled = 0;
+
+ local_irq_save(flags);
+ ctx = task->perf_counter_ctxp;
+ if (!ctx || !ctx->nr_counters)
+ goto out;
+
+ __perf_counter_task_sched_out(ctx);
+
+ spin_lock(&ctx->lock);
+
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (!counter->attr.enable_on_exec)
+ continue;
+ counter->attr.enable_on_exec = 0;
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ continue;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
+ enabled = 1;
+ }
+
+ /*
+ * Unclone this context if we enabled any counter.
+ */
+ if (enabled && ctx->parent_ctx) {
+ put_ctx(ctx->parent_ctx);
+ ctx->parent_ctx = NULL;
+ }
+
+ spin_unlock(&ctx->lock);
+
+ perf_counter_task_sched_in(task, smp_processor_id());
+ out:
+ local_irq_restore(flags);
+}
+
+/*
* Cross CPU call to read the hardware counter
*/
-static void __read(void *info)
+static void __perf_counter_read(void *info)
{
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
*/
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
- __read, counter, 1);
+ __perf_counter_read, counter, 1);
} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
update_counter_times(counter);
}
@@ -1508,11 +1636,13 @@ static void free_counter(struct perf_counter *counter)
{
perf_pending_sync(counter);
- atomic_dec(&nr_counters);
- if (counter->attr.mmap)
- atomic_dec(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_dec(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_dec(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_dec(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_dec(&nr_comm_counters);
+ }
if (counter->destroy)
counter->destroy(counter);
@@ -1751,6 +1881,14 @@ int perf_counter_task_disable(void)
return 0;
}
+static int perf_counter_index(struct perf_counter *counter)
+{
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return 0;
+
+ return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
@@ -1775,11 +1913,17 @@ void perf_counter_update_userpage(struct perf_counter *counter)
preempt_disable();
++userpg->lock;
barrier();
- userpg->index = counter->hw.idx;
+ userpg->index = perf_counter_index(counter);
userpg->offset = atomic64_read(&counter->count);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
+ userpg->time_enabled = counter->total_time_enabled +
+ atomic64_read(&counter->child_total_time_enabled);
+
+ userpg->time_running = counter->total_time_running +
+ atomic64_read(&counter->child_total_time_running);
+
barrier();
++userpg->lock;
preempt_enable();
@@ -2483,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
u32 cpu, reserved;
} cpu_entry;
- header.type = 0;
+ header.type = PERF_EVENT_SAMPLE;
header.size = sizeof(header);
- header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc = 0;
header.misc |= perf_misc_flags(data->regs);
if (sample_type & PERF_SAMPLE_IP) {
ip = perf_instruction_pointer(data->regs);
- header.type |= PERF_SAMPLE_IP;
header.size += sizeof(ip);
}
@@ -2500,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
tid_entry.pid = perf_counter_pid(counter, current);
tid_entry.tid = perf_counter_tid(counter, current);
- header.type |= PERF_SAMPLE_TID;
header.size += sizeof(tid_entry);
}
@@ -2510,34 +2652,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
*/
time = sched_clock();
- header.type |= PERF_SAMPLE_TIME;
header.size += sizeof(u64);
}
- if (sample_type & PERF_SAMPLE_ADDR) {
- header.type |= PERF_SAMPLE_ADDR;
+ if (sample_type & PERF_SAMPLE_ADDR)
header.size += sizeof(u64);
- }
- if (sample_type & PERF_SAMPLE_ID) {
- header.type |= PERF_SAMPLE_ID;
+ if (sample_type & PERF_SAMPLE_ID)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_CPU) {
- header.type |= PERF_SAMPLE_CPU;
header.size += sizeof(cpu_entry);
cpu_entry.cpu = raw_smp_processor_id();
}
- if (sample_type & PERF_SAMPLE_PERIOD) {
- header.type |= PERF_SAMPLE_PERIOD;
+ if (sample_type & PERF_SAMPLE_PERIOD)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_GROUP) {
- header.type |= PERF_SAMPLE_GROUP;
header.size += sizeof(u64) +
counter->nr_siblings * sizeof(group_entry);
}
@@ -2547,10 +2680,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
if (callchain) {
callchain_size = (1 + callchain->nr) * sizeof(u64);
-
- header.type |= PERF_SAMPLE_CALLCHAIN;
header.size += callchain_size;
- }
+ } else
+ header.size += sizeof(u64);
}
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2601,13 +2733,79 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
}
}
- if (callchain)
- perf_output_copy(&handle, callchain, callchain_size);
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
+ else {
+ u64 nr = 0;
+ perf_output_put(&handle, nr);
+ }
+ }
perf_output_end(&handle);
}
/*
+ * read event
+ */
+
+struct perf_read_event {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ u64 value;
+ u64 format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+ struct task_struct *task)
+{
+ struct perf_output_handle handle;
+ struct perf_read_event event = {
+ .header = {
+ .type = PERF_EVENT_READ,
+ .misc = 0,
+ .size = sizeof(event) - sizeof(event.format),
+ },
+ .pid = perf_counter_pid(counter, task),
+ .tid = perf_counter_tid(counter, task),
+ .value = atomic64_read(&counter->count),
+ };
+ int ret, i = 0;
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_enabled;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_running;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_ID) {
+ u64 id;
+
+ event.header.size += sizeof(u64);
+ if (counter->parent)
+ id = counter->parent->id;
+ else
+ id = counter->id;
+
+ event.format[i++] = id;
+ }
+
+ ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+ if (ret)
+ return;
+
+ perf_output_copy(&handle, &event, event.header.size);
+ perf_output_end(&handle);
+}
+
+/*
* fork tracking
*/
@@ -2798,6 +2996,9 @@ void perf_counter_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
+ if (task->perf_counter_ctxp)
+ perf_counter_enable_on_exec(task);
+
if (!atomic_read(&nr_comm_counters))
return;
@@ -3317,8 +3518,8 @@ out:
put_cpu_var(perf_cpu_context);
}
-void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+void __perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.regs = regs,
@@ -3509,9 +3710,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
}
#endif
+atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_counter_destroy(struct perf_counter *counter)
+{
+ u64 event = counter->attr.config;
+
+ WARN_ON(counter->parent);
+
+ atomic_dec(&perf_swcounter_enabled[event]);
+}
+
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
const struct pmu *pmu = NULL;
+ u64 event = counter->attr.config;
/*
* Software counters (currently) can't in general distinguish
@@ -3520,7 +3733,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->attr.config) {
+ switch (event) {
case PERF_COUNT_SW_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
@@ -3541,6 +3754,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
case PERF_COUNT_SW_CONTEXT_SWITCHES:
case PERF_COUNT_SW_CPU_MIGRATIONS:
+ if (!counter->parent) {
+ atomic_inc(&perf_swcounter_enabled[event]);
+ counter->destroy = sw_perf_counter_destroy;
+ }
pmu = &perf_ops_generic;
break;
}
@@ -3556,6 +3773,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
int cpu,
struct perf_counter_context *ctx,
struct perf_counter *group_leader,
+ struct perf_counter *parent_counter,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -3591,6 +3809,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
counter->ctx = ctx;
counter->oncpu = -1;
+ counter->parent = parent_counter;
+
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
counter->id = atomic64_inc_return(&perf_counter_id);
@@ -3648,11 +3868,13 @@ done:
counter->pmu = pmu;
- atomic_inc(&nr_counters);
- if (counter->attr.mmap)
- atomic_inc(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_inc(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_inc(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_inc(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_inc(&nr_comm_counters);
+ }
return counter;
}
@@ -3815,7 +4037,7 @@ SYSCALL_DEFINE5(perf_counter_open,
}
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
- GFP_KERNEL);
+ NULL, GFP_KERNEL);
ret = PTR_ERR(counter);
if (IS_ERR(counter))
goto err_put_context;
@@ -3881,7 +4103,8 @@ inherit_counter(struct perf_counter *parent_counter,
child_counter = perf_counter_alloc(&parent_counter->attr,
parent_counter->cpu, child_ctx,
- group_leader, GFP_KERNEL);
+ group_leader, parent_counter,
+ GFP_KERNEL);
if (IS_ERR(child_counter))
return child_counter;
get_ctx(child_ctx);
@@ -3904,12 +4127,6 @@ inherit_counter(struct perf_counter *parent_counter,
*/
add_counter_to_ctx(child_counter, child_ctx);
- child_counter->parent = parent_counter;
- /*
- * inherit into child's child as well:
- */
- child_counter->attr.inherit = 1;
-
/*
* Get a reference to the parent filp - we will fput it
* when the child counter exits. This is safe to do because
@@ -3953,10 +4170,14 @@ static int inherit_group(struct perf_counter *parent_counter,
}
static void sync_child_counter(struct perf_counter *child_counter,
- struct perf_counter *parent_counter)
+ struct task_struct *child)
{
+ struct perf_counter *parent_counter = child_counter->parent;
u64 child_val;
+ if (child_counter->attr.inherit_stat)
+ perf_counter_read_event(child_counter, child);
+
child_val = atomic64_read(&child_counter->count);
/*
@@ -3985,7 +4206,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
- struct perf_counter_context *child_ctx)
+ struct perf_counter_context *child_ctx,
+ struct task_struct *child)
{
struct perf_counter *parent_counter;
@@ -3999,7 +4221,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
* counters need to be zapped - but otherwise linger.
*/
if (parent_counter) {
- sync_child_counter(child_counter, parent_counter);
+ sync_child_counter(child_counter, child);
free_counter(child_counter);
}
}
@@ -4061,7 +4283,7 @@ void perf_counter_exit_task(struct task_struct *child)
again:
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
list_entry)
- __perf_counter_exit_task(child_counter, child_ctx);
+ __perf_counter_exit_task(child_counter, child_ctx, child);
/*
* If the last counter was a group counter, it will have appended all
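Among the changes above, __perf_counter_sync_stat() keeps per-task statistics stable across a context switch by exchanging the two counters' totals rather than copying one over the other, so neither side loses an update. A reduced sketch of that three-step flip using the GCC/Clang __atomic builtins in place of the kernel's atomic64_t helpers (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Swap two 64-bit counter values: read one, exchange it into the other,
 * store the displaced value back. */
static void sync_counts(int64_t *prev, int64_t *next)
{
    int64_t value;

    value = __atomic_load_n(next, __ATOMIC_RELAXED);
    value = __atomic_exchange_n(prev, value, __ATOMIC_RELAXED);
    __atomic_store_n(next, value, __ATOMIC_RELAXED);
}

int main(void)
{
    int64_t a = 100, b = 250;

    sync_counts(&a, &b);
    printf("a=%lld b=%lld\n", (long long)a, (long long)b); /* a=250 b=100 */
    return 0;
}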
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f5..5fa1db48d8b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,6 +36,7 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
+#include <linux/kmemleak.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -512,6 +513,12 @@ void __init pidhash_init(void)
pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
if (!pid_hash)
panic("Could not alloc pidhash!\n");
+ /*
+ * pid_hash contains references to allocated struct pid objects and it
+ * must be scanned by kmemleak to avoid false positives.
+ */
+ kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
+ GFP_KERNEL);
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
}
diff --git a/kernel/resource.c b/kernel/resource.c
index ac5f3a36923..78b087221c1 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -787,7 +787,7 @@ static int __init reserve_setup(char *str)
static struct resource reserve[MAXRESERVE];
for (;;) {
- int io_start, io_num;
+ unsigned int io_start, io_num;
int x = reserved;
if (get_option (&str, &io_start) != 2)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 62e4ff9968b..98e02328c67 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -335,7 +335,10 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
{
@@ -744,6 +747,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "panic_on_io_nmi",
+ .data = &panic_on_io_nmi,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = KERN_BOOTLOADER_TYPE,
.procname = "bootloader_type",
.data = &bootloader_type,
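The timer_migration entry above now goes through proc_dointvec_minmax with extra1/extra2 set to zero and one, so writes outside that window are refused rather than stored. A tiny user-space sketch of that accept-or-reject rule (a hypothetical helper, not the kernel handler):

#include <stdio.h>

/* Store the new value only if it lies inside [lo, hi]; this mirrors the
 * range check a min/max-style handler performs on writes. */
static int store_in_range(int *dst, int val, int lo, int hi)
{
    if (val < lo || val > hi)
        return -1;      /* rejected */
    *dst = val;
    return 0;
}

int main(void)
{
    int timer_migration = 1;

    printf("write 0 -> %d\n", store_in_range(&timer_migration, 0, 0, 1));
    printf("write 7 -> %d\n", store_in_range(&timer_migration, 7, 0, 1));
    printf("value is now %d\n", timer_migration);
    return 0;
}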
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c994530d166..4cde8b9c716 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex);
/*
* Collection status, active/inactive:
*/
-static int __read_mostly active;
+int __read_mostly timer_stats_active;
/*
* Beginning/end timestamps of measurement:
@@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
struct entry *entry, input;
unsigned long flags;
- if (likely(!active))
+ if (likely(!timer_stats_active))
return;
lock = &per_cpu(lookup_lock, raw_smp_processor_id());
@@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.timer_flag = timer_flag;
spin_lock_irqsave(lock, flags);
- if (!active)
+ if (!timer_stats_active)
goto out_unlock;
entry = tstat_lookup(&input, comm);
@@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v)
/*
* If still active then calculate up to now:
*/
- if (active)
+ if (timer_stats_active)
time_stop = ktime_get();
time = ktime_sub(time_stop, time_start);
@@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
mutex_lock(&show_mutex);
switch (ctl[0]) {
case '0':
- if (active) {
- active = 0;
+ if (timer_stats_active) {
+ timer_stats_active = 0;
time_stop = ktime_get();
sync_access();
}
break;
case '1':
- if (!active) {
+ if (!timer_stats_active) {
reset_entries();
time_start = ktime_get();
smp_mb();
- active = 1;
+ timer_stats_active = 1;
}
break;
default:
diff --git a/kernel/timer.c b/kernel/timer.c
index 54d3912f8ca..0b36b9e5cc8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -380,6 +380,8 @@ static void timer_stats_account_timer(struct timer_list *timer)
{
unsigned int flag = 0;
+ if (likely(!timer->start_site))
+ return;
if (unlikely(tbase_get_deferrable(timer->base)))
flag |= TIMER_STATS_FLAG_DEFERRABLE;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55fb4c..f3716bf04df 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
- rec++;
+ if (idx != 0)
+ rec++;
+
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
@@ -1417,10 +1419,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
+
+ if (!(iter->flags & FTRACE_ITER_HASH))
+ *pos = 0;
iter->flags |= FTRACE_ITER_HASH;
- return t_hash_next(m, p, pos);
+ iter->hidx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_hash_next(m, p, &l);
+ if (!p)
+ break;
+ }
+ return p;
}
static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1479,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
iter->pg = iter->pg->next;
iter->idx = 0;
goto retry;
- } else {
- iter->idx = -1;
}
} else {
rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1507,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
mutex_lock(&ftrace_lock);
/*
@@ -1508,23 +1519,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- (*pos)++;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
- if (*pos > 0) {
- if (iter->idx < 0)
- return p;
- (*pos)--;
- iter->idx--;
+ iter->pg = ftrace_pages_start;
+ iter->idx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_next(m, p, &l);
+ if (!p)
+ break;
}
- p = t_next(m, p, pos);
-
- if (!p)
+ if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
return p;
@@ -2500,32 +2509,31 @@ int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
{
unsigned long *array = m->private;
- int index = *pos;
- (*pos)++;
-
- if (index >= ftrace_graph_count)
+ if (*pos >= ftrace_graph_count)
return NULL;
+ return &array[*pos];
+}
- return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
- void *p = NULL;
-
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
if (!ftrace_graph_count && !*pos)
return (void *)1;
- p = g_next(m, p, pos);
-
- return p;
+ return __g_next(m, pos);
}
static void g_stop(struct seq_file *m, void *p)
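Several iterator fixes in this series share one recipe: the seq_file start() routine no longer carries private position state, it rewinds to the beginning and replays next() until the requested *pos is reached, which keeps restarts after partial reads correct. A condensed user-space model of that recipe, with made-up data rather than the ftrace structures:

#include <stdio.h>

static const char *items[] = { "alpha", "beta", "gamma" };
#define NR_ITEMS ((long)(sizeof(items) / sizeof(items[0])))

/* next(): advance the position and return the element there, or NULL. */
static const void *it_next(const void *v, long *pos)
{
    (void)v;
    (*pos)++;
    return (*pos < NR_ITEMS) ? items[*pos] : NULL;
}

/* start(): restart from element zero and replay next() up to *pos. */
static const void *it_start(long *pos)
{
    const void *p = (*pos < NR_ITEMS) ? items[0] : NULL;
    long l = 0;

    while (p && l < *pos)
        p = it_next(p, &l);
    return p;
}

int main(void)
{
    long pos = 2;
    const void *p = it_start(&pos);

    printf("restart at %ld -> %s\n", pos, p ? (const char *)p : "(end)");
    return 0;
}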
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac263825..bf27bb7a63e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
}
+#ifdef CONFIG_TRACING
+
#define TRACE_RECURSIVE_DEPTH 16
static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
current->trace_recursion--;
}
+#else
+
+#define trace_recursive_lock() (0)
+#define trace_recursive_unlock() do { } while (0)
+
+#endif
+
static DEFINE_PER_CPU(int, rb_need_resched);
/**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
}
fs_initcall(rb_init_debugfs);
+#endif
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f0ee4..3aa0a0dfdfa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -284,13 +284,12 @@ void trace_wake_up(void)
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
- int ret;
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &buf_size);
+ buf_size = memparse(str, &str);
/* nr_entries can not be zero */
- if (ret < 0 || buf_size == 0)
+ if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
@@ -2053,25 +2052,23 @@ static int tracing_open(struct inode *inode, struct file *file)
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
- m->private = t;
-
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
- for (; t && l < *pos; t = t_next(m, t, &l))
+ for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
@@ -2107,18 +2104,10 @@ static struct seq_operations show_traces_seq_ops = {
static int show_traces_open(struct inode *inode, struct file *file)
{
- int ret;
-
if (tracing_disabled)
return -ENODEV;
- ret = seq_open(file, &show_traces_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = trace_types;
- }
-
- return ret;
+ return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f..3548ae5cc78 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
extern struct pid *ftrace_pid_trace;
+#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
return test_tsk_trace_trace(task);
}
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+ return 1;
+}
+#endif
/*
* trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b..53c8fd376a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return t_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = t_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static void *
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return s_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = s_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static int t_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f13476483..7402144bff2 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
if (count == -1)
seq_printf(m, ":unlimited\n");
else
- seq_printf(m, ":count=%ld", count);
- seq_putc(m, '\n');
+ seq_printf(m, ":count=%ld\n", count);
return 0;
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b6..7b627811082 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
{
- const char **fmt = m->private;
- const char **next = fmt;
-
- (*pos)++;
+ const char **fmt = __start___trace_bprintk_fmt + *pos;
if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
return NULL;
-
- next = fmt;
- m->private = ++next;
-
return fmt;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void * v, loff_t *pos)
{
- return t_next(m, NULL, pos);
+ (*pos)++;
+ return t_start(m, pos);
}
static int t_show(struct seq_file *m, void *v)
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {
static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
- int ret;
-
- ret = seq_open(file, &show_format_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
-
- m->private = __start___trace_bprintk_fmt;
- }
- return ret;
+ return seq_open(file, &show_format_seq_ops);
}
static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4..e66f5e49334 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -199,17 +199,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
mutex_lock(&session->stat_mutex);
/* If we are in the beginning of the file, print the headers */
- if (!*pos && session->ts->stat_headers) {
- (*pos)++;
+ if (!*pos && session->ts->stat_headers)
return SEQ_START_TOKEN;
- }
node = rb_first(&session->stat_root);
for (i = 0; node && i < *pos; i++)
node = rb_next(node);
- (*pos)++;
-
return node;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a1a06..12327b2bb78 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -359,6 +359,18 @@ config DEBUG_KMEMLEAK
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+ int "Maximum kmemleak early log entries"
+ depends on DEBUG_KMEMLEAK
+ range 200 2000
+ default 400
+ help
+ Kmemleak must track all the memory allocations to avoid
+ reporting false positives. Since memory may be allocated or
+ freed before kmemleak is initialised, an early log buffer is
+ used to store these actions. If kmemleak reports "early log
+ buffer exceeded", please increase this value.
+
config DEBUG_KMEMLEAK_TEST
tristate "Simple test for the kernel memory leak detector"
depends on DEBUG_KMEMLEAK
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b1f0885dda2..3df063706f5 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;
+ spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
+ spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c96f2c8700a..e766e1da09d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- * file together with modifications to the memory scanning parameters
- * including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ * prevents multiple users of the "kmemleak" debugfs file together with
+ * modifications to the memory scanning parameters including the scan_thread
+ * pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -105,7 +105,6 @@
#define MAX_TRACE 16 /* stack trace length */
#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
@@ -186,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
-/* used for yielding the CPU to other tasks during scanning */
-static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
-static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -235,7 +231,7 @@ struct early_log {
};
/* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -279,15 +275,6 @@ static int color_gray(const struct kmemleak_object *object)
}
/*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
-{
- return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
-}
-
-/*
* Objects are considered unreferenced only if their color is white, they have
* not be deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
@@ -295,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+ time_before_eq(object->jiffies + jiffies_min_age,
+ jiffies_last_scan);
}
/*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
*/
-#define print_helper(seq, x...) do { \
- struct seq_file *s = (seq); \
- if (s) \
- seq_printf(s, x); \
- else \
- pr_info(x); \
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
- pr_info("referenced object 0x%08lx (size %zu)\n",
- object->pointer, object->size);
-}
-
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
- print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- print_helper(seq, " backtrace:\n");
+ seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ object->pointer, object->size);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
- print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
+ seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -554,8 +527,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
+#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
+#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -571,8 +546,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_REPORTED)
- print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -696,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_stop("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded\n");
+ kmemleak_disable();
return;
}
@@ -808,21 +782,6 @@ void kmemleak_no_scan(const void *ptr)
EXPORT_SYMBOL(kmemleak_no_scan);
/*
- * Yield the CPU so that other tasks get a chance to run. The yielding is
- * rate-limited to avoid excessive number of calls to the schedule() function
- * during memory scanning.
- */
-static void scan_yield(void)
-{
- might_sleep();
-
- if (time_is_before_eq_jiffies(next_scan_yield)) {
- schedule();
- next_scan_yield = jiffies + jiffies_scan_yield;
- }
-}
-
-/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
*/
@@ -862,15 +821,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;
- /*
- * When scanning a memory block with a corresponding
- * kmemleak_object, the CPU yielding is handled in the calling
- * code since it holds the object->lock to avoid the block
- * freeing.
- */
- if (!scanned)
- scan_yield();
-
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -952,6 +902,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
+ int new_leaks = 0;
+
+ jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1033,7 +986,7 @@ static void kmemleak_scan(void)
*/
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
- scan_yield();
+ cond_resched();
/* may add new objects to the list */
if (!scan_should_stop())
@@ -1049,6 +1002,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));
+
+ /*
+ * If scanning was stopped do not report any new unreferenced objects.
+ */
+ if (scan_should_stop())
+ return;
+
+ /*
+ * Scanning result reporting.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+ new_leaks++;
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (new_leaks)
+ pr_info("%d new suspected memory leaks (see "
+ "/sys/kernel/debug/kmemleak)\n", new_leaks);
+
}
/*
@@ -1070,36 +1049,12 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
- struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;
mutex_lock(&scan_mutex);
-
kmemleak_scan();
- reported_leaks = 0;
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- unsigned long flags;
-
- if (reported_leaks >= REPORTS_NR)
- break;
- spin_lock_irqsave(&object->lock, flags);
- if (!(object->flags & OBJECT_REPORTED) &&
- unreferenced_object(object)) {
- print_unreferenced(NULL, object);
- object->flags |= OBJECT_REPORTED;
- reported_leaks++;
- } else if ((object->flags & OBJECT_REPORTED) &&
- referenced_object(object)) {
- print_referenced(object);
- object->flags &= ~OBJECT_REPORTED;
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
mutex_unlock(&scan_mutex);
+
/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1067,7 @@ static int kmemleak_scan_thread(void *arg)
/*
* Start the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1127,7 +1082,7 @@ void start_scan_thread(void)
/*
* Stop the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1147,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;
- if (!n) {
- kmemleak_scan();
+ if (!n)
reported_leaks = 0;
- }
if (reported_leaks >= REPORTS_NR)
return NULL;
@@ -1211,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;
spin_lock_irqsave(&object->lock, flags);
- if (!unreferenced_object(object))
- goto out;
- print_unreferenced(seq, object);
- reported_leaks++;
-out:
+ if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+ print_unreferenced(seq, object);
+ reported_leaks++;
+ }
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1234,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;
- ret = mutex_lock_interruptible(&kmemleak_mutex);
+ ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
- ret = mutex_lock_interruptible(&scan_mutex);
- if (ret < 0)
- goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1249,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
scan_unlock:
mutex_unlock(&scan_mutex);
-kmemleak_unlock:
- mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1259,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
seq_release(inode, file);
- mutex_unlock(&scan_mutex);
- }
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
return ret;
}
@@ -1278,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
+ * scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1315,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
- } else
+ } else if (strncmp(buf, "scan", 4) == 0)
+ kmemleak_scan();
+ else
return -EINVAL;
/* ignore the rest of the buffer, only one command at a time */
@@ -1340,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
stop_scan_thread();
- mutex_unlock(&kmemleak_mutex);
- mutex_lock(&scan_mutex);
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1411,7 +1357,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
@@ -1486,9 +1431,9 @@ static int __init kmemleak_late_init(void)
&kmemleak_fops);
if (!dentry)
pr_warning("Failed to create the debugfs kmemleak file\n");
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
start_scan_thread();
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
pr_info("Kernel memory leak detector initialized\n");
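With the on-demand "scan" command added above, a leak scan no longer has to wait for the scanning thread's period; it can be triggered and the results read back through the same debugfs file (the equivalent of "echo scan > /sys/kernel/debug/kmemleak" followed by reading that file). A small sketch of the round trip, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    ssize_t n;
    int fd;

    /* Trigger an immediate memory scan. */
    fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
    if (fd >= 0) {
        if (write(fd, "scan", 4) < 0)
            perror("write");
        close(fd);
    }

    /* Read back any unreferenced objects reported so far. */
    fd = open("/sys/kernel/debug/kmemleak", O_RDONLY);
    if (fd >= 0) {
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);
        close(fd);
    }
    return 0;
}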
diff --git a/mm/nommu.c b/mm/nommu.c
index bf0cc762a7d..53cab10fece 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -238,6 +238,27 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn)
+{
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ return -EINVAL;
+
+ *pfn = address >> PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
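The follow_pfn() added for nommu only has to check that the VMA is an IO or raw-PFN mapping; since there is no MMU, the PFN is simply the address shifted right by PAGE_SHIFT. A trivial illustration of that arithmetic (a 4 KiB page size, i.e. PAGE_SHIFT of 12, is assumed):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

int main(void)
{
    unsigned long address = 0x20003450UL;
    unsigned long pfn = address >> PAGE_SHIFT;

    printf("address 0x%lx -> pfn 0x%lx\n", address, pfn);
    return 0;
}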
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935..7687879253b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping)
* filesystems (i.e. NFS) in which data may have been
* written to the server's write cache, but has not yet
* been flushed to permanent storage.
+ * Only move pages to writeback if this bdi is over its
+ * threshold; otherwise wait until the disk writes catch
+ * up.
*/
- if (bdi_nr_reclaimable) {
+ if (bdi_nr_reclaimable > bdi_thresh) {
writeback_inodes(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d714f8fb30..e0f2cdf9d8b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
+ /* save the state before borrowing the nodemask */
+ nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
- return;
+ goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+ /* restore the node_state */
+ node_states[N_HIGH_MEMORY] = saved_node_state;
}
/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
- /*
- * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
- * that node_mask, clear it at first
- */
- nodes_clear(node_states[N_HIGH_MEMORY]);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e8..b70f2acd885 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
* @chunk: chunk of interest
* @page_start: page index of the first page to unmap
* @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
*
* For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
* If @flush is true, vcache is flushed before unmapping and tlb
* after.
*/
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
- bool flush)
+ bool flush_tlb)
{
unsigned int last = num_possible_cpus() - 1;
unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
* the whole region at once rather than doing it for each cpu.
* This could be an overkill but is more scalable.
*/
- if (flush)
- flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+ flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
for_each_possible_cpu(cpu)
unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
(page_end - page_start) << PAGE_SHIFT);
/* ditto as flush_cache_vunmap() */
- if (flush)
+ if (flush_tlb)
flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
pcpu_chunk_addr(chunk, last, page_end));
}
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
ssize_t dyn_size, ssize_t unit_size)
{
+ size_t chunk_size;
unsigned int cpu;
/* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
} else
pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
- pcpue_ptr = __alloc_bootmem_nopanic(
- num_possible_cpus() * pcpue_unit_size,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- if (!pcpue_ptr)
+ chunk_size = pcpue_unit_size * num_possible_cpus();
+
+ pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
+ if (!pcpue_ptr) {
+ pr_warning("PERCPU: failed to allocate %zu bytes for "
+ "embedding\n", chunk_size);
return -ENOMEM;
+ }
/* return the leftover and copy */
for_each_possible_cpu(cpu) {
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 9aac5213105..e1241c76239 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -93,7 +93,7 @@ static void __exit br_deinit(void)
unregister_pernet_subsys(&br_net_ops);
- synchronize_net();
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
br_netfilter_fini();
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
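The same substitution recurs below (decnet, af_inet6, the mac80211 mesh code and conntrack): synchronize_net()/synchronize_rcu() only wait for readers, whereas rcu_barrier() also waits for every pending call_rcu() callback to run, which matters when the module is about to free the memory those callbacks touch or destroy their kmem_cache. A toy single-threaded model of why the barrier must come first (deferred frees stand in for call_rcu() callbacks; the names are made up):

#include <stdio.h>
#include <stdlib.h>

struct deferred { void *obj; struct deferred *next; };
static struct deferred *pending;

/* Queue an object for deferred freeing, as call_rcu() would. */
static void toy_call_rcu(void *obj)
{
    struct deferred *d = malloc(sizeof(*d));

    d->obj = obj;
    d->next = pending;
    pending = d;
}

/* Drain every queued callback, as rcu_barrier() guarantees. */
static void toy_rcu_barrier(void)
{
    while (pending) {
        struct deferred *d = pending;

        pending = d->next;
        free(d->obj);           /* the callback body */
        free(d);
    }
}

int main(void)
{
    toy_call_rcu(malloc(32));
    toy_call_rcu(malloc(64));
    toy_rcu_barrier();          /* only now is it safe to tear down caches */
    puts("module teardown may proceed");
    return 0;
}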
diff --git a/net/core/dev.c b/net/core/dev.c
index 60b57281227..70c27e0c7c3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2823,9 +2823,11 @@ static void net_rx_action(struct softirq_action *h)
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
- if (unlikely(napi_disable_pending(n)))
- __napi_complete(n);
- else
+ if (unlikely(napi_disable_pending(n))) {
+ local_irq_enable();
+ napi_complete(n);
+ local_irq_disable();
+ } else
list_move_tail(&n->poll_list, list);
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d351b8db0df..77d40289653 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2413,6 +2413,8 @@ static void __exit decnet_exit(void)
proc_net_remove(&init_net, "decnet");
proto_unregister(&dn_proto);
+
+ rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
}
module_exit(decnet_exit);
#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 105ad10876a..27eda9fdf3c 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -276,6 +276,9 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
else
return NULL;
+ if (!dev)
+ return NULL;
+
if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
@@ -521,3 +524,6 @@ static void __exit ieee802154_nl_exit(void)
}
module_exit(ieee802154_nl_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
+
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8a3881e28ac..c29d75d8f1b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
* cache.
*/
- /*
- * Special case: IPv4 duplicate address detection packet (RFC2131)
- * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
- */
- if (sip == 0 || tip == sip) {
+ /* Special case: IPv4 duplicate address detection packet (RFC2131) */
+ if (sip == 0) {
if (arp->ar_op == htons(ARPOP_REQUEST) &&
inet_addr_type(net, tip) == RTN_LOCAL &&
!arp_ignore(in_dev, sip, tip))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 012cf5a6858..00a54b246df 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
(struct node *)tn, wasfull);
tp = node_parent((struct node *) tn);
+ if (!tp)
+ rcu_assign_pointer(t->trie, (struct node *)tn);
+
tnode_free_flush();
if (!tp)
break;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 490ce20faf3..db46b4b5b2b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -440,6 +440,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
/* Remove any debris in the socket control block */
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ /* Must drop socket now because of tproxy. */
+ skb_orphan(skb);
+
return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip_rcv_finish);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 155c008626c..09172a65d9b 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
ct, ctinfo);
/* Tell TCP window tracking about seq change */
nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
- ct, CTINFO2DIR(ctinfo));
+ ct, CTINFO2DIR(ctinfo),
+ (int)rep_len - (int)match_len);
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
struct tcphdr *tcph;
int dir;
__be32 newseq, newack;
+ s16 seqoff, ackoff;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way, *other_way;
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph = (void *)skb->data + ip_hdrlen(skb);
if (after(ntohl(tcph->seq), this_way->correction_pos))
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+ seqoff = this_way->offset_after;
else
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+ seqoff = this_way->offset_before;
if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
other_way->correction_pos))
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+ ackoff = other_way->offset_after;
else
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+ ackoff = other_way->offset_before;
+
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ newack = htonl(ntohl(tcph->ack_seq) - ackoff);
inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
return 0;
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+ nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
return 1;
}
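The rework above keeps the chosen sequence adjustment in a local (offset_after when the number lies beyond the correction point, offset_before otherwise) so it can both rewrite the header and be handed on to TCP window tracking. A stand-alone sketch of that selection and application, ignoring sequence-number wraparound for brevity:

#include <stdint.h>
#include <stdio.h>

struct seq_adj {
    uint32_t correction_pos;    /* where the payload was mangled */
    int16_t offset_before;      /* adjustment for older segments */
    int16_t offset_after;       /* adjustment for newer segments */
};

static int16_t pick_offset(const struct seq_adj *a, uint32_t seq)
{
    return (seq > a->correction_pos) ? a->offset_after : a->offset_before;
}

int main(void)
{
    struct seq_adj adj = { .correction_pos = 1000,
                           .offset_before = 0, .offset_after = 4 };
    uint32_t seq = 1500;
    int16_t off = pick_offset(&adj, seq);

    printf("seq %u -> %u (offset %d)\n", seq, seq + off, off);
    return 0;
}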
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 17b89c523f9..7870a535dac 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
iov++;
while (seglen > 0) {
- int copy;
+ int copy = 0;
+ int max = size_goal;
skb = tcp_write_queue_tail(sk);
+ if (tcp_send_head(sk)) {
+ if (skb->ip_summed == CHECKSUM_NONE)
+ max = mss_now;
+ copy = max - skb->len;
+ }
- if (!tcp_send_head(sk) ||
- (copy = size_goal - skb->len) <= 0) {
-
+ if (copy <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
skb_entail(sk, skb);
copy = size_goal;
+ max = size_goal;
}
/* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < size_goal || (flags & MSG_OOB))
+ if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43bbba7926e..f8d67ccc64f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -128,7 +128,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
goto kill_with_rst;
/* Dup ACK? */
- if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+ if (!th->ack ||
+ !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 416fc4c2e7e..5bdf08d312d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk)) {
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8c1e86afbbf..3883b4036a7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3362,7 +3362,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->tstamp)/HZ;
- preferred -= tval;
+ if (preferred > tval)
+ preferred -= tval;
+ else
+ preferred = 0;
if (valid != INFINITY_LIFE_TIME)
valid -= tval;
}
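preferred is an unsigned 32-bit lifetime, so the old unguarded `preferred -= tval` could wrap to a huge bogus value once the elapsed time exceeds the lifetime; the fix clamps at zero, along the lines of this illustrative helper:
static u32 remaining_lifetime(u32 lifetime, u32 elapsed)
{
	return lifetime > elapsed ? lifetime - elapsed : 0;	/* saturate instead of wrapping */
}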
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 85b3d0036af..caa0278d30a 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1284,6 +1284,8 @@ static void __exit inet6_exit(void)
proto_unregister(&udplitev6_prot);
proto_unregister(&udpv6_prot);
proto_unregister(&tcpv6_prot);
+
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
module_exit(inet6_exit);
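This and the similar hunks below add rcu_barrier() (or swap it in for synchronize_rcu()) because synchronize_rcu() only waits out a grace period for readers, while rcu_barrier() also waits for every already-queued call_rcu() callback to finish running, which is what matters before destroying a cache or unloading the module text those callbacks live in. A minimal sketch of the unload ordering, with illustrative names:
static void __exit example_exit(void)
{
	example_unregister_everything();	/* after this, no new call_rcu() can be queued */
	rcu_barrier();				/* wait for the already-queued callbacks to run */
	kmem_cache_destroy(example_cachep);	/* now no callback can still free into the cache */
}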
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index c3a07d75b5f..6d6a4277c67 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -139,6 +139,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
rcu_read_unlock();
+ /* Must drop socket now because of tproxy. */
+ skb_orphan(skb);
+
return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
ip6_rcv_finish);
err:
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fc712e60705..11cf45bce38 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -494,7 +494,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
* should it be using the interface and enqueuing
* frames at this very time on another CPU.
*/
- synchronize_rcu();
+ rcu_barrier(); /* Wait for RX path and call_rcu()'s */
skb_queue_purge(&sdata->u.mesh.skb_queue);
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index afde8f99164..2032dfe25ca 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -617,8 +617,10 @@ err1:
void nf_conntrack_expect_fini(struct net *net)
{
exp_proc_remove(net);
- if (net_eq(net, &init_net))
+ if (net_eq(net, &init_net)) {
+ rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
+ }
nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
nf_ct_expect_hsize);
}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 4b2c769d555..fef95be334b 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -186,6 +186,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- synchronize_rcu();
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 33fc0a443f3..97a82ba7537 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -720,8 +720,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
/* Caller must linearize skb at tcp header. */
void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir)
+ struct nf_conn *ct, int dir,
+ s16 offset)
{
const struct tcphdr *tcph = (const void *)skb->data + dataoff;
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
@@ -734,7 +734,7 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb,
/*
* We have to worry for the ack in the reply packet only...
*/
- if (after(end, ct->proto.tcp.seen[dir].td_end))
+ if (ct->proto.tcp.seen[dir].td_end + offset == end)
ct->proto.tcp.seen[dir].td_end = end;
ct->proto.tcp.last_end = end;
spin_unlock_bh(&ct->lock);
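With the mangling offset passed in, the update no longer advances td_end for any larger end_seq; it only shifts the tracked right edge when this packet's original data is what set it, i.e. when the new condition holds. Restated with illustrative names:
static bool td_end_set_by_this_packet(u32 td_end, u32 mangled_end, s32 offset)
{
	return td_end + offset == mangled_end;	/* old edge plus the bytes NAT added or removed */
}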
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 0b7139f3dd7..fc581800698 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -129,7 +129,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr,
static inline bool
conntrack_mt_origsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
@@ -138,7 +138,7 @@ conntrack_mt_origsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_origdst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
@@ -147,7 +147,7 @@ conntrack_mt_origdst(const struct nf_conn *ct,
static inline bool
conntrack_mt_replsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
@@ -156,7 +156,7 @@ conntrack_mt_replsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_repldst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
@@ -164,7 +164,7 @@ conntrack_mt_repldst(const struct nf_conn *ct,
}
static inline bool
-ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
+ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
@@ -204,7 +204,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
static bool
conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
- const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int statebit;
@@ -278,6 +278,16 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
+static bool
+conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+{
+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
+ struct xt_match_param newpar = *par;
+
+ newpar.matchinfo = *info;
+ return conntrack_mt(skb, &newpar);
+}
+
static bool conntrack_mt_check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -288,11 +298,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par)
return true;
}
+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ struct xt_conntrack_mtinfo2 *up;
+ int ret = conntrack_mt_check(par);
+
+ if (ret < 0)
+ return ret;
+
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ nf_ct_l3proto_module_put(par->family);
+ return -ENOMEM;
+ }
+
+ /*
+ * The strategy here is to minimize the overhead of v1 matching,
+ * by prebuilding a v2 struct and putting the pointer into the
+ * v1 dataspace.
+ */
+ memcpy(up, info, offsetof(typeof(*info), state_mask));
+ up->state_mask = info->state_mask;
+ up->status_mask = info->status_mask;
+ *(void **)info = up;
+ return true;
+}
+
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_l3proto_module_put(par->family);
}
+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
+{
+ struct xt_conntrack_mtinfo2 **info = par->matchinfo;
+ kfree(*info);
+ conntrack_mt_destroy(par);
+}
+
#ifdef CONFIG_COMPAT
struct compat_xt_conntrack_info
{
@@ -363,6 +407,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
+ .match = conntrack_mt_v1,
+ .checkentry = conntrack_mt_check_v1,
+ .destroy = conntrack_mt_destroy_v1,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "conntrack",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo2),
.match = conntrack_mt,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
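The revision bump exists because the v2 ABI widens the two mask fields; everything before them is laid out identically, which is why conntrack_mt_check_v1() above can memcpy up to offsetof(..., state_mask) and then copy the masks by assignment. Paraphrased from xt_conntrack.h, with the shared leading fields elided:
struct xt_conntrack_mtinfo1 {
	/* ... addresses, ports, flags shared with v2 ... */
	__u8  state_mask, status_mask;
};
struct xt_conntrack_mtinfo2 {
	/* ... same leading fields ... */
	__u16 state_mask, status_mask;
};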
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 80a322d7790..b0d6ddd82a9 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -69,10 +69,27 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
return NULL;
}
-static void __phonet_device_free(struct phonet_device *pnd)
+static void phonet_device_destroy(struct net_device *dev)
{
- list_del(&pnd->list);
- kfree(pnd);
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&pndevs->lock);
+ pnd = __phonet_get(dev);
+ if (pnd)
+ list_del(&pnd->list);
+ spin_unlock_bh(&pndevs->lock);
+
+ if (pnd) {
+ u8 addr;
+
+ for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
+ addr = find_next_bit(pnd->addrs, 64, 1+addr))
+ phonet_address_notify(RTM_DELADDR, dev, addr);
+ kfree(pnd);
+ }
}
struct net_device *phonet_device_get(struct net *net)
@@ -126,8 +143,10 @@ int phonet_address_del(struct net_device *dev, u8 addr)
pnd = __phonet_get(dev);
if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
err = -EADDRNOTAVAIL;
- else if (bitmap_empty(pnd->addrs, 64))
- __phonet_device_free(pnd);
+ else if (bitmap_empty(pnd->addrs, 64)) {
+ list_del(&pnd->list);
+ kfree(pnd);
+ }
spin_unlock_bh(&pndevs->lock);
return err;
}
@@ -181,18 +200,8 @@ static int phonet_device_notify(struct notifier_block *me, unsigned long what,
{
struct net_device *dev = arg;
- if (what == NETDEV_UNREGISTER) {
- struct phonet_device_list *pndevs;
- struct phonet_device *pnd;
-
- /* Destroy phonet-specific device data */
- pndevs = phonet_device_list(dev_net(dev));
- spin_lock_bh(&pndevs->lock);
- pnd = __phonet_get(dev);
- if (pnd)
- __phonet_device_free(pnd);
- spin_unlock_bh(&pndevs->lock);
- }
+ if (what == NETDEV_UNREGISTER)
+ phonet_device_destroy(dev);
return 0;
}
@@ -218,11 +227,12 @@ static int phonet_init_net(struct net *net)
static void phonet_exit_net(struct net *net)
{
struct phonet_net *pnn = net_generic(net, phonet_net_id);
- struct phonet_device *pnd, *n;
-
- list_for_each_entry_safe(pnd, n, &pnn->pndevs.list, list)
- __phonet_device_free(pnd);
+ struct net_device *dev;
+ rtnl_lock();
+ for_each_netdev(net, dev)
+ phonet_device_destroy(dev);
+ rtnl_unlock();
kfree(pnn);
}
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index cec4e595168..f8b4cee434c 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -32,7 +32,7 @@
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
u32 pid, u32 seq, int event);
-static void rtmsg_notify(int event, struct net_device *dev, u8 addr)
+void phonet_address_notify(int event, struct net_device *dev, u8 addr)
{
struct sk_buff *skb;
int err = -ENOBUFS;
@@ -94,7 +94,7 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
else
err = phonet_address_del(dev, pnaddr);
if (!err)
- rtmsg_notify(nlh->nlmsg_type, dev, pnaddr);
+ phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
return err;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b7641144451..b94c2119056 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -407,7 +407,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
}
dst = dst_clone(tp->dst);
skb_dst_set(nskb, dst);
- if (dst)
+ if (!dst)
goto no_route;
/* Build the SCTP header. */
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 843629f5576..adaa81982f7 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -66,6 +66,7 @@ cleanup_sunrpc(void)
#ifdef CONFIG_PROC_FS
rpc_proc_exit();
#endif
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
module_init(init_sunrpc);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index d31ccb48773..faf54c6bf96 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -292,8 +292,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
- .name = "cbc(cast128)",
- .compat = "cast128",
+ .name = "cbc(cast5)",
+ .compat = "cast5",
.uinfo = {
.encr = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5f1f86565f1..f2f7c638083 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -668,22 +668,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -699,26 +687,11 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.family != family ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6) ||
- !ipv6_addr_equal((struct in6_addr *)saddr,
- (struct in6_addr *)
- x->props.saddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -1001,25 +974,11 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->props.family != family ||
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
- (struct in6_addr *)daddr) ||
- !ipv6_addr_equal((struct in6_addr *)
- x->props.saddr.a6,
- (struct in6_addr *)saddr))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
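All three lookup paths collapse their per-family switch statements into xfrm_addr_cmp(), which returns non-zero on mismatch; roughly what the helper in include/net/xfrm.h does (signature paraphrased):
static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
				unsigned short family)
{
	switch (family) {
	default:
	case AF_INET:
		return (__force u32)a->a4 ^ (__force u32)b->a4;	/* non-zero if different */
	case AF_INET6:
		return ipv6_addr_cmp((struct in6_addr *)a, (struct in6_addr *)b);
	}
}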
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index ed591e9b7d1..b52d340d759 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1426,6 +1426,8 @@ sub dump_struct($$) {
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
$nested =~ s/\/\*.*?\*\///gos;
+ # strip kmemcheck_bitfield_{begin,end}.*;
+ $members =~ s/kmemcheck_bitfield_.*?;//gos;
create_parameterlist($members, ';', $file);
check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
@@ -1468,8 +1470,6 @@ sub dump_enum($$) {
}
}
- # strip kmemcheck_bitfield_{begin,end}.*;
- $members =~ s/kmemcheck_bitfield_.*?;//gos;
output_declaration($declaration_name,
'enum',
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 64f5ddb09ea..5c113123ed9 100644
--- a/scripts/pnmtologo.c
+++ b/scripts/pnmtologo.c
@@ -237,7 +237,7 @@ static void write_header(void)
fprintf(out, " * Linux logo %s\n", logoname);
fputs(" */\n\n", out);
fputs("#include <linux/linux_logo.h>\n\n", out);
- fprintf(out, "static const unsigned char %s_data[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
logoname);
}
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
fputs("\n};\n\n", out);
/* write logo clut */
- fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
logoname);
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 6f611874d10..101c512564e 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -238,7 +238,34 @@ out:
}
/*
- * ima_opens_get - increment file counts
+ * ima_counts_put - decrement file counts
+ *
+ * File counts are incremented in ima_path_check. On file open
+ * error, such as ETXTBSY, decrement the counts to prevent
+ * unnecessary imbalance messages.
+ */
+void ima_counts_put(struct path *path, int mask)
+{
+ struct inode *inode = path->dentry->d_inode;
+ struct ima_iint_cache *iint;
+
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
+ return;
+ iint = ima_iint_find_insert_get(inode);
+ if (!iint)
+ return;
+
+ mutex_lock(&iint->mutex);
+ iint->opencount--;
+ if ((mask & MAY_WRITE) || (mask == 0))
+ iint->writecount--;
+ else if (mask & (MAY_READ | MAY_EXEC))
+ iint->readcount--;
+ mutex_unlock(&iint->mutex);
+}
+
+/*
+ * ima_counts_get - increment file counts
*
* - for IPC shm and shmat file.
* - for nfsd exported files.
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 7ec94314ac0..a0880e9c8e0 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -134,7 +134,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, entry->template_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+ entry->template.file_name,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index de83608719e..3ee0269e5bd 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -338,7 +338,7 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
return -EBUSY;
acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL);
- if (acard->play == NULL)
+ if (acard->mpu == NULL)
return -EBUSY;
pdev = acard->cap;
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c
index c180598f171..89466b056be 100644
--- a/sound/oss/kahlua.c
+++ b/sound/oss/kahlua.c
@@ -199,7 +199,7 @@ MODULE_LICENSE("GPL");
*/
static struct pci_device_id id_tbl[] = {
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO), 0 },
{ }
};
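This and the many PCI ID table hunks below are mechanical conversions: PCI_VDEVICE(vendor, device) expands, per include/linux/pci.h, to PCI_VENDOR_ID_<vendor>, (device), PCI_ANY_ID, PCI_ANY_ID, 0, 0, leaving only driver_data to spell out. The entry above is therefore equivalent to the old long-hand form (table name illustrative):
static struct pci_device_id id_tbl_longhand[] = {
	{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },	/* trailing 0 is driver_data */
	{ }
};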
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
index 6c0a770ed05..1b2316f35b1 100644
--- a/sound/oss/mpu401.c
+++ b/sound/oss/mpu401.c
@@ -926,31 +926,21 @@ static struct midi_operations mpu401_midi_operations[MAX_MIDI_DEV];
static void mpu401_chk_version(int n, struct mpu_config *devc)
{
int tmp;
- unsigned long flags;
devc->version = devc->revision = 0;
- spin_lock_irqsave(&devc->lock,flags);
- if ((tmp = mpu_cmd(n, 0xAC, 0)) < 0)
- {
- spin_unlock_irqrestore(&devc->lock,flags);
+ tmp = mpu_cmd(n, 0xAC, 0);
+ if (tmp < 0)
return;
- }
if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */
- {
- spin_unlock_irqrestore(&devc->lock,flags);
return;
- }
devc->version = tmp;
- if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0)
- {
+ if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) {
devc->version = 0;
- spin_unlock_irqrestore(&devc->lock,flags);
return;
}
devc->revision = tmp;
- spin_unlock_irqrestore(&devc->lock,flags);
}
int attach_mpu401(struct address_info *hw_config, struct module *owner)
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 71515ddb459..d6752dff2a4 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -287,10 +287,10 @@ struct atiixp {
/*
*/
static struct pci_device_id snd_atiixp_ids[] = {
- { 0x1002, 0x4341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */
- { 0x1002, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB300 */
- { 0x1002, 0x4370, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */
- { 0x1002, 0x4382, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB600 */
+ { PCI_VDEVICE(ATI, 0x4341), 0 }, /* SB200 */
+ { PCI_VDEVICE(ATI, 0x4361), 0 }, /* SB300 */
+ { PCI_VDEVICE(ATI, 0x4370), 0 }, /* SB400 */
+ { PCI_VDEVICE(ATI, 0x4382), 0 }, /* SB600 */
{ 0, }
};
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index c3136cccc55..e7e147bf8eb 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -262,8 +262,8 @@ struct atiixp_modem {
/*
*/
static struct pci_device_id snd_atiixp_ids[] = {
- { 0x1002, 0x434d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB200 */
- { 0x1002, 0x4378, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* SB400 */
+ { PCI_VDEVICE(ATI, 0x434d), 0 }, /* SB200 */
+ { PCI_VDEVICE(ATI, 0x4378), 0 }, /* SB400 */
{ 0, }
};
diff --git a/sound/pci/au88x0/au8810.c b/sound/pci/au88x0/au8810.c
index fce22c7af0e..c0e8c6b295c 100644
--- a/sound/pci/au88x0/au8810.c
+++ b/sound/pci/au88x0/au8810.c
@@ -1,8 +1,7 @@
#include "au8810.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_ADVANTAGE), 1,},
{0,}
};
diff --git a/sound/pci/au88x0/au8820.c b/sound/pci/au88x0/au8820.c
index d1fbcce0725..a6527330df5 100644
--- a/sound/pci/au88x0/au8820.c
+++ b/sound/pci/au88x0/au8820.c
@@ -1,8 +1,7 @@
#include "au8820.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_1), 0,},
{0,}
};
diff --git a/sound/pci/au88x0/au8830.c b/sound/pci/au88x0/au8830.c
index d4f2717c14f..6c702ad4352 100644
--- a/sound/pci/au88x0/au8830.c
+++ b/sound/pci/au88x0/au8830.c
@@ -1,8 +1,7 @@
#include "au8830.h"
#include "au88x0.h"
static struct pci_device_id snd_vortex_ids[] = {
- {PCI_VENDOR_ID_AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(AUREAL, PCI_DEVICE_ID_AUREAL_VORTEX_2), 0,},
{0,}
};
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 57b992a5c05..f24bf1ecb36 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1876,7 +1876,7 @@ static int snd_ca0106_resume(struct pci_dev *pci)
// PCI IDs
static struct pci_device_id snd_ca0106_ids[] = {
- { 0x1102, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Audigy LS or Live 24bit */
+ { PCI_VDEVICE(CREATIVE, 0x0007), 0 }, /* Audigy LS or Live 24bit */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_ca0106_ids);
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 449fe02f666..ddcd4a9fd7e 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2797,11 +2797,11 @@ static inline void snd_cmipci_proc_init(struct cmipci *cm) {}
static struct pci_device_id snd_cmipci_ids[] = {
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_AL, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
+ {PCI_VDEVICE(CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B), 0},
+ {PCI_VDEVICE(AL, PCI_DEVICE_ID_CMEDIA_CM8738), 0},
{0,},
};
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index f6286f84a22..e2e0359bb05 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -495,7 +495,7 @@ struct cs4281 {
static irqreturn_t snd_cs4281_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_cs4281_ids[] = {
- { 0x1013, 0x6005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4281 */
+ { PCI_VDEVICE(CIRRUS, 0x6005), 0, }, /* CS4281 */
{ 0, }
};
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index c9b3e3d48cb..033aec43011 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -65,9 +65,9 @@ module_param_array(mmap_valid, bool, NULL, 0444);
MODULE_PARM_DESC(mmap_valid, "Support OSS mmap.");
static struct pci_device_id snd_cs46xx_ids[] = {
- { 0x1013, 0x6001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4280 */
- { 0x1013, 0x6003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4612 */
- { 0x1013, 0x6004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* CS4615 */
+ { PCI_VDEVICE(CIRRUS, 0x6001), 0, }, /* CS4280 */
+ { PCI_VDEVICE(CIRRUS, 0x6003), 0, }, /* CS4612 */
+ { PCI_VDEVICE(CIRRUS, 0x6004), 0, }, /* CS4615 */
{ 0, }
};
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index c7f3b994101..168af67d938 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -77,9 +77,9 @@ MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
* Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
*/
static struct pci_device_id snd_emu10k1_ids[] = {
- { 0x1102, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* EMU10K1 */
- { 0x1102, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy */
- { 0x1102, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, /* Audigy 2 Value SB0400 */
+ { PCI_VDEVICE(CREATIVE, 0x0002), 0 }, /* EMU10K1 */
+ { PCI_VDEVICE(CREATIVE, 0x0004), 1 }, /* Audigy */
+ { PCI_VDEVICE(CREATIVE, 0x0008), 1 }, /* Audigy 2 Value SB0400 */
{ 0, }
};
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 4d3ad793e98..36e08bd2b3c 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1607,7 +1607,7 @@ static void __devexit snd_emu10k1x_remove(struct pci_dev *pci)
// PCI IDs
static struct pci_device_id snd_emu10k1x_ids[] = {
- { 0x1102, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* Dell OEM version (EMU10K1) */
+ { PCI_VDEVICE(CREATIVE, 0x0006), 0 }, /* Dell OEM version (EMU10K1) */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 18f4d1e98c4..2b82c5c723e 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -445,12 +445,12 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_audiopci_ids[] = {
#ifdef CHIP1370
- { 0x1274, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1370 */
+ { PCI_VDEVICE(ENSONIQ, 0x5000), 0, }, /* ES1370 */
#endif
#ifdef CHIP1371
- { 0x1274, 0x1371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1371 */
- { 0x1274, 0x5880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ES1373 - CT5880 */
- { 0x1102, 0x8938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Ectiva EV1938 */
+ { PCI_VDEVICE(ENSONIQ, 0x1371), 0, }, /* ES1371 */
+ { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
+ { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
#endif
{ 0, }
};
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index fbd2ac09aa3..820318ee62c 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -244,7 +244,7 @@ struct es1938 {
static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id);
static struct pci_device_id snd_es1938_ids[] = {
- { 0x125d, 0x1969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* Solo-1 */
+ { PCI_VDEVICE(ESS, 0x1969), 0, }, /* Solo-1 */
{ 0, }
};
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 84cc49ca914..1988582d1ab 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -72,6 +72,7 @@ struct ad198x_spec {
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
unsigned int jack_present :1;
+ unsigned int inv_jack_detect:1;
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_loopback_check loopback;
@@ -669,39 +670,13 @@ static struct hda_input_mux ad1986a_automic_capture_source = {
},
};
-static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
+static struct snd_kcontrol_new ad1986a_laptop_master_mixers[] = {
HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x1b | (1 << 8), /* port-D, inversed */
- },
{ } /* end */
};
-static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
- HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
- HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
+static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
@@ -727,6 +702,12 @@ static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
{ } /* end */
};
+static struct snd_kcontrol_new ad1986a_laptop_intmic_mixers[] = {
+ HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
+ { } /* end */
+};
+
/* re-connect the mic boost input according to the jack sensing */
static void ad1986a_automic(struct hda_codec *codec)
{
@@ -776,8 +757,9 @@ static void ad1986a_hp_automute(struct hda_codec *codec)
unsigned int present;
present = snd_hda_codec_read(codec, 0x1a, 0, AC_VERB_GET_PIN_SENSE, 0);
- /* Lenovo N100 seems to report the reversed bit for HP jack-sensing */
- spec->jack_present = !(present & 0x80000000);
+ spec->jack_present = !!(present & 0x80000000);
+ if (spec->inv_jack_detect)
+ spec->jack_present = !spec->jack_present;
ad1986a_update_hp(codec);
}
@@ -816,7 +798,7 @@ static int ad1986a_hp_master_sw_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = {
+static struct snd_kcontrol_new ad1986a_automute_master_mixers[] = {
HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -826,33 +808,10 @@ static struct snd_kcontrol_new ad1986a_laptop_automute_mixers[] = {
.put = ad1986a_hp_master_sw_put,
.private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
},
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x1b | (1 << 8), /* port-D, inversed */
- },
{ } /* end */
};
+
/*
* initialization verbs
*/
@@ -981,6 +940,27 @@ static struct hda_verb ad1986a_hp_init_verbs[] = {
{}
};
+static void ad1986a_samsung_p50_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ switch (res >> 26) {
+ case AD1986A_HP_EVENT:
+ ad1986a_hp_automute(codec);
+ break;
+ case AD1986A_MIC_EVENT:
+ ad1986a_automic(codec);
+ break;
+ }
+}
+
+static int ad1986a_samsung_p50_init(struct hda_codec *codec)
+{
+ ad198x_init(codec);
+ ad1986a_hp_automute(codec);
+ ad1986a_automic(codec);
+ return 0;
+}
+
/* models */
enum {
@@ -991,6 +971,7 @@ enum {
AD1986A_LAPTOP_AUTOMUTE,
AD1986A_ULTRA,
AD1986A_SAMSUNG,
+ AD1986A_SAMSUNG_P50,
AD1986A_MODELS
};
@@ -1002,6 +983,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = {
[AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
[AD1986A_ULTRA] = "ultra",
[AD1986A_SAMSUNG] = "samsung",
+ [AD1986A_SAMSUNG_P50] = "samsung-p50",
};
static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
@@ -1024,6 +1006,7 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
+ SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
@@ -1111,7 +1094,10 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
break;
case AD1986A_LAPTOP_EAPD:
- spec->mixers[0] = ad1986a_laptop_eapd_mixers;
+ spec->num_mixers = 3;
+ spec->mixers[0] = ad1986a_laptop_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->mixers[2] = ad1986a_laptop_intmic_mixers;
spec->num_init_verbs = 2;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->multiout.max_channels = 2;
@@ -1122,7 +1108,9 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->input_mux = &ad1986a_laptop_eapd_capture_source;
break;
case AD1986A_SAMSUNG:
- spec->mixers[0] = ad1986a_samsung_mixers;
+ spec->num_mixers = 2;
+ spec->mixers[0] = ad1986a_laptop_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
spec->num_init_verbs = 3;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->init_verbs[2] = ad1986a_automic_verbs;
@@ -1135,8 +1123,28 @@ static int patch_ad1986a(struct hda_codec *codec)
codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
codec->patch_ops.init = ad1986a_automic_init;
break;
+ case AD1986A_SAMSUNG_P50:
+ spec->num_mixers = 2;
+ spec->mixers[0] = ad1986a_automute_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->num_init_verbs = 4;
+ spec->init_verbs[1] = ad1986a_eapd_init_verbs;
+ spec->init_verbs[2] = ad1986a_automic_verbs;
+ spec->init_verbs[3] = ad1986a_hp_init_verbs;
+ spec->multiout.max_channels = 2;
+ spec->multiout.num_dacs = 1;
+ spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
+ if (!is_jack_available(codec, 0x25))
+ spec->multiout.dig_out_nid = 0;
+ spec->input_mux = &ad1986a_automic_capture_source;
+ codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
+ codec->patch_ops.init = ad1986a_samsung_p50_init;
+ break;
case AD1986A_LAPTOP_AUTOMUTE:
- spec->mixers[0] = ad1986a_laptop_automute_mixers;
+ spec->num_mixers = 3;
+ spec->mixers[0] = ad1986a_automute_master_mixers;
+ spec->mixers[1] = ad1986a_laptop_eapd_mixers;
+ spec->mixers[2] = ad1986a_laptop_intmic_mixers;
spec->num_init_verbs = 3;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->init_verbs[2] = ad1986a_hp_init_verbs;
@@ -1148,6 +1156,10 @@ static int patch_ad1986a(struct hda_codec *codec)
spec->input_mux = &ad1986a_laptop_eapd_capture_source;
codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
codec->patch_ops.init = ad1986a_hp_init;
+ /* Lenovo N100 seems to report the reversed bit
+ * for HP jack-sensing
+ */
+ spec->inv_jack_detect = 1;
break;
case AD1986A_ULTRA:
spec->mixers[0] = ad1986a_laptop_eapd_mixers;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 33453319742..3a8e58c483d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -945,12 +945,13 @@ static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid,
static void alc_automute_pin(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int present;
+ unsigned int present, pincap;
unsigned int nid = spec->autocfg.hp_pins[0];
int i;
- /* need to execute and sync at first */
- snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
+ pincap = snd_hda_query_pin_caps(codec, nid);
+ if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
+ snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
present = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_SENSE, 0);
spec->jack_present = (present & AC_PINSENSE_PRESENCE) != 0;
@@ -1392,7 +1393,7 @@ static struct hda_verb alc888_fujitsu_xa3530_verbs[] = {
static void alc_automute_amp(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int val, mute;
+ unsigned int val, mute, pincap;
hda_nid_t nid;
int i;
@@ -1401,6 +1402,10 @@ static void alc_automute_amp(struct hda_codec *codec)
nid = spec->autocfg.hp_pins[i];
if (!nid)
break;
+ pincap = snd_hda_query_pin_caps(codec, nid);
+ if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
+ snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_SET_PIN_SENSE, 0);
val = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_SENSE, 0);
if (val & AC_PINSENSE_PRESENCE) {
@@ -1471,6 +1476,10 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = {
static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
/* Bias voltage on for external mic port */
{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
+/* Front Mic: set to PIN_IN (empty by default) */
+ {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+/* Unselect Front Mic by default in input mixer 3 */
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0xb)},
/* Enable unsolicited event for HP jack */
{0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
/* Enable speaker output */
@@ -1560,18 +1569,22 @@ static struct hda_input_mux alc888_2_capture_sources[2] = {
static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = {
/* Internal mic only available on one ADC */
/* Internal mic only available on one ADC */
{
- .num_items = 3,
+ .num_items = 5,
.items = {
{ "Ext Mic", 0x0 },
+ { "Line In", 0x2 },
{ "CD", 0x4 },
+ { "Input Mix", 0xa },
{ "Int Mic", 0xb },
},
},
{
- .num_items = 2,
+ .num_items = 4,
.items = {
{ "Ext Mic", 0x0 },
+ { "Line In", 0x2 },
{ "CD", 0x4 },
+ { "Input Mix", 0xa },
},
}
};
@@ -1639,6 +1652,17 @@ static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec)
alc_automute_amp(codec);
}
+static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->autocfg.hp_pins[0] = 0x15;
+ spec->autocfg.speaker_pins[0] = 0x14;
+ spec->autocfg.speaker_pins[1] = 0x16;
+ spec->autocfg.speaker_pins[2] = 0x17;
+ alc_automute_amp(codec);
+}
+
static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -8189,6 +8213,8 @@ static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = {
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
@@ -9064,7 +9090,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
- ALC888_ACER_ASPIRE_4930G),
+ ALC888_ACER_ASPIRE_6530G),
SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
ALC888_ACER_ASPIRE_6530G),
/* default Acer -- disabled as it causes more problems.
@@ -9317,7 +9343,7 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc888_2_capture_sources),
.input_mux = alc888_acer_aspire_6530_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_acer_aspire_4930g_init_hook,
+ .init_hook = alc888_acer_aspire_6530g_init_hook,
},
[ALC888_ACER_ASPIRE_8930G] = {
.mixers = { alc888_base_mixer,
@@ -12437,6 +12463,8 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
if (err < 0)
return err;
+ alc_ssid_check(codec, 0x15, 0x1b, 0x14);
+
return 1;
}
@@ -13345,6 +13373,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
if (!spec->cap_mixer && !spec->no_analog)
set_capture_mixer(spec);
+ alc_ssid_check(codec, 0x15, 0x1b, 0x14);
+
return 1;
}
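Both realtek hunks apply the same presence-detection pattern: query the pin capabilities once and only issue the SET_PIN_SENSE trigger when the pin advertises AC_PINCAP_TRIG_REQ, rather than triggering unconditionally on every check. A stand-alone sketch of that pattern (the helper itself is illustrative; the verbs and cap bits are the ones used above):
static int example_jack_present(struct hda_codec *codec, hda_nid_t nid)
{
	unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
	if (pincap & AC_PINCAP_TRIG_REQ)	/* pin needs an explicit measurement trigger */
		snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
	return !!(snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_SENSE, 0)
		  & AC_PINSENSE_PRESENCE);
}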
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 0d0cdbdb448..cecf1ffeeaa 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -107,7 +107,7 @@ MODULE_PARM_DESC(dxr_enable, "Enable DXR support for Terratec DMX6FIRE.");
static const struct pci_device_id snd_ice1712_ids[] = {
- { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_ICE_1712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICE1712 */
+ { PCI_VDEVICE(ICE, PCI_DEVICE_ID_ICE_1712), 0 }, /* ICE1712 */
{ 0, }
};
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 36ade77cf37..cc84a831eb2 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(model, "Use the given board model.");
/* Both VT1720 and VT1724 have the same PCI IDs */
static const struct pci_device_id snd_vt1724_ids[] = {
- { PCI_VENDOR_ID_ICE, PCI_DEVICE_ID_VT1724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 },
{ 0, }
};
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 8aa5687f392..171ada53520 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -421,29 +421,29 @@ struct intel8x0 {
};
static struct pci_device_id snd_intel8x0_ids[] = {
- { 0x8086, 0x2415, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */
- { 0x8086, 0x2425, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */
- { 0x8086, 0x2445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */
- { 0x8086, 0x2485, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */
- { 0x8086, 0x24c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH4 */
- { 0x8086, 0x24d5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH5 */
- { 0x8086, 0x25a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB */
- { 0x8086, 0x266e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH6 */
- { 0x8086, 0x27de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ICH7 */
- { 0x8086, 0x2698, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL_ICH4 }, /* ESB2 */
- { 0x8086, 0x7195, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */
- { 0x1039, 0x7012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7012 */
- { 0x10de, 0x01b1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */
- { 0x10de, 0x003a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP04 */
- { 0x10de, 0x006a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */
- { 0x10de, 0x0059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK804 */
- { 0x10de, 0x008a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8 */
- { 0x10de, 0x00da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */
- { 0x10de, 0x00ea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* CK8S */
- { 0x10de, 0x026b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* MCP51 */
- { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */
- { 0x1022, 0x7445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */
- { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */
+ { PCI_VDEVICE(INTEL, 0x2415), DEVICE_INTEL }, /* 82801AA */
+ { PCI_VDEVICE(INTEL, 0x2425), DEVICE_INTEL }, /* 82901AB */
+ { PCI_VDEVICE(INTEL, 0x2445), DEVICE_INTEL }, /* 82801BA */
+ { PCI_VDEVICE(INTEL, 0x2485), DEVICE_INTEL }, /* ICH3 */
+ { PCI_VDEVICE(INTEL, 0x24c5), DEVICE_INTEL_ICH4 }, /* ICH4 */
+ { PCI_VDEVICE(INTEL, 0x24d5), DEVICE_INTEL_ICH4 }, /* ICH5 */
+ { PCI_VDEVICE(INTEL, 0x25a6), DEVICE_INTEL_ICH4 }, /* ESB */
+ { PCI_VDEVICE(INTEL, 0x266e), DEVICE_INTEL_ICH4 }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x27de), DEVICE_INTEL_ICH4 }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x2698), DEVICE_INTEL_ICH4 }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x7195), DEVICE_INTEL }, /* 440MX */
+ { PCI_VDEVICE(SI, 0x7012), DEVICE_SIS }, /* SI7012 */
+ { PCI_VDEVICE(NVIDIA, 0x01b1), DEVICE_NFORCE }, /* NFORCE */
+ { PCI_VDEVICE(NVIDIA, 0x003a), DEVICE_NFORCE }, /* MCP04 */
+ { PCI_VDEVICE(NVIDIA, 0x006a), DEVICE_NFORCE }, /* NFORCE2 */
+ { PCI_VDEVICE(NVIDIA, 0x0059), DEVICE_NFORCE }, /* CK804 */
+ { PCI_VDEVICE(NVIDIA, 0x008a), DEVICE_NFORCE }, /* CK8 */
+ { PCI_VDEVICE(NVIDIA, 0x00da), DEVICE_NFORCE }, /* NFORCE3 */
+ { PCI_VDEVICE(NVIDIA, 0x00ea), DEVICE_NFORCE }, /* CK8S */
+ { PCI_VDEVICE(NVIDIA, 0x026b), DEVICE_NFORCE }, /* MCP51 */
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
{ 0, }
};
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 6ec0fc50d6b..9e7d12e7673 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -220,24 +220,24 @@ struct intel8x0m {
};
static struct pci_device_id snd_intel8x0m_ids[] = {
- { 0x8086, 0x2416, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801AA */
- { 0x8086, 0x2426, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82901AB */
- { 0x8086, 0x2446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 82801BA */
- { 0x8086, 0x2486, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH3 */
- { 0x8086, 0x24c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH4 */
- { 0x8086, 0x24d6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH5 */
- { 0x8086, 0x266d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH6 */
- { 0x8086, 0x27dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* ICH7 */
- { 0x8086, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* 440MX */
- { 0x1022, 0x7446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD768 */
- { 0x1039, 0x7013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_SIS }, /* SI7013 */
- { 0x10de, 0x01c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE */
- { 0x10de, 0x0069, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2 */
- { 0x10de, 0x0089, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE2s */
- { 0x10de, 0x00d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_NFORCE }, /* NFORCE3 */
+ { PCI_VDEVICE(INTEL, 0x2416), DEVICE_INTEL }, /* 82801AA */
+ { PCI_VDEVICE(INTEL, 0x2426), DEVICE_INTEL }, /* 82901AB */
+ { PCI_VDEVICE(INTEL, 0x2446), DEVICE_INTEL }, /* 82801BA */
+ { PCI_VDEVICE(INTEL, 0x2486), DEVICE_INTEL }, /* ICH3 */
+ { PCI_VDEVICE(INTEL, 0x24c6), DEVICE_INTEL }, /* ICH4 */
+ { PCI_VDEVICE(INTEL, 0x24d6), DEVICE_INTEL }, /* ICH5 */
+ { PCI_VDEVICE(INTEL, 0x266d), DEVICE_INTEL }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x27dd), DEVICE_INTEL }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x7196), DEVICE_INTEL }, /* 440MX */
+ { PCI_VDEVICE(AMD, 0x7446), DEVICE_INTEL }, /* AMD768 */
+ { PCI_VDEVICE(SI, 0x7013), DEVICE_SIS }, /* SI7013 */
+ { PCI_VDEVICE(NVIDIA, 0x01c1), DEVICE_NFORCE }, /* NFORCE */
+ { PCI_VDEVICE(NVIDIA, 0x0069), DEVICE_NFORCE }, /* NFORCE2 */
+ { PCI_VDEVICE(NVIDIA, 0x0089), DEVICE_NFORCE }, /* NFORCE2s */
+ { PCI_VDEVICE(NVIDIA, 0x00d9), DEVICE_NFORCE }, /* NFORCE3 */
#if 0
- { 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL }, /* AMD8111 */
- { 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI }, /* Ali5455 */
+ { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
+ { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
#endif
{ 0, }
};
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 18da2ef04d0..11b8c6514b3 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -654,13 +654,12 @@ static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
int i;
u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);
- u32 default_conf_es = (64 << IOCR_OUTPUTS_OFFSET) |
+ /* configure 64 io channels */
+ u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) |
(64 << IOCR_INPUTS_OFFSET) |
+ (64 << IOCR_OUTPUTS_OFFSET) |
(FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);
- u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK)
- | (default_conf_es & CONFES_WRITE_PART_MASK);
-
snd_printdd("->lx_init_ethersound\n");
chip->freq_ratio = FREQ_RATIO_SINGLE_MODE;
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 82bc5b9e762..a83d1968a84 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard.");
*/
static struct pci_device_id snd_mixart_ids[] = {
- { 0x1057, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* MC8240 */
+ { PCI_VDEVICE(MOTOROLA, 0x0003), 0, }, /* MC8240 */
{ 0, }
};
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 522a040855d..97a0731331a 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -263,9 +263,9 @@ struct nm256 {
* PCI ids
*/
static struct pci_device_id snd_nm256_ids[] = {
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO), 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO), 0},
+ {PCI_VDEVICE(NEOMAGIC, PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO), 0},
{0,},
};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 304da169bfd..5401c547c4e 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -575,8 +575,10 @@ static int ac97_switch_put(struct snd_kcontrol *ctl,
static int ac97_volume_info(struct snd_kcontrol *ctl,
struct snd_ctl_elem_info *info)
{
+ int stereo = (ctl->private_value >> 16) & 1;
+
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- info->count = 2;
+ info->count = stereo ? 2 : 1;
info->value.integer.min = 0;
info->value.integer.max = 0x1f;
return 0;
@@ -587,6 +589,7 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
{
struct oxygen *chip = ctl->private_data;
unsigned int codec = (ctl->private_value >> 24) & 1;
+ int stereo = (ctl->private_value >> 16) & 1;
unsigned int index = ctl->private_value & 0xff;
u16 reg;
@@ -594,7 +597,8 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
reg = oxygen_read_ac97(chip, codec, index);
mutex_unlock(&chip->mutex);
value->value.integer.value[0] = 31 - (reg & 0x1f);
- value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
+ if (stereo)
+ value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
return 0;
}
@@ -603,6 +607,7 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
{
struct oxygen *chip = ctl->private_data;
unsigned int codec = (ctl->private_value >> 24) & 1;
+ int stereo = (ctl->private_value >> 16) & 1;
unsigned int index = ctl->private_value & 0xff;
u16 oldreg, newreg;
int change;
@@ -612,8 +617,11 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
newreg = oldreg;
newreg = (newreg & ~0x1f) |
(31 - (value->value.integer.value[0] & 0x1f));
- newreg = (newreg & ~0x1f00) |
- ((31 - (value->value.integer.value[0] & 0x1f)) << 8);
+ if (stereo)
+ newreg = (newreg & ~0x1f00) |
+ ((31 - (value->value.integer.value[1] & 0x1f)) << 8);
+ else
+ newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8);
change = newreg != oldreg;
if (change)
oxygen_write_ac97(chip, codec, index, newreg);
@@ -673,7 +681,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
.private_value = ((codec) << 24) | ((invert) << 16) | \
((bitnr) << 8) | (index), \
}
-#define AC97_VOLUME(xname, codec, index) { \
+#define AC97_VOLUME(xname, codec, index, stereo) { \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
@@ -682,7 +690,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
.get = ac97_volume_get, \
.put = ac97_volume_put, \
.tlv = { .p = ac97_db_scale, }, \
- .private_value = ((codec) << 24) | (index), \
+ .private_value = ((codec) << 24) | ((stereo) << 16) | (index), \
}
static DECLARE_TLV_DB_SCALE(monitor_db_scale, -1000, 1000, 0);
@@ -882,18 +890,18 @@ static const struct {
};
static const struct snd_kcontrol_new ac97_controls[] = {
- AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC),
+ AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0),
AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1),
AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0),
AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1),
- AC97_VOLUME("CD Capture Volume", 0, AC97_CD),
+ AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1),
AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1),
- AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX),
+ AC97_VOLUME("Aux Capture Volume", 0, AC97_AUX, 1),
AC97_SWITCH("Aux Capture Switch", 0, AC97_AUX, 15, 1),
};
static const struct snd_kcontrol_new ac97_fp_controls[] = {
- AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE),
+ AC97_VOLUME("Front Panel Playback Volume", 1, AC97_HEADPHONE, 1),
AC97_SWITCH("Front Panel Playback Switch", 1, AC97_HEADPHONE, 15, 1),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index d7b966e7c4c..f977dba7cbd 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -227,12 +227,9 @@ struct rme32 {
};
static struct pci_device_id snd_rme32_ids[] = {
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
- {PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32), 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8), 0,},
+ {PCI_VDEVICE(XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO), 0,},
{0,}
};
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 55fb1c131f5..2ba5c0fd55d 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -232,14 +232,10 @@ struct rme96 {
};
static struct pci_device_id snd_rme96_ids[] = {
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO), 0, },
+ { PCI_VDEVICE(XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST), 0, },
{ 0, }
};
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 7dc60ad4772..1f6406c4534 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -243,7 +243,7 @@ struct sonicvibes {
};
static struct pci_device_id snd_sonic_ids[] = {
- { 0x5333, 0xca00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(S3, 0xca00), 0, },
{ 0, }
};
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 949fcaf6b70..acfa4760da4 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -402,9 +402,9 @@ struct via82xx {
static struct pci_device_id snd_via82xx_ids[] = {
/* 0x1106, 0x3058 */
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA686, }, /* 686A */
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), TYPE_CARD_VIA686, }, /* 686A */
/* 0x1106, 0x3059 */
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA8233, }, /* VT8233 */
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), TYPE_CARD_VIA8233, }, /* VT8233 */
{ 0, }
};
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 0d54e3503c1..47eb61561df 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -261,7 +261,7 @@ struct via82xx_modem {
};
static struct pci_device_id snd_via82xx_modem_ids[] = {
- { 0x1106, 0x3068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_CARD_VIA82XX_MODEM, },
+ { PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, },
{ 0, }
};
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 4af66661f9b..e6b18b90d45 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -67,12 +67,12 @@ module_param_array(rear_switch, bool, NULL, 0444);
MODULE_PARM_DESC(rear_switch, "Enable shared rear/line-in switch");
static struct pci_device_id snd_ymfpci_ids[] = {
- { 0x1073, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724 */
- { 0x1073, 0x000d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF724F */
- { 0x1073, 0x000a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740 */
- { 0x1073, 0x000c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF740C */
- { 0x1073, 0x0010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF744 */
- { 0x1073, 0x0012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* YMF754 */
+ { PCI_VDEVICE(YAMAHA, 0x0004), 0, }, /* YMF724 */
+ { PCI_VDEVICE(YAMAHA, 0x000d), 0, }, /* YMF724F */
+ { PCI_VDEVICE(YAMAHA, 0x000a), 0, }, /* YMF740 */
+ { PCI_VDEVICE(YAMAHA, 0x000c), 0, }, /* YMF740C */
+ { PCI_VDEVICE(YAMAHA, 0x0010), 0, }, /* YMF744 */
+ { PCI_VDEVICE(YAMAHA, 0x0012), 0, }, /* YMF754 */
{ 0, }
};
diff --git a/tools/perf/CREDITS b/tools/perf/CREDITS
new file mode 100644
index 00000000000..c2ddcb3acbd
--- /dev/null
+++ b/tools/perf/CREDITS
@@ -0,0 +1,30 @@
+Most of the infrastructure that 'perf' uses here has been reused
+from the Git project, as of version:
+
+ 66996ec: Sync with 1.6.2.4
+
+Here is an (incomplete!) list of main contributors to those files
+in util/* and elsewhere:
+
+ Alex Riesen
+ Christian Couder
+ Dmitry Potapov
+ Jeff King
+ Johannes Schindelin
+ Johannes Sixt
+ Junio C Hamano
+ Linus Torvalds
+ Matthias Kestenholz
+ Michal Ostrowski
+ Miklos Vajna
+ Petr Baudis
+ Pierre Habouzit
+ René Scharfe
+ Samuel Tardieu
+ Shawn O. Pearce
+ Steffen Prohaska
+ Steve Haslam
+
+Thanks guys!
+
+The full history of the files can be found in the upstream Git commits.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 52d3fc6846a..8aa3f8c8870 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -13,13 +13,25 @@ SYNOPSIS
DESCRIPTION
-----------
This command displays the performance counter profile information recorded
-via perf report.
+via perf record.
OPTIONS
-------
-i::
--input=::
Input file name. (default: perf.data)
+-d::
+--dsos=::
+ Only consider symbols in these dsos. Accepts a comma-separated list,
+ including file://filename entries.
+-C::
+--comms=::
+ Only consider symbols in these comms. Accepts a comma-separated list,
+ including file://filename entries.
+-S::
+--symbols=::
+ Only consider these symbols. Accepts a comma-separated list,
+ including file://filename entries.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c368a72721d..0d74346d21a 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -8,8 +8,8 @@ perf-stat - Run a command and gather performance counter statistics
SYNOPSIS
--------
[verse]
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] <command>
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] -- <command> [<options>]
DESCRIPTION
-----------
@@ -40,7 +40,7 @@ OPTIONS
-a::
system-wide collection
--l::
+-S::
scale counter values
EXAMPLES
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 36d7eef4991..9c6d0ae3708 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -290,7 +290,7 @@ LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_counter.h
LIB_H += perf.h
-LIB_H += types.h
+LIB_H += util/types.h
LIB_H += util/list.h
LIB_H += util/rbtree.h
LIB_H += util/levenshtein.h
@@ -301,6 +301,7 @@ LIB_H += util/util.h
LIB_H += util/help.h
LIB_H += util/strbuf.h
LIB_H += util/string.h
+LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
@@ -322,12 +323,15 @@ LIB_OBJS += util/run-command.o
LIB_OBJS += util/quote.o
LIB_OBJS += util/strbuf.o
LIB_OBJS += util/string.o
+LIB_OBJS += util/strlist.o
LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
+LIB_OBJS += util/header.o
+LIB_OBJS += util/callchain.o
BUILTIN_OBJS += builtin-annotate.o
BUILTIN_OBJS += builtin-help.o
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 7e58e3ad150..722c0f54e54 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -855,7 +855,7 @@ static unsigned long total = 0,
total_unknown = 0;
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1013,10 +1013,10 @@ process_period_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d7ebbd75754..d18546f37d7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -14,6 +14,8 @@
#include "util/parse-events.h"
#include "util/string.h"
+#include "util/header.h"
+
#include <unistd.h>
#include <sched.h>
@@ -39,6 +41,8 @@ static int force = 0;
static int append_file = 0;
static int call_graph = 0;
static int verbose = 0;
+static int inherit_stat = 0;
+static int no_samples = 0;
static long samples;
static struct timeval last_read;
@@ -52,7 +56,8 @@ static int nr_poll;
static int nr_cpu;
static int file_new = 1;
-static struct perf_file_header file_header;
+
+struct perf_header *header;
struct mmap_event {
struct perf_event_header header;
@@ -306,12 +311,11 @@ static void pid_synthesize_mmap_samples(pid_t pid)
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
- char *execname = strrchr(bf, ' ');
+ char *execname = strchr(bf, '/');
- if (execname == NULL || execname[1] != '/')
+ if (execname == NULL)
continue;
- execname += 1;
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(mmap_ev.filename, execname, size);
@@ -329,7 +333,7 @@ static void pid_synthesize_mmap_samples(pid_t pid)
fclose(fp);
}
-static void synthesize_samples(void)
+static void synthesize_all(void)
{
DIR *proc;
struct dirent dirent, *next;
@@ -353,10 +357,35 @@ static void synthesize_samples(void)
static int group_fd;
+static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
+{
+ struct perf_header_attr *h_attr;
+
+ if (nr < header->attrs) {
+ h_attr = header->attr[nr];
+ } else {
+ h_attr = perf_header_attr__new(a);
+ perf_header__add_attr(header, h_attr);
+ }
+
+ return h_attr;
+}
+
static void create_counter(int counter, int cpu, pid_t pid)
{
struct perf_counter_attr *attr = attrs + counter;
- int track = 1;
+ struct perf_header_attr *h_attr;
+ int track = !counter; /* only the first counter needs these */
+ struct {
+ u64 count;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+ } read_data;
+
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -366,25 +395,20 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->sample_freq = freq;
}
+ if (no_samples)
+ attr->sample_freq = 0;
+
+ if (inherit_stat)
+ attr->inherit_stat = 1;
+
if (call_graph)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (file_new) {
- file_header.sample_type = attr->sample_type;
- } else {
- if (file_header.sample_type != attr->sample_type) {
- fprintf(stderr, "incompatible append\n");
- exit(-1);
- }
- }
-
attr->mmap = track;
attr->comm = track;
attr->inherit = (cpu < 0) && inherit;
attr->disabled = 1;
- track = 0; /* only the first counter needs these */
-
try_again:
fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
@@ -415,6 +439,22 @@ try_again:
exit(-1);
}
+ h_attr = get_header_attr(attr, counter);
+
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
+ if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
+ perror("Unable to read perf file descriptor\n");
+ exit(-1);
+ }
+
+ perf_header_attr__add_id(h_attr, read_data.id);
+
assert(fd[nr_cpu][counter] >= 0);
fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
@@ -445,11 +485,6 @@ static void open_counters(int cpu, pid_t pid)
{
int counter;
- if (pid > 0) {
- pid_synthesize_comm_event(pid, 0);
- pid_synthesize_mmap_samples(pid);
- }
-
group_fd = -1;
for (counter = 0; counter < nr_counters; counter++)
create_counter(counter, cpu, pid);
@@ -459,17 +494,16 @@ static void open_counters(int cpu, pid_t pid)
static void atexit_header(void)
{
- file_header.data_size += bytes_written;
+ header->data_size += bytes_written;
- if (pwrite(output, &file_header, sizeof(file_header), 0) == -1)
- perror("failed to write on file headers");
+ perf_header__write(header, output);
}
static int __cmd_record(int argc, const char **argv)
{
int i, counter;
struct stat st;
- pid_t pid;
+ pid_t pid = 0;
int flags;
int ret;
@@ -500,22 +534,31 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- if (!file_new) {
- if (read(output, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
-
- lseek(output, file_header.data_size, SEEK_CUR);
- }
+ if (!file_new)
+ header = perf_header__read(output);
+ else
+ header = perf_header__new();
atexit(atexit_header);
if (!system_wide) {
- open_counters(-1, target_pid != -1 ? target_pid : getpid());
+ pid = target_pid;
+ if (pid == -1)
+ pid = getpid();
+
+ open_counters(-1, pid);
} else for (i = 0; i < nr_cpus; i++)
open_counters(i, target_pid);
+ if (file_new)
+ perf_header__write(header, output);
+
+ if (!system_wide) {
+ pid_synthesize_comm_event(pid, 0);
+ pid_synthesize_mmap_samples(pid);
+ } else
+ synthesize_all();
+
if (target_pid == -1 && argc) {
pid = fork();
if (pid < 0)
@@ -539,10 +582,7 @@ static int __cmd_record(int argc, const char **argv)
}
}
- if (system_wide)
- synthesize_samples();
-
- while (!done) {
+ for (;;) {
int hits = samples;
for (i = 0; i < nr_cpu; i++) {
@@ -550,8 +590,11 @@ static int __cmd_record(int argc, const char **argv)
mmap_read(&mmap_array[i][counter]);
}
- if (hits == samples)
+ if (hits == samples) {
+ if (done)
+ break;
ret = poll(event_array, nr_poll, 100);
+ }
}
/*
@@ -600,6 +643,10 @@ static const struct option options[] = {
"do call-graph (stack chain/backtrace) recording"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('s', "stat", &inherit_stat,
+ "per thread counts"),
+ OPT_BOOLEAN('n', "no-samples", &no_samples,
+ "don't sample"),
OPT_END()
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5eb5566f0c9..135b7837e6b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -15,8 +15,11 @@
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"
+#include "util/callchain.h"
+#include "util/strlist.h"
#include "perf.h"
+#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -30,6 +33,8 @@ static char *vmlinux = NULL;
static char default_sort_order[] = "comm,dso";
static char *sort_order = default_sort_order;
+static char *dso_list_str, *comm_list_str, *sym_list_str;
+static struct strlist *dso_list, *comm_list, *sym_list;
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
@@ -51,6 +56,9 @@ static char *parent_pattern = default_parent_pattern;
static regex_t parent_regex;
static int exclude_other = 1;
+static int callchain;
+
+static u64 sample_type;
struct ip_event {
struct perf_event_header header;
@@ -59,11 +67,6 @@ struct ip_event {
unsigned char __more_data[];
};
-struct ip_callchain {
- u64 nr;
- u64 ips[0];
-};
-
struct mmap_event {
struct perf_event_header header;
u32 pid, tid;
@@ -97,6 +100,13 @@ struct lost_event {
u64 lost;
};
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 format[3];
+};
+
typedef union event_union {
struct perf_event_header header;
struct ip_event ip;
@@ -105,6 +115,7 @@ typedef union event_union {
struct fork_event fork;
struct period_event period;
struct lost_event lost;
+ struct read_event read;
} event_t;
static LIST_HEAD(dsos);
@@ -229,7 +240,7 @@ static u64 vdso__map_ip(struct map *map, u64 ip)
static inline int is_anon_memory(const char *filename)
{
- return strcmp(filename, "//anon") == 0;
+ return strcmp(filename, "//anon") == 0;
}
static struct map *map__new(struct mmap_event *event)
@@ -400,9 +411,27 @@ static void thread__insert_map(struct thread *self, struct map *map)
list_for_each_entry_safe(pos, tmp, &self->maps, node) {
if (map__overlap(pos, map)) {
- list_del_init(&pos->node);
- /* XXX leaks dsos */
- free(pos);
+ if (verbose >= 2) {
+ printf("overlapping maps:\n");
+ map__fprintf(map, stdout);
+ map__fprintf(pos, stdout);
+ }
+
+ if (map->start <= pos->start && map->end > pos->start)
+ pos->start = map->end;
+
+ if (map->end >= pos->end && map->start < pos->end)
+ pos->end = map->start;
+
+ if (verbose >= 2) {
+ printf("after collision:\n");
+ map__fprintf(pos, stdout);
+ }
+
+ if (pos->start >= pos->end) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
}
}
@@ -464,17 +493,19 @@ static size_t threads__fprintf(FILE *fp)
static struct rb_root hist;
struct hist_entry {
- struct rb_node rb_node;
-
- struct thread *thread;
- struct map *map;
- struct dso *dso;
- struct symbol *sym;
- struct symbol *parent;
- u64 ip;
- char level;
-
- u64 count;
+ struct rb_node rb_node;
+
+ struct thread *thread;
+ struct map *map;
+ struct dso *dso;
+ struct symbol *sym;
+ struct symbol *parent;
+ u64 ip;
+ char level;
+ struct callchain_node callchain;
+ struct rb_root sorted_chain;
+
+ u64 count;
};
/*
@@ -745,6 +776,48 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
}
static size_t
+callchain__fprintf(FILE *fp, struct callchain_node *self, u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += callchain__fprintf(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list)
+ ret += fprintf(fp, " %p\n", (void *)chain->ip);
+
+ return ret;
+}
+
+static size_t
+hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
+ u64 total_samples)
+{
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+ size_t ret = 0;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+ ret += fprintf(fp, " %6.2f%%\n", percent);
+ ret += callchain__fprintf(fp, chain, total_samples);
+ ret += fprintf(fp, "\n");
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+
+static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
struct sort_entry *se;
@@ -784,6 +857,9 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
ret += fprintf(fp, "\n");
+ if (callchain)
+ hist_entry_callchain__fprintf(fp, self, total_samples);
+
return ret;
}
@@ -797,7 +873,7 @@ resolve_symbol(struct thread *thread, struct map **mapp,
{
struct dso *dso = dsop ? *dsop : NULL;
struct map *map = mapp ? *mapp : NULL;
- uint64_t ip = *ipp;
+ u64 ip = *ipp;
if (!thread)
return NULL;
@@ -814,7 +890,6 @@ resolve_symbol(struct thread *thread, struct map **mapp,
*mapp = map;
got_map:
ip = map->map_ip(map, ip);
- *ipp = ip;
dso = map->dso;
} else {
@@ -828,6 +903,8 @@ got_map:
dso = kernel_dso;
}
dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
+ *ipp = ip;
if (dsop)
*dsop = dso;
@@ -867,6 +944,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
.level = level,
.count = count,
.parent = NULL,
+ .sorted_chain = RB_ROOT
};
int cmp;
@@ -909,6 +987,8 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!cmp) {
he->count += count;
+ if (callchain)
+ append_chain(&he->callchain, chain);
return 0;
}
@@ -922,6 +1002,10 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!he)
return -ENOMEM;
*he = entry;
+ if (callchain) {
+ callchain_init(&he->callchain);
+ append_chain(&he->callchain, chain);
+ }
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, &hist);
@@ -998,6 +1082,9 @@ static void output__insert_entry(struct hist_entry *he)
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ if (callchain)
+ sort_chain_to_rbtree(&he->sorted_chain, &he->callchain);
+
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
@@ -1115,7 +1202,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
}
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1127,12 +1214,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
void *more_data = event->ip.__more_data;
struct ip_callchain *chain = NULL;
- if (event->header.type & PERF_SAMPLE_PERIOD) {
+ if (sample_type & PERF_SAMPLE_PERIOD) {
period = *(u64 *)more_data;
more_data += sizeof(u64);
}
- dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
+ dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1140,7 +1227,7 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
(void *)(long)ip,
(long long)period);
- if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int i;
chain = (void *)more_data;
@@ -1166,6 +1253,9 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
+ if (comm_list && !strlist__has_entry(comm_list, thread->comm))
+ return 0;
+
if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -1188,6 +1278,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
if (show & show_mask) {
struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
+ if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
+ return 0;
+
+ if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
+ return 0;
+
if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
eprintf("problem incrementing symbol count, skipping event\n");
return -1;
@@ -1328,14 +1424,27 @@ static void trace_event(event_t *event)
}
static int
+process_read_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->read.pid,
+ event->read.tid,
+ event->read.value);
+
+ return 0;
+}
+
+static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
trace_event(event);
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
@@ -1351,6 +1460,9 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
case PERF_EVENT_LOST:
return process_lost_event(event, offset, head);
+ case PERF_EVENT_READ:
+ return process_read_event(event, offset, head);
+
/*
* We dont process them right now but they are fine:
*/
@@ -1366,13 +1478,30 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static struct perf_file_header file_header;
+static struct perf_header *header;
+
+static u64 perf_header__sample_type(void)
+{
+ u64 sample_type = 0;
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+
+ if (!sample_type)
+ sample_type = attr->attr.sample_type;
+ else if (sample_type != attr->attr.sample_type)
+ die("non matching sample_type");
+ }
+
+ return sample_type;
+}
static int __cmd_report(void)
{
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
- unsigned long head = sizeof(file_header);
+ unsigned long head, shift;
struct stat stat;
event_t *event;
uint32_t size;
@@ -1400,13 +1529,12 @@ static int __cmd_report(void)
exit(0);
}
- if (read(input, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
+ header = perf_header__read(input);
+ head = header->data_offset;
- if (sort__has_parent &&
- !(file_header.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ sample_type = perf_header__sample_type();
+
+ if (sort__has_parent && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
fprintf(stderr, "selected --sort parent, but no callchain data\n");
exit(-1);
}
@@ -1426,6 +1554,11 @@ static int __cmd_report(void)
cwd = NULL;
cwdlen = 0;
}
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
remap:
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
MAP_SHARED, input, offset);
@@ -1442,9 +1575,10 @@ more:
size = 8;
if (head + event->header.size >= page_size * mmap_window) {
- unsigned long shift = page_size * (head / page_size);
int ret;
+ shift = page_size * (head / page_size);
+
ret = munmap(buf, page_size * mmap_window);
assert(ret == 0);
@@ -1482,7 +1616,7 @@ more:
head += size;
- if (offset + head >= sizeof(file_header) + file_header.data_size)
+ if (offset + head >= header->data_offset + header->data_size)
goto done;
if (offset + head < stat.st_size)
@@ -1536,6 +1670,13 @@ static const struct option options[] = {
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &exclude_other,
"Only display entries with parent-match"),
+ OPT_BOOLEAN('c', "callchain", &callchain, "Display callchains"),
+ OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
OPT_END()
};
@@ -1554,6 +1695,19 @@ static void setup_sorting(void)
free(str);
}
+static void setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str) {
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ fprintf(stderr, "problems parsing %s list\n",
+ list_name);
+ exit(129);
+ }
+ }
+}
+
int cmd_report(int argc, const char **argv, const char *prefix)
{
symbol__init();
@@ -1575,6 +1729,10 @@ int cmd_report(int argc, const char **argv, const char *prefix)
if (argc)
usage_with_options(report_usage, options);
+ setup_list(&dso_list, dso_list_str, "dso");
+ setup_list(&comm_list, comm_list_str, "comm");
+ setup_list(&sym_list, sym_list_str, "symbol");
+
setup_pager();
return __cmd_report();
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 6d3eeac1ea2..2e03524a1de 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -32,6 +32,7 @@
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
+ * Jaswinder Singh Rajput <jaswinder@kernel.org>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
@@ -45,7 +46,7 @@
#include <sys/prctl.h>
#include <math.h>
-static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
+static struct perf_counter_attr default_attrs[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -59,42 +60,28 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
};
+#define MAX_RUN 100
+
static int system_wide = 0;
-static int inherit = 1;
static int verbose = 0;
-
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int target_pid = -1;
static int nr_cpus = 0;
-static unsigned int page_size;
+static int run_idx = 0;
+static int run_count = 1;
+static int inherit = 1;
static int scale = 1;
+static int target_pid = -1;
+static int null_run = 0;
-static const unsigned int default_count[] = {
- 1000000,
- 1000000,
- 10000,
- 10000,
- 1000000,
- 10000,
-};
-
-#define MAX_RUN 100
-
-static int run_count = 1;
-static int run_idx = 0;
-
-static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
-static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
-
-//static u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
-
+static int fd[MAX_NR_CPUS][MAX_COUNTERS];
static u64 runtime_nsecs[MAX_RUN];
static u64 walltime_nsecs[MAX_RUN];
static u64 runtime_cycles[MAX_RUN];
+static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
+static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+
static u64 event_res_avg[MAX_COUNTERS][3];
static u64 event_res_noise[MAX_COUNTERS][3];
@@ -109,7 +96,10 @@ static u64 walltime_nsecs_noise;
static u64 runtime_cycles_avg;
static u64 runtime_cycles_noise;
-static void create_perf_stat_counter(int counter)
+#define ERR_PERF_OPEN \
+"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
+
+static void create_perf_stat_counter(int counter, int pid)
{
struct perf_counter_attr *attr = attrs + counter;
@@ -119,20 +109,21 @@ static void create_perf_stat_counter(int counter)
if (system_wide) {
int cpu;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
- if (fd[cpu][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[cpu][counter], strerror(errno));
- }
+ if (fd[cpu][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[cpu][counter], strerror(errno));
}
} else {
- attr->inherit = inherit;
- attr->disabled = 1;
-
- fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0);
- if (fd[0][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[0][counter], strerror(errno));
- }
+ attr->inherit = inherit;
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+
+ fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
+ if (fd[0][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[0][counter], strerror(errno));
}
}
@@ -168,7 +159,7 @@ static void read_counter(int counter)
count[0] = count[1] = count[2] = 0;
nv = scale ? 3 : 1;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
if (fd[cpu][counter] < 0)
continue;
@@ -215,32 +206,67 @@ static int run_perf_stat(int argc, const char **argv)
int status = 0;
int counter;
int pid;
+ int child_ready_pipe[2], go_pipe[2];
+ char buf;
if (!system_wide)
nr_cpus = 1;
- for (counter = 0; counter < nr_counters; counter++)
- create_perf_stat_counter(counter);
-
- /*
- * Enable counters and exec the command:
- */
- t0 = rdclock();
- prctl(PR_TASK_PERF_COUNTERS_ENABLE);
+ if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
+ perror("failed to create pipes");
+ exit(1);
+ }
if ((pid = fork()) < 0)
perror("failed to fork");
if (!pid) {
- if (execvp(argv[0], (char **)argv)) {
- perror(argv[0]);
- exit(-1);
- }
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ read(go_pipe[0], &buf, 1);
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
}
+ /*
+ * Wait for the child to be ready to exec.
+ */
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ read(child_ready_pipe[0], &buf, 1);
+ close(child_ready_pipe[0]);
+
+ for (counter = 0; counter < nr_counters; counter++)
+ create_perf_stat_counter(counter, pid);
+
+ /*
+ * Enable counters and exec the command:
+ */
+ t0 = rdclock();
+
+ close(go_pipe[1]);
wait(&status);
- prctl(PR_TASK_PERF_COUNTERS_DISABLE);
t1 = rdclock();
walltime_nsecs[run_idx] = t1 - t0;
@@ -262,7 +288,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
{
double msecs = (double)count[0] / 1000000;
- fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter));
+ fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) {
@@ -276,7 +302,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
static void abs_printout(int counter, u64 *count, u64 *noise)
{
- fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter));
+ fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter));
if (runtime_cycles_avg &&
attrs[counter].type == PERF_TYPE_HARDWARE &&
@@ -306,7 +332,7 @@ static void print_counter(int counter)
scaled = event_scaled_avg[counter];
if (scaled == -1) {
- fprintf(stderr, " %14s %-20s\n",
+ fprintf(stderr, " %14s %-24s\n",
"<not counted>", event_name(counter));
return;
}
@@ -364,8 +390,11 @@ static void calc_avg(void)
event_res_avg[j]+1, event_res[i][j]+1);
update_avg("counter/2", j,
event_res_avg[j]+2, event_res[i][j]+2);
- update_avg("scaled", j,
- event_scaled_avg + j, event_scaled[i]+j);
+ if (event_scaled[i][j] != -1)
+ update_avg("scaled", j,
+ event_scaled_avg + j, event_scaled[i]+j);
+ else
+ event_scaled_avg[j] = -1;
}
}
runtime_nsecs_avg /= run_count;
@@ -429,11 +458,14 @@ static void print_stat(int argc, const char **argv)
for (counter = 0; counter < nr_counters; counter++)
print_counter(counter);
-
fprintf(stderr, "\n");
- fprintf(stderr, " %14.9f seconds time elapsed.\n",
+ fprintf(stderr, " %14.9f seconds time elapsed",
(double)walltime_nsecs_avg/1e9);
- fprintf(stderr, "\n");
+ if (run_count > 1) {
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
+ }
+ fprintf(stderr, "\n\n");
}
static volatile int signr = -1;
@@ -466,13 +498,15 @@ static const struct option options[] = {
OPT_INTEGER('p', "pid", &target_pid,
"stat events on existing pid"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
- "system-wide collection from all CPUs"),
+ "system-wide collection from all CPUs"),
OPT_BOOLEAN('S', "scale", &scale,
- "scale/normalize counters"),
+ "scale/normalize counters"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
+ OPT_BOOLEAN('n', "null", &null_run,
+ "null run - don't start any counters"),
OPT_END()
};
@@ -480,18 +514,17 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
{
int status;
- page_size = sysconf(_SC_PAGE_SIZE);
-
- memcpy(attrs, default_attrs, sizeof(attrs));
-
argc = parse_options(argc, argv, options, stat_usage, 0);
if (!argc)
usage_with_options(stat_usage, options);
if (run_count <= 0 || run_count > MAX_RUN)
usage_with_options(stat_usage, options);
- if (!nr_counters)
- nr_counters = 8;
+ /* Set attrs and nr_counters if no event is selected and !null_run */
+ if (!null_run && !nr_counters) {
+ memcpy(attrs, default_attrs, sizeof(default_attrs));
+ nr_counters = ARRAY_SIZE(default_attrs);
+ }
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(nr_cpus <= MAX_NR_CPUS);
@@ -511,7 +544,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
status = 0;
for (run_idx = 0; run_idx < run_count; run_idx++) {
if (run_count != 1 && verbose)
- fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx+1);
+ fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
status = run_perf_stat(argc, argv);
}
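
The run_perf_stat() rework above synchronizes parent and child with two pipes so
that counters can be attached to the child's pid after fork() but before the real
exec. Below is a minimal, self-contained sketch of that handshake; the names, the
standalone main(), and the omission of the dummy-execvp PLT trick are assumptions
for illustration only, not part of the patch:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	int child_ready_pipe[2], go_pipe[2];
	char buf;
	pid_t pid;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <command> [args]\n", argv[0]);
		return 1;
	}
	if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
		perror("failed to create pipes");
		return 1;
	}

	pid = fork();
	if (pid < 0) {
		perror("failed to fork");
		return 1;
	}
	if (!pid) {
		/* child: drop the pipe ends we do not use */
		close(child_ready_pipe[0]);
		close(go_pipe[1]);

		/* tell the parent we are ready (EOF on the ready pipe) */
		close(child_ready_pipe[1]);

		/* block until the parent says go (EOF on the go pipe) */
		read(go_pipe[0], &buf, 1);

		execvp(argv[1], argv + 1);
		perror(argv[1]);
		_exit(127);
	}

	/* parent: wait until the child has signalled readiness */
	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	read(child_ready_pipe[0], &buf, 1);
	close(child_ready_pipe[0]);

	/*
	 * This is the window where the real code attaches counters to
	 * the child's pid, i.e. create_perf_stat_counter(counter, pid).
	 */

	close(go_pipe[1]);	/* "go": unblocks the child's read() */
	wait(NULL);
	return 0;
}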
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5352b5e352e..cf0d21f1ae1 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -392,11 +392,11 @@ static void record_ip(u64 ip, int counter)
samples--;
}
-static void process_event(u64 ip, int counter)
+static void process_event(u64 ip, int counter, int user)
{
samples++;
- if (ip < min_ip || ip > max_ip) {
+ if (user) {
userspace_samples++;
return;
}
@@ -509,9 +509,10 @@ static void mmap_read_counter(struct mmap_data *md)
old += size;
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
- if (event->header.type & PERF_SAMPLE_IP)
- process_event(event->ip.ip, md->counter);
+ if (event->header.type == PERF_EVENT_SAMPLE) {
+ int user =
+ (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
+ process_event(event->ip.ip, md->counter, user);
}
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index ceb68aa51f7..8f729aedc1a 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -19,13 +19,23 @@
#define cpu_relax() asm volatile("" ::: "memory");
#endif
+#ifdef __sh__
+#include "../../arch/sh/include/asm/unistd.h"
+#if defined(__SH4A__) || defined(__SH5__)
+# define rmb() asm volatile("synco" ::: "memory")
+#else
+# define rmb() asm volatile("" ::: "memory")
+#endif
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include "../../include/linux/perf_counter.h"
-#include "types.h"
+#include "util/types.h"
/*
* prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
@@ -72,10 +82,9 @@ sys_perf_counter_open(struct perf_counter_attr *attr,
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
-struct perf_file_header {
- u64 version;
- u64 sample_type;
- u64 data_size;
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
};
#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
new file mode 100644
index 00000000000..ad3c2857896
--- /dev/null
+++ b/tools/perf/util/callchain.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+
+#include "callchain.h"
+
+
+static void rb_insert_callchain(struct rb_root *root, struct callchain_node *chain)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+
+ while (*p) {
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+/*
+ * Once we have collected all the callchains from the stream, we can
+ * sort them by hit count.
+ */
+void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node)
+{
+ struct callchain_node *child;
+
+ list_for_each_entry(child, &node->children, brothers)
+ sort_chain_to_rbtree(rb_root, child);
+
+ if (node->hit)
+ rb_insert_callchain(rb_root, node);
+}
+
+static struct callchain_node *create_child(struct callchain_node *parent)
+{
+ struct callchain_node *new;
+
+ new = malloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->children);
+ INIT_LIST_HEAD(&new->val);
+ list_add_tail(&new->brothers, &parent->children);
+
+ return new;
+}
+
+static void
+fill_node(struct callchain_node *node, struct ip_callchain *chain, int start)
+{
+ int i;
+
+ for (i = start; i < chain->nr; i++) {
+ struct callchain_list *call;
+
+ call = malloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = chain->ips[i];
+ list_add_tail(&call->list, &node->val);
+ }
+ node->val_nr = i - start;
+}
+
+static void add_child(struct callchain_node *parent, struct ip_callchain *chain)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent);
+ fill_node(new, chain, parent->val_nr);
+
+ new->hit = 1;
+}
+
+static void
+split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
+ struct callchain_list *to_split, int idx)
+{
+ struct callchain_node *new;
+
+ /* split */
+ new = create_child(parent);
+ list_move_tail(&to_split->list, &new->val);
+ new->hit = parent->hit;
+ parent->hit = 0;
+ parent->val_nr = idx;
+
+ /* create the new one */
+ add_child(parent, chain);
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ int start);
+
+static int
+__append_chain_children(struct callchain_node *root, struct ip_callchain *chain)
+{
+ struct callchain_node *rnode;
+
+ /* lookup in the children */
+ list_for_each_entry(rnode, &root->children, brothers) {
+ int ret = __append_chain(rnode, chain, root->val_nr);
+ if (!ret)
+ return 0;
+ }
+ return -1;
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ int start)
+{
+ struct callchain_list *cnode;
+ int i = start;
+ bool found = false;
+
+ /* lookup in the current node */
+ list_for_each_entry(cnode, &root->val, list) {
+ if (cnode->ip != chain->ips[i++])
+ break;
+ if (!found)
+ found = true;
+ if (i == chain->nr)
+ break;
+ }
+
+ /* nothing matched here: fall back to the parent */
+ if (!found)
+ return -1;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (i < root->val_nr) {
+ split_add_child(root, chain, cnode, i);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (i == root->val_nr) {
+ root->hit++;
+ return 0;
+ }
+
+ return __append_chain_children(root, chain);
+}
+
+void append_chain(struct callchain_node *root, struct ip_callchain *chain)
+{
+ if (__append_chain_children(root, chain) == -1)
+ add_child(root, chain);
+}
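
A hedged sketch of how the new callchain API is meant to be driven, mirroring the
hist_entry handling added to builtin-report.c above. The standalone main(), the fake
ip values, and building against the perf util headers (-Itools/perf) are assumptions
for illustration only:

#include <stdio.h>
#include "callchain.h"

int main(void)
{
	/* zero-init: hit and val_nr start at 0; callchain_init() only sets up lists */
	struct callchain_node root = { 0 };
	struct rb_root sorted = RB_ROOT;
	struct rb_node *nd;

	/* two made-up three-entry chains sharing a two-entry prefix */
	u64 buf1[] = { 3, 0x1000, 0x2000, 0x3000 };
	u64 buf2[] = { 3, 0x1000, 0x2000, 0x4000 };
	struct ip_callchain *c1 = (struct ip_callchain *)buf1;
	struct ip_callchain *c2 = (struct ip_callchain *)buf2;

	callchain_init(&root);
	append_chain(&root, c1);	/* first chain becomes a single child */
	append_chain(&root, c2);	/* the shared prefix splits that child */

	/* once every chain is in, order the nodes by hit count */
	sort_chain_to_rbtree(&sorted, &root);

	for (nd = rb_first(&sorted); nd; nd = rb_next(nd)) {
		struct callchain_node *node =
			rb_entry(nd, struct callchain_node, rb_node);

		printf("hit %d, %d entries in this node\n",
		       node->hit, node->val_nr);
	}
	return 0;
}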
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
new file mode 100644
index 00000000000..fa1cd2f71fd
--- /dev/null
+++ b/tools/perf/util/callchain.h
@@ -0,0 +1,33 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include "list.h"
+#include "rbtree.h"
+
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head brothers;
+ struct list_head children;
+ struct list_head val;
+ struct rb_node rb_node;
+ int val_nr;
+ int hit;
+};
+
+struct callchain_list {
+ unsigned long ip;
+ struct list_head list;
+};
+
+static inline void callchain_init(struct callchain_node *node)
+{
+ INIT_LIST_HEAD(&node->brothers);
+ INIT_LIST_HEAD(&node->children);
+ INIT_LIST_HEAD(&node->val);
+}
+
+void append_chain(struct callchain_node *root, struct ip_callchain *chain);
+void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node);
+#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
new file mode 100644
index 00000000000..450384b3bbe
--- /dev/null
+++ b/tools/perf/util/header.c
@@ -0,0 +1,242 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "util.h"
+#include "header.h"
+
+/*
+ *
+ */
+
+struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr)
+{
+ struct perf_header_attr *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->attr = *attr;
+ self->ids = 0;
+ self->size = 1;
+ self->id = malloc(sizeof(u64));
+
+ if (!self->id)
+ die("nomem");
+
+ return self;
+}
+
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
+{
+ int pos = self->ids;
+
+ self->ids++;
+ if (self->ids > self->size) {
+ self->size *= 2;
+ self->id = realloc(self->id, self->size * sizeof(u64));
+ if (!self->id)
+ die("nomem");
+ }
+ self->id[pos] = id;
+}
+
+/*
+ *
+ */
+
+struct perf_header *perf_header__new(void)
+{
+ struct perf_header *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->frozen = 0;
+
+ self->attrs = 0;
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+
+ if (!self->attr)
+ die("nomem");
+
+ self->data_offset = 0;
+ self->data_size = 0;
+
+ return self;
+}
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr)
+{
+ int pos = self->attrs;
+
+ if (self->frozen)
+ die("frozen");
+
+ self->attrs++;
+ if (self->attrs > self->size) {
+ self->size *= 2;
+ self->attr = realloc(self->attr, self->size * sizeof(void *));
+ if (!self->attr)
+ die("nomem");
+ }
+ self->attr[pos] = attr;
+}
+
+static const char *__perf_magic = "PERFFILE";
+
+#define PERF_MAGIC (*(u64 *)__perf_magic)
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_attr {
+ struct perf_counter_attr attr;
+ struct perf_file_section ids;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+};
+
+static void do_write(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to write");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+void perf_header__write(struct perf_header *self, int fd)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header_attr *attr;
+ int i;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ attr->id_offset = lseek(fd, 0, SEEK_CUR);
+ do_write(fd, attr->id, attr->ids * sizeof(u64));
+ }
+
+
+ self->attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ f_attr = (struct perf_file_attr){
+ .attr = attr->attr,
+ .ids = {
+ .offset = attr->id_offset,
+ .size = attr->ids * sizeof(u64),
+ }
+ };
+ do_write(fd, &f_attr, sizeof(f_attr));
+ }
+
+
+ self->data_offset = lseek(fd, 0, SEEK_CUR);
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = self->attr_offset,
+ .size = self->attrs * sizeof(f_attr),
+ },
+ .data = {
+ .offset = self->data_offset,
+ .size = self->data_size,
+ },
+ };
+
+ lseek(fd, 0, SEEK_SET);
+ do_write(fd, &f_header, sizeof(f_header));
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+}
+
+static void do_read(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to read");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+struct perf_header *perf_header__read(int fd)
+{
+ struct perf_header *self = perf_header__new();
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+
+ int nr_attrs, nr_ids, i, j;
+
+ lseek(fd, 0, SEEK_SET);
+ do_read(fd, &f_header, sizeof(f_header));
+
+ if (f_header.magic != PERF_MAGIC ||
+ f_header.size != sizeof(f_header) ||
+ f_header.attr_size != sizeof(f_attr))
+ die("incompatible file format");
+
+ nr_attrs = f_header.attrs.size / sizeof(f_attr);
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_header_attr *attr;
+ off_t tmp = lseek(fd, 0, SEEK_CUR);
+
+ do_read(fd, &f_attr, sizeof(f_attr));
+
+ attr = perf_header_attr__new(&f_attr.attr);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ do_read(fd, &f_id, sizeof(f_id));
+
+ perf_header_attr__add_id(attr, f_id);
+ }
+ perf_header__add_attr(self, attr);
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ self->data_offset = f_header.data.offset;
+ self->data_size = f_header.data.size;
+
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+
+ return self;
+}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
new file mode 100644
index 00000000000..b5ef53ad4c7
--- /dev/null
+++ b/tools/perf/util/header.h
@@ -0,0 +1,37 @@
+#ifndef _PERF_HEADER_H
+#define _PERF_HEADER_H
+
+#include "../../../include/linux/perf_counter.h"
+#include <sys/types.h>
+#include "types.h"
+
+struct perf_header_attr {
+ struct perf_counter_attr attr;
+ int ids, size;
+ u64 *id;
+ off_t id_offset;
+};
+
+struct perf_header {
+ int frozen;
+ int attrs, size;
+ struct perf_header_attr **attr;
+ off_t attr_offset;
+ u64 data_offset;
+ u64 data_size;
+};
+
+struct perf_header *perf_header__read(int fd);
+void perf_header__write(struct perf_header *self, int fd);
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr);
+
+struct perf_header_attr *
+perf_header_attr__new(struct perf_counter_attr *attr);
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+
+
+struct perf_header *perf_header__new(void);
+
+#endif /* _PERF_HEADER_H */
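
For reference, a hedged sketch of the perf_header round trip these declarations
support, modelled on the write side in builtin-record.c and the read side in
builtin-report.c above. The attribute values, the counter id, and the temporary
file name are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "header.h"

int main(void)
{
	struct perf_counter_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_header_attr *h_attr;
	struct perf_header *header, *loaded;
	int fd = open("header.tmp", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* write side, as builtin-record.c does per counter */
	header = perf_header__new();
	h_attr = perf_header_attr__new(&attr);
	perf_header_attr__add_id(h_attr, 1);	/* normally the id read back from the counter fd */
	perf_header__add_attr(header, h_attr);
	perf_header__write(header, fd);		/* leaves the fd positioned at data_offset */

	/* read side, as builtin-report.c does before processing events */
	loaded = perf_header__read(fd);
	printf("%d attr(s), data starts at offset %llu\n",
	       loaded->attrs, (unsigned long long)loaded->data_offset);

	close(fd);
	return 0;
}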
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 6653f7dd1d7..17a00e0df2c 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -126,21 +126,6 @@ static int is_executable(const char *name)
!S_ISREG(st.st_mode))
return 0;
-#ifdef __MINGW32__
- /* cannot trust the executable bit, peek into the file instead */
- char buf[3] = { 0 };
- int n;
- int fd = open(name, O_RDONLY);
- st.st_mode &= ~S_IXUSR;
- if (fd >= 0) {
- n = read(fd, buf, 2);
- if (n == 2)
- /* DOS executables start with "MZ" */
- if (!strcmp(buf, "#!") || !strcmp(buf, "MZ"))
- st.st_mode |= S_IXUSR;
- close(fd);
- }
-#endif
return st.st_mode & S_IXUSR;
}
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index a28bccae545..1915de20dca 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -9,7 +9,6 @@
static int spawned_pager;
-#ifndef __MINGW32__
static void pager_preexec(void)
{
/*
@@ -24,7 +23,6 @@ static void pager_preexec(void)
setenv("LESS", "FRSX", 0);
}
-#endif
static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
static struct child_process pager_process;
@@ -70,9 +68,8 @@ void setup_pager(void)
pager_argv[2] = pager;
pager_process.argv = pager_argv;
pager_process.in = -1;
-#ifndef __MINGW32__
pager_process.preexec_cb = pager_preexec;
-#endif
+
if (start_command(&pager_process))
return;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 35d04da38d6..4d042f104cd 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -16,32 +16,28 @@ struct event_symbol {
u8 type;
u64 config;
char *symbol;
+ char *alias;
};
-#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y
-#define CR(x, y) .type = PERF_TYPE_##x, .config = y
+#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
+#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
static struct event_symbol event_symbols[] = {
- { C(HARDWARE, HW_CPU_CYCLES), "cpu-cycles", },
- { C(HARDWARE, HW_CPU_CYCLES), "cycles", },
- { C(HARDWARE, HW_INSTRUCTIONS), "instructions", },
- { C(HARDWARE, HW_CACHE_REFERENCES), "cache-references", },
- { C(HARDWARE, HW_CACHE_MISSES), "cache-misses", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branch-instructions", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branches", },
- { C(HARDWARE, HW_BRANCH_MISSES), "branch-misses", },
- { C(HARDWARE, HW_BUS_CYCLES), "bus-cycles", },
-
- { C(SOFTWARE, SW_CPU_CLOCK), "cpu-clock", },
- { C(SOFTWARE, SW_TASK_CLOCK), "task-clock", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "page-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MIN), "minor-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MAJ), "major-faults", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "context-switches", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "cs", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "cpu-migrations", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "migrations", },
+ { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
+ { CHW(INSTRUCTIONS), "instructions", "" },
+ { CHW(CACHE_REFERENCES), "cache-references", "" },
+ { CHW(CACHE_MISSES), "cache-misses", "" },
+ { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
+ { CHW(BRANCH_MISSES), "branch-misses", "" },
+ { CHW(BUS_CYCLES), "bus-cycles", "" },
+
+ { CSW(CPU_CLOCK), "cpu-clock", "" },
+ { CSW(TASK_CLOCK), "task-clock", "" },
+ { CSW(PAGE_FAULTS), "page-faults", "faults" },
+ { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
+ { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
+ { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
+ { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};
#define __PERF_COUNTER_FIELD(config, name) \
@@ -74,26 +70,70 @@ static char *sw_event_names[] = {
#define MAX_ALIASES 8
-static char *hw_cache [][MAX_ALIASES] = {
- { "L1-data" , "l1-d", "l1d" },
- { "L1-instruction" , "l1-i", "l1i" },
- { "L2" , "l2" },
- { "Data-TLB" , "dtlb", "d-tlb" },
- { "Instruction-TLB" , "itlb", "i-tlb" },
- { "Branch" , "bpu" , "btb", "bpc" },
+static char *hw_cache[][MAX_ALIASES] = {
+ { "L1-d$", "l1-d", "l1d", "L1-data", },
+ { "L1-i$", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2" },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
};
-static char *hw_cache_op [][MAX_ALIASES] = {
- { "Load" , "read" },
- { "Store" , "write" },
- { "Prefetch" , "speculative-read", "speculative-load" },
+static char *hw_cache_op[][MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-static char *hw_cache_result [][MAX_ALIASES] = {
- { "Reference" , "ops", "access" },
- { "Miss" },
+static char *hw_cache_result[][MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
};
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+};
+
+static int is_cache_op_valid(u8 cache_type, u8 cache_op)
+{
+ if (hw_cache_stat[cache_type] & COP(cache_op))
+ return 1; /* valid */
+ else
+ return 0; /* invalid */
+}
+
+static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
+{
+ static char name[50];
+
+ if (cache_result) {
+ sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][0],
+ hw_cache_result[cache_result][0]);
+ } else {
+ sprintf(name, "%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][1]);
+ }
+
+ return name;
+}
+
char *event_name(int counter)
{
u64 config = attrs[counter].config;
@@ -113,7 +153,6 @@ char *event_name(int counter)
case PERF_TYPE_HW_CACHE: {
u8 cache_type, cache_op, cache_result;
- static char name[100];
cache_type = (config >> 0) & 0xff;
if (cache_type > PERF_COUNT_HW_CACHE_MAX)
@@ -127,12 +166,10 @@ char *event_name(int counter)
if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
return "unknown-ext-hardware-cache-result";
- sprintf(name, "%s-Cache-%s-%ses",
- hw_cache[cache_type][0],
- hw_cache_op[cache_op][0],
- hw_cache_result[cache_result][0]);
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return "invalid-cache";
- return name;
+ return event_cache_name(cache_type, cache_op, cache_result);
}
case PERF_TYPE_SOFTWARE:
@@ -163,7 +200,8 @@ static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size)
return -1;
}
-static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
+static int
+parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
{
int cache_type = -1, cache_op = 0, cache_result = 0;
@@ -182,6 +220,9 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a
if (cache_op == -1)
cache_op = PERF_COUNT_HW_CACHE_OP_READ;
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return -EINVAL;
+
cache_result = parse_aliases(str, hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX);
/*
@@ -196,6 +237,19 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a
return 0;
}
+static int check_events(const char *str, unsigned int i)
+{
+ if (!strncmp(str, event_symbols[i].symbol,
+ strlen(event_symbols[i].symbol)))
+ return 1;
+
+ if (strlen(event_symbols[i].alias))
+ if (!strncmp(str, event_symbols[i].alias,
+ strlen(event_symbols[i].alias)))
+ return 1;
+ return 0;
+}
+
/*
* Each event can have multiple symbolic names.
* Symbolic names are (almost) exactly matched.
@@ -235,9 +289,7 @@ static int parse_event_symbols(const char *str, struct perf_counter_attr *attr)
}
for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
- if (!strncmp(str, event_symbols[i].symbol,
- strlen(event_symbols[i].symbol))) {
-
+ if (check_events(str, i)) {
attr->type = event_symbols[i].type;
attr->config = event_symbols[i].config;
@@ -289,6 +341,7 @@ void print_events(void)
{
struct event_symbol *syms = event_symbols;
unsigned int i, type, prev_type = -1;
+ char name[40];
fprintf(stderr, "\n");
fprintf(stderr, "List of pre-defined events (to be used in -e):\n");
@@ -301,14 +354,18 @@ void print_events(void)
if (type != prev_type)
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [%s]\n", syms->symbol,
+ if (strlen(syms->alias))
+ sprintf(name, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strcpy(name, syms->symbol);
+ fprintf(stderr, " %-40s [%s]\n", name,
event_type_descriptors[type]);
prev_type = type;
}
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [raw hardware event descriptor]\n",
+ fprintf(stderr, " %-40s [raw hardware event descriptor]\n",
"rNNN");
fprintf(stderr, "\n");
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index b2f5e854f40..a3935343091 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -65,7 +65,6 @@ int start_command(struct child_process *cmd)
cmd->err = fderr[0];
}
-#ifndef __MINGW32__
fflush(NULL);
cmd->pid = fork();
if (!cmd->pid) {
@@ -118,71 +117,6 @@ int start_command(struct child_process *cmd)
}
exit(127);
}
-#else
- int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */
- const char **sargv = cmd->argv;
- char **env = environ;
-
- if (cmd->no_stdin) {
- s0 = dup(0);
- dup_devnull(0);
- } else if (need_in) {
- s0 = dup(0);
- dup2(fdin[0], 0);
- } else if (cmd->in) {
- s0 = dup(0);
- dup2(cmd->in, 0);
- }
-
- if (cmd->no_stderr) {
- s2 = dup(2);
- dup_devnull(2);
- } else if (need_err) {
- s2 = dup(2);
- dup2(fderr[1], 2);
- }
-
- if (cmd->no_stdout) {
- s1 = dup(1);
- dup_devnull(1);
- } else if (cmd->stdout_to_stderr) {
- s1 = dup(1);
- dup2(2, 1);
- } else if (need_out) {
- s1 = dup(1);
- dup2(fdout[1], 1);
- } else if (cmd->out > 1) {
- s1 = dup(1);
- dup2(cmd->out, 1);
- }
-
- if (cmd->dir)
- die("chdir in start_command() not implemented");
- if (cmd->env) {
- env = copy_environ();
- for (; *cmd->env; cmd->env++)
- env = env_setenv(env, *cmd->env);
- }
-
- if (cmd->perf_cmd) {
- cmd->argv = prepare_perf_cmd(cmd->argv);
- }
-
- cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env);
-
- if (cmd->env)
- free_environ(env);
- if (cmd->perf_cmd)
- free(cmd->argv);
-
- cmd->argv = sargv;
- if (s0 >= 0)
- dup2(s0, 0), close(s0);
- if (s1 >= 0)
- dup2(s1, 1), close(s1);
- if (s2 >= 0)
- dup2(s2, 2), close(s2);
-#endif
if (cmd->pid < 0) {
int err = errno;
@@ -288,14 +222,6 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const
return run_command(&cmd);
}
-#ifdef __MINGW32__
-static __stdcall unsigned run_thread(void *data)
-{
- struct async *async = data;
- return async->proc(async->fd_for_proc, async->data);
-}
-#endif
-
int start_async(struct async *async)
{
int pipe_out[2];
@@ -304,7 +230,6 @@ int start_async(struct async *async)
return error("cannot create pipe: %s", strerror(errno));
async->out = pipe_out[0];
-#ifndef __MINGW32__
/* Flush stdio before fork() to avoid cloning buffers */
fflush(NULL);
@@ -319,33 +244,17 @@ int start_async(struct async *async)
exit(!!async->proc(pipe_out[1], async->data));
}
close(pipe_out[1]);
-#else
- async->fd_for_proc = pipe_out[1];
- async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL);
- if (!async->tid) {
- error("cannot create thread: %s", strerror(errno));
- close_pair(pipe_out);
- return -1;
- }
-#endif
+
return 0;
}
int finish_async(struct async *async)
{
-#ifndef __MINGW32__
int ret = 0;
if (wait_or_whine(async->pid))
ret = error("waitpid (async) failed");
-#else
- DWORD ret = 0;
- if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0)
- ret = error("waiting for thread failed: %lu", GetLastError());
- else if (!GetExitCodeThread(async->tid, &ret))
- ret = error("cannot get thread exit code: %lu", GetLastError());
- CloseHandle(async->tid);
-#endif
+
return ret;
}
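[editorial note] With the MinGW-only spawn/thread paths removed above, start_command() and start_async() always go through the POSIX fork()-based flow. A minimal standalone sketch of that flow, simplified and with a hypothetical helper name (not the exact perf implementation, error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hypothetical condensed version of the remaining fork-based async path;
 * illustrative only. */
static pid_t run_async(int (*proc)(int fd, void *data), void *data, int *out_fd)
{
	int pipe_out[2];
	pid_t pid;

	if (pipe(pipe_out) < 0)
		return -1;

	fflush(NULL);			/* flush stdio before fork() to avoid cloning buffers */
	pid = fork();
	if (pid < 0)
		return -1;
	if (!pid) {			/* child: run proc() on the write end, exit with its status */
		close(pipe_out[0]);
		exit(!!proc(pipe_out[1], data));
	}
	close(pipe_out[1]);
	*out_fd = pipe_out[0];		/* caller reads from here, then reaps the child with waitpid() */
	return pid;
}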
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index 328289f2366..cc1837deba8 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -79,12 +79,7 @@ struct async {
int (*proc)(int fd, void *data);
void *data;
int out; /* caller reads from here and closes it */
-#ifndef __MINGW32__
pid_t pid;
-#else
- HANDLE tid;
- int fd_for_proc;
-#endif
};
int start_async(struct async *async);
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index eaba0930680..464e7ca898c 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -259,7 +259,7 @@ size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
res = fread(sb->buf + sb->len, 1, size, f);
if (res > 0)
strbuf_setlen(sb, sb->len + res);
- else if (res < 0 && oldalloc == 0)
+ else if (oldalloc == 0)
strbuf_release(sb);
return res;
}
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 37b03255b42..3dca2f654cd 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,7 +1,7 @@
#ifndef _PERF_STRING_H_
#define _PERF_STRING_H_
-#include "../types.h"
+#include "types.h"
int hex2u64(const char *ptr, u64 *val);
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
new file mode 100644
index 00000000000..025a78edfff
--- /dev/null
+++ b/tools/perf/util/strlist.c
@@ -0,0 +1,184 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct str_node *str_node__new(const char *s, bool dupstr)
+{
+ struct str_node *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ if (dupstr) {
+ s = strdup(s);
+ if (s == NULL)
+ goto out_delete;
+ }
+ self->s = s;
+ }
+
+ return self;
+
+out_delete:
+ free(self);
+ return NULL;
+}
+
+static void str_node__delete(struct str_node *self, bool dupstr)
+{
+ if (dupstr)
+ free((void *)self->s);
+ free(self);
+}
+
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
+
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+
+ return 0;
+}
+
+int strlist__load(struct strlist *self, const char *filename)
+{
+ char entry[1024];
+ int err;
+ FILE *fp = fopen(filename, "r");
+
+ if (fp == NULL)
+ return errno;
+
+ while (fgets(entry, sizeof(entry), fp) != NULL) {
+ const size_t len = strlen(entry);
+
+ if (len == 0)
+ continue;
+ entry[len - 1] = '\0';
+
+ err = strlist__add(self, entry);
+ if (err != 0)
+ goto out;
+ }
+
+ err = 0;
+out:
+ fclose(fp);
+ return err;
+}
+
+void strlist__remove(struct strlist *self, struct str_node *sn)
+{
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
+}
+
+bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return true;
+ }
+
+ return false;
+}
+
+static int strlist__parse_list_entry(struct strlist *self, const char *s)
+{
+ if (strncmp(s, "file://", 7) == 0)
+ return strlist__load(self, s + 7);
+
+ return strlist__add(self, s);
+}
+
+int strlist__parse_list(struct strlist *self, const char *s)
+{
+ char *sep;
+ int err;
+
+ while ((sep = strchr(s, ',')) != NULL) {
+ *sep = '\0';
+ err = strlist__parse_list_entry(self, s);
+ *sep = ',';
+ if (err != 0)
+ return err;
+ s = sep + 1;
+ }
+
+ return *s ? strlist__parse_list_entry(self, s) : 0;
+}
+
+struct strlist *strlist__new(bool dupstr, const char *slist)
+{
+ struct strlist *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->entries = RB_ROOT;
+ self->dupstr = dupstr;
+ if (slist && strlist__parse_list(self, slist) != 0)
+ goto out_error;
+ }
+
+ return self;
+out_error:
+ free(self);
+ return NULL;
+}
+
+void strlist__delete(struct strlist *self)
+{
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
+}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
new file mode 100644
index 00000000000..2fb117fb4b6
--- /dev/null
+++ b/tools/perf/util/strlist.h
@@ -0,0 +1,32 @@
+#ifndef STRLIST_H_
+#define STRLIST_H_
+
+#include "rbtree.h"
+#include <stdbool.h>
+
+struct str_node {
+ struct rb_node rb_node;
+ const char *s;
+};
+
+struct strlist {
+ struct rb_root entries;
+ bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *self);
+
+void strlist__remove(struct strlist *self, struct str_node *sn);
+int strlist__load(struct strlist *self, const char *filename);
+int strlist__add(struct strlist *self, const char *str);
+
+bool strlist__has_entry(struct strlist *self, const char *entry);
+
+static inline bool strlist__empty(const struct strlist *self)
+{
+ return rb_first(&self->entries) == NULL;
+}
+
+int strlist__parse_list(struct strlist *self, const char *s);
+#endif /* STRLIST_H_ */
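[editorial note] The strlist.c/strlist.h files added above introduce a small rbtree-backed string set: strlist__new() optionally parses a comma-separated spec (a "file://<path>" entry loads one string per line), and with dupstr the list owns copies of the strings. A hedged usage sketch follows; the event names and the main() wrapper are made up for illustration, and it assumes linking against perf's rbtree helpers:

#include <stdio.h>
#include "util/strlist.h"

int main(void)
{
	/* dupstr = true: the list strdup()s entries and frees them on delete */
	struct strlist *slist = strlist__new(true, "sys_read,sys_write");

	if (slist == NULL)
		return 1;

	strlist__add(slist, "sys_open");	/* returns -EEXIST for duplicates */

	if (strlist__has_entry(slist, "sys_read"))
		printf("sys_read is in the list\n");

	strlist__delete(slist);			/* removes every node and frees the copied strings */
	return 0;
}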
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 86e14375e74..78c2efde01b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -520,7 +520,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
-
+ self->prelinked = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL;
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
u64 obj_start;
@@ -535,7 +537,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
gelf_getshdr(sec, &shdr);
obj_start = sym.st_value;
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ if (self->prelinked) {
+ if (verbose >= 2)
+ printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
+ (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
f = symbol__new(sym.st_value, sym.st_size,
elf_sym__name(&sym, symstrs),
@@ -569,6 +577,8 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
if (!name)
return -1;
+ self->prelinked = 0;
+
if (strncmp(self->name, "/tmp/perf-", 10) == 0)
return dso__load_perf_map(self, filter, verbose);
@@ -629,7 +639,7 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
if (vmlinux)
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
- if (err)
+ if (err < 0)
err = dso__load_kallsyms(self, filter, verbose);
return err;
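[editorial note] The symbol.c hunks above make the symbol rebasing conditional: it is applied only to prelinked DSOs, detected by the presence of a .gnu.prelink_undo section. The adjustment itself is the usual virtual-address to file-offset conversion via the containing section's sh_addr/sh_offset; a tiny illustrative sketch (numbers and helper name are made up):

/* Illustrative only: rebase a symbol's virtual address to its file offset,
 * as dso__load_sym() now does for prelinked DSOs. */
static unsigned long long rebase_prelinked_sym(unsigned long long st_value,
					       unsigned long long sh_addr,
					       unsigned long long sh_offset)
{
	/* e.g. st_value = 0x5120, sh_addr = 0x5000, sh_offset = 0x3000 -> 0x3120 */
	return st_value - (sh_addr - sh_offset);
}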
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ea332e56e45..2c48ace8203 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -2,7 +2,7 @@
#define _PERF_SYMBOL_ 1
#include <linux/types.h>
-#include "../types.h"
+#include "types.h"
#include "list.h"
#include "rbtree.h"
@@ -20,8 +20,9 @@ struct symbol {
struct dso {
struct list_head node;
struct rb_root syms;
- unsigned int sym_priv_size;
struct symbol *(*find_symbol)(struct dso *, u64 ip);
+ unsigned int sym_priv_size;
+ unsigned char prelinked;
char name[0];
};
diff --git a/tools/perf/types.h b/tools/perf/util/types.h
index 5e75f900594..5e75f900594 100644
--- a/tools/perf/types.h
+++ b/tools/perf/util/types.h
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b8cfed776d8..b4be6071c10 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -67,7 +67,6 @@
#include <assert.h>
#include <regex.h>
#include <utime.h>
-#ifndef __MINGW32__
#include <sys/wait.h>
#include <sys/poll.h>
#include <sys/socket.h>
@@ -81,20 +80,6 @@
#include <netdb.h>
#include <pwd.h>
#include <inttypes.h>
-#if defined(__CYGWIN__)
-#undef _XOPEN_SOURCE
-#include <grp.h>
-#define _XOPEN_SOURCE 600
-#include "compat/cygwin.h"
-#else
-#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */
-#include <grp.h>
-#define _ALL_SOURCE 1
-#endif
-#else /* __MINGW32__ */
-/* pull in Windows compatibility stuff */
-#include "compat/mingw.h"
-#endif /* __MINGW32__ */
#ifndef NO_ICONV
#include <iconv.h>
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 764554350ed..2884baf1d5f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -746,6 +746,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
cpumask_clear(cpus);
me = get_cpu();
+ spin_lock(&kvm->requests_lock);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
vcpu = kvm->vcpus[i];
if (!vcpu)
@@ -762,6 +763,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
+ spin_unlock(&kvm->requests_lock);
put_cpu();
free_cpumask_var(cpus);
return called;
@@ -982,6 +984,7 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
+ spin_lock_init(&kvm->requests_lock);
kvm_io_bus_init(&kvm->pio_bus);
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
@@ -1194,6 +1197,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ if (old.npages)
+ kvm_arch_flush_shadow(kvm);
}
#endif /* not defined CONFIG_S390 */