author    David S. Miller <davem@davemloft.net>  2013-03-27 13:52:49 -0400
committer David S. Miller <davem@davemloft.net>  2013-03-27 13:52:49 -0400
commit    e2a553dbf18a5177fdebe29495c32a8e7fd3a4db (patch)
tree      5ccb3d498325a7aaf93f49549eca03cb7861ca1c
parent    7559d97993ae7d552c96313155286f372cf4cf7c (diff)
parent    a8c45289f215e137825bf9630d0abb41c1dc41ff (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	include/net/ipip.h

The changes made to ipip.h in 'net' were already included in 'net-next'
before that header was moved to another location.

Signed-off-by: David S. Miller <davem@davemloft.net>
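For reference, a rough sketch of how a conflict like the one in include/net/ipip.h can be inspected and resolved from the command line is shown below. The remote name 'net' and the choice to keep the current branch's side are illustrative assumptions, not a reconstruction of the resolution actually used here:

  # merge the 'net' tree into the checked-out 'net-next' branch
  git fetch net
  git merge net/master

  # list only the commits from both sides that touch the conflicted header
  git log --merge --oneline -- include/net/ipip.h

  # keep the current branch's version, since the 'net' changes are
  # already present in 'net-next', then conclude the merge
  git checkout --ours -- include/net/ipip.h
  git add include/net/ipip.h
  git commit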
-rw-r--r--  Documentation/i2c/busses/i2c-diolan-u2c  2
-rw-r--r--  Documentation/sound/alsa/ALSA-Configuration.txt  2
-rw-r--r--  Documentation/sound/alsa/seq_oss.html  2
-rw-r--r--  MAINTAINERS  22
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/Kconfig.debug  1
-rw-r--r--  arch/arm/boot/dts/tegra20.dtsi  2
-rw-r--r--  arch/arm/boot/dts/tegra30.dtsi  2
-rw-r--r--  arch/arm/mach-mxs/mach-mxs.c  24
-rw-r--r--  arch/powerpc/perf/power7-pmu.c  13
-rw-r--r--  arch/tile/configs/tilegx_defconfig  1
-rw-r--r--  arch/tile/configs/tilepro_defconfig  1
-rw-r--r--  arch/x86/include/asm/kprobes.h  1
-rw-r--r--  arch/x86/include/asm/kvm_host.h  4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  4
-rw-r--r--  arch/x86/kernel/kprobes/core.c  5
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c  30
-rw-r--r--  arch/x86/kvm/x86.c  64
-rw-r--r--  arch/x86/lib/usercopy_64.c  4
-rw-r--r--  drivers/ata/Kconfig  13
-rw-r--r--  drivers/ata/ahci.c  2
-rw-r--r--  drivers/ata/ata_piix.c  4
-rw-r--r--  drivers/ata/libata-acpi.c  2
-rw-r--r--  drivers/ata/pata_samsung_cf.c  13
-rw-r--r--  drivers/ata/sata_fsl.c  3
-rw-r--r--  drivers/block/nvme.c  33
-rw-r--r--  drivers/edac/amd64_edac.c  15
-rw-r--r--  drivers/edac/edac_mc.c  6
-rw-r--r--  drivers/edac/edac_mc_sysfs.c  17
-rw-r--r--  drivers/firmware/Kconfig  18
-rw-r--r--  drivers/firmware/efivars.c  150
-rw-r--r--  drivers/gpio/gpiolib-of.c  5
-rw-r--r--  drivers/gpu/drm/drm_edid.c  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c  21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c  370
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c  21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h  5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  10
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  14
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c  11
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/object.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c  67
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c  33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c  21
-rw-r--r--  drivers/gpu/drm/radeon/si.c  1
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c  2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c  13
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca9541.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c  2
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig  6
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c  5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c  3
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c  8
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c  4
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  8
-rw-r--r--  drivers/iommu/Kconfig  2
-rw-r--r--  drivers/iommu/amd_iommu.c  22
-rw-r--r--  drivers/iommu/amd_iommu_init.c  2
-rw-r--r--  drivers/iommu/irq_remapping.c  1
-rw-r--r--  drivers/md/dm-bufio.c  2
-rw-r--r--  drivers/md/dm-cache-metadata.c  64
-rw-r--r--  drivers/md/dm-cache-metadata.h  2
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c  7
-rw-r--r--  drivers/md/dm-cache-policy-internal.h  2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c  8
-rw-r--r--  drivers/md/dm-cache-policy.c  8
-rw-r--r--  drivers/md/dm-cache-policy.h  2
-rw-r--r--  drivers/md/dm-cache-target.c  169
-rw-r--r--  drivers/md/dm-thin.c  11
-rw-r--r--  drivers/md/dm-verity.c  39
-rw-r--r--  drivers/md/md.c  6
-rw-r--r--  drivers/md/md.h  4
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c  46
-rw-r--r--  drivers/md/raid5.c  116
-rw-r--r--  drivers/md/raid5.h  5
-rw-r--r--  drivers/net/bonding/bond_sysfs.c  5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c  18
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c  33
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c  2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  24
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  4
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-mvebu.c  2
-rw-r--r--  drivers/pinctrl/pinconf.c  2
-rw-r--r--  drivers/pinctrl/pinconf.h  2
-rw-r--r--  drivers/pinctrl/pinctrl-abx500.c  2
-rw-r--r--  drivers/pinctrl/pinmux.c  5
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c  50
-rw-r--r--  drivers/rtc/rtc-at91rm9200.h  1
-rw-r--r--  drivers/rtc/rtc-da9052.c  8
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c  5
-rw-r--r--  drivers/target/target_core_file.h  2
-rw-r--r--  drivers/target/target_core_pscsi.c  11
-rw-r--r--  drivers/target/target_core_sbc.c  7
-rw-r--r--  drivers/target/target_core_tpg.c  3
-rw-r--r--  drivers/thermal/dove_thermal.c  16
-rw-r--r--  drivers/thermal/exynos_thermal.c  2
-rw-r--r--  drivers/thermal/kirkwood_thermal.c  8
-rw-r--r--  drivers/thermal/rcar_thermal.c  29
-rw-r--r--  drivers/tty/vt/vc_screen.c  6
-rw-r--r--  drivers/usb/class/cdc-acm.c  22
-rw-r--r--  drivers/usb/core/hcd-pci.c  23
-rw-r--r--  drivers/usb/gadget/f_rndis.c  3
-rw-r--r--  drivers/usb/gadget/g_ffs.c  4
-rw-r--r--  drivers/usb/gadget/net2272.c  9
-rw-r--r--  drivers/usb/gadget/net2280.c  8
-rw-r--r--  drivers/usb/gadget/u_serial.c  2
-rw-r--r--  drivers/usb/gadget/udc-core.c  2
-rw-r--r--  drivers/usb/host/ehci-hcd.c  1
-rw-r--r--  drivers/usb/host/ehci-hub.c  2
-rw-r--r--  drivers/usb/host/ehci-q.c  13
-rw-r--r--  drivers/usb/host/ehci-timer.c  2
-rw-r--r--  drivers/usb/host/xhci.c  3
-rw-r--r--  drivers/usb/host/xhci.h  4
-rw-r--r--  drivers/usb/musb/da8xx.c  2
-rw-r--r--  drivers/usb/musb/musb_gadget.c  9
-rw-r--r--  drivers/usb/serial/ark3116.c  10
-rw-r--r--  drivers/usb/serial/ch341.c  11
-rw-r--r--  drivers/usb/serial/cypress_m8.c  14
-rw-r--r--  drivers/usb/serial/f81232.c  9
-rw-r--r--  drivers/usb/serial/ftdi_sio.c  19
-rw-r--r--  drivers/usb/serial/garmin_gps.c  7
-rw-r--r--  drivers/usb/serial/io_edgeport.c  12
-rw-r--r--  drivers/usb/serial/io_ti.c  13
-rw-r--r--  drivers/usb/serial/mct_u232.c  13
-rw-r--r--  drivers/usb/serial/mos7840.c  16
-rw-r--r--  drivers/usb/serial/oti6858.c  10
-rw-r--r--  drivers/usb/serial/pl2303.c  11
-rw-r--r--  drivers/usb/serial/quatech2.c  12
-rw-r--r--  drivers/usb/serial/spcp8x5.c  9
-rw-r--r--  drivers/usb/serial/ssu100.c  12
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c  10
-rw-r--r--  drivers/usb/serial/usb-serial.c  3
-rw-r--r--  drivers/usb/storage/unusual_devs.h  7
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c  1
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c  1
-rw-r--r--  drivers/vhost/tcm_vhost.c  13
-rw-r--r--  drivers/video/ep93xx-fb.c  1
-rw-r--r--  drivers/video/mxsfb.c  7
-rw-r--r--  drivers/watchdog/sp5100_tco.c  126
-rw-r--r--  drivers/watchdog/sp5100_tco.h  2
-rw-r--r--  firmware/Makefile  2
-rw-r--r--  firmware/intel/sd7220.fw.ihex (renamed from firmware/qlogic/sd7220.fw.ihex)  0
-rw-r--r--  fs/cifs/asn1.c  53
-rw-r--r--  fs/cifs/cifsfs.c  24
-rw-r--r--  fs/cifs/cifsfs.h  4
-rw-r--r--  fs/cifs/file.c  6
-rw-r--r--  fs/cifs/inode.c  10
-rw-r--r--  fs/cifs/netmisc.c  2
-rw-r--r--  fs/dcache.c  16
-rw-r--r--  fs/ext4/ext4.h  8
-rw-r--r--  fs/ext4/extents.c  105
-rw-r--r--  fs/ext4/extents_status.c  212
-rw-r--r--  fs/ext4/extents_status.h  9
-rw-r--r--  fs/ext4/ialloc.c  4
-rw-r--r--  fs/ext4/inode.c  182
-rw-r--r--  fs/ext4/mballoc.c  23
-rw-r--r--  fs/ext4/move_extent.c  43
-rw-r--r--  fs/ext4/page-io.c  12
-rw-r--r--  fs/ext4/resize.c  4
-rw-r--r--  fs/ext4/super.c  4
-rw-r--r--  fs/internal.h  5
-rw-r--r--  fs/jbd2/transaction.c  15
-rw-r--r--  fs/nfs/blocklayout/blocklayoutdm.c  4
-rw-r--r--  fs/nfs/idmap.c  13
-rw-r--r--  fs/nfs/nfs4filelayout.c  1
-rw-r--r--  fs/nfs/nfs4proc.c  16
-rw-r--r--  fs/nfs/pnfs.c  81
-rw-r--r--  fs/nfs/pnfs.h  6
-rw-r--r--  fs/nfsd/nfscache.c  11
-rw-r--r--  fs/nfsd/vfs.c  3
-rw-r--r--  fs/proc/inode.c  6
-rw-r--r--  fs/read_write.c  28
-rw-r--r--  fs/splice.c  4
-rw-r--r--  include/drm/drm_pciids.h  13
-rw-r--r--  include/linux/edac.h  7
-rw-r--r--  include/linux/hash.h  3
-rw-r--r--  include/linux/irq_work.h  2
-rw-r--r--  include/linux/kernel.h  1
-rw-r--r--  include/linux/mmzone.h  2
-rw-r--r--  include/linux/mxsfb.h  7
-rw-r--r--  include/linux/nvme.h  28
-rw-r--r--  include/linux/printk.h  6
-rw-r--r--  include/linux/usb/serial.h  2
-rw-r--r--  include/linux/usb/ulpi.h  8
-rw-r--r--  ipc/mqueue.c  3
-rw-r--r--  kernel/events/core.c  8
-rw-r--r--  kernel/printk.c  80
-rw-r--r--  kernel/sys.c  57
-rw-r--r--  kernel/time/tick-broadcast.c  3
-rw-r--r--  kernel/trace/ftrace.c  4
-rw-r--r--  kernel/trace/trace.c  59
-rw-r--r--  kernel/trace/trace.h  6
-rw-r--r--  kernel/trace/trace_irqsoff.c  19
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  18
-rw-r--r--  lib/bust_spinlocks.c  3
-rw-r--r--  lib/dma-debug.c  45
-rw-r--r--  mm/hugetlb.c  8
-rw-r--r--  mm/memory_hotplug.c  6
-rw-r--r--  net/8021q/vlan.c  14
-rw-r--r--  net/bridge/br_fdb.c  2
-rw-r--r--  net/core/dev.c  1
-rw-r--r--  net/ipv4/tcp_input.c  7
-rw-r--r--  net/ipv6/addrconf.c  26
-rw-r--r--  net/sunrpc/sched.c  9
-rw-r--r--  net/unix/af_unix.c  11
-rw-r--r--  sound/pci/hda/hda_codec.c  2
-rw-r--r--  sound/pci/hda/hda_generic.c  46
-rw-r--r--  sound/pci/hda/hda_intel.c  132
-rw-r--r--  sound/pci/hda/patch_cirrus.c  4
-rw-r--r--  sound/pci/hda/patch_conexant.c  16
-rw-r--r--  sound/usb/mixer.c  21
-rw-r--r--  tools/lib/traceevent/Makefile  2
-rw-r--r--  tools/perf/Makefile  8
-rw-r--r--  tools/perf/bench/bench.h  24
-rw-r--r--  tools/perf/builtin-record.c  6
-rw-r--r--  tools/perf/util/hist.h  5
-rw-r--r--  tools/perf/util/strlist.c  2
-rw-r--r--  virt/kvm/ioapic.c  7
238 files changed, 2886 insertions, 1270 deletions
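The per-file change counts above follow cgit's diffstat table; the same summary can be reproduced locally from the commit hash in the header, assuming a kernel clone that already contains this merge:

  git show --stat e2a553dbf18a5177fdebe29495c32a8e7fd3a4db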
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c
index 30fe4bb9a06..0d6018c316c 100644
--- a/Documentation/i2c/busses/i2c-diolan-u2c
+++ b/Documentation/i2c/busses/i2c-diolan-u2c
@@ -5,7 +5,7 @@ Supported adapters:
Documentation:
http://www.diolan.com/i2c/u2c12.html
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index ce6581c8ca2..4499bd94886 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
models depending on the codec chip. The list of available models
is found in HD-Audio-Models.txt
- The model name "genric" is treated as a special case. When this
+ The model name "generic" is treated as a special case. When this
model is given, the driver uses the generic codec parser without
"codec-patch". It's sometimes good for testing and debugging.
diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html
index d9776cf60c0..9663b45f6fd 100644
--- a/Documentation/sound/alsa/seq_oss.html
+++ b/Documentation/sound/alsa/seq_oss.html
@@ -285,7 +285,7 @@ sample data.
<H4>
7.2.4 Close Callback</H4>
The <TT>close</TT> callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
be released in the close callback. The deletion of ALSA port should be
done here, too. This callback must not be NULL.
<H4>
diff --git a/MAINTAINERS b/MAINTAINERS
index 2932ec21508..d32cb8d288e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1461,6 +1461,12 @@ F: drivers/dma/at_hdmac.c
F: drivers/dma/at_hdmac_regs.h
F: include/linux/platform_data/dma-atmel.h
+ATMEL I2C DRIVER
+M: Ludovic Desroches <ludovic.desroches@atmel.com>
+L: linux-i2c@vger.kernel.org
+S: Supported
+F: drivers/i2c/busses/i2c-at91.c
+
ATMEL ISI DRIVER
M: Josh Wu <josh.wu@atmel.com>
L: linux-media@vger.kernel.org
@@ -2623,7 +2629,7 @@ F: include/uapi/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
-L: intel-gfx@lists.freedesktop.org (subscribers-only)
+L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
@@ -5641,6 +5647,14 @@ S: Maintained
F: drivers/video/riva/
F: drivers/video/nvidia/
+NVM EXPRESS DRIVER
+M: Matthew Wilcox <willy@linux.intel.com>
+L: linux-nvme@lists.infradead.org
+T: git git://git.infradead.org/users/willy/linux-nvme.git
+S: Supported
+F: drivers/block/nvme.c
+F: include/linux/nvme.h
+
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@@ -5669,7 +5683,7 @@ S: Maintained
F: arch/arm/*omap*/*clock*
OMAP POWER MANAGEMENT SUPPORT
-M: Kevin Hilman <khilman@ti.com>
+M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/*omap*/*pm*
@@ -5763,7 +5777,7 @@ F: arch/arm/*omap*/usb*
OMAP GPIO DRIVER
M: Santosh Shilimkar <santosh.shilimkar@ti.com>
-M: Kevin Hilman <khilman@ti.com>
+M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-omap.c
@@ -7161,7 +7175,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
-M: Kevin Hilman <khilman@ti.com>
+M: Kevin Hilman <khilman@deeprootsystems.com>
L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
T: git git://gitorious.org/linux-davinci/linux-davinci.git
Q: http://patchwork.kernel.org/project/linux-davinci/list/
diff --git a/Makefile b/Makefile
index 22113a77f8e..54d2b2a0fef 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Unicycling Gorilla
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ecfcdba2d17..9b31f4311ea 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART
default 1
+ depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 48d00a099ce..3d3f64d2111 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -385,7 +385,7 @@
spi@7000d800 {
compatible = "nvidia,tegra20-slink";
- reg = <0x7000d480 0x200>;
+ reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 9d87a3ffe99..dbf46c27256 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -372,7 +372,7 @@
spi@7000d800 {
compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
- reg = <0x7000d480 0x200>;
+ reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 3218f1f2c0e..e7b781d3788 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
.lower_margin = 4,
.hsync_len = 1,
.vsync_len = 1,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
.lower_margin = 45,
.hsync_len = 1,
.vsync_len = 1,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
},
};
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
.lower_margin = 13,
.hsync_len = 48,
.vsync_len = 3,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
.lower_margin = 0x15,
.hsync_len = 64,
.vsync_len = 4,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
- FB_SYNC_DATA_ENABLE_HIGH_ACT |
- FB_SYNC_DOTCLK_FAILING_ACT,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
.lower_margin = 2,
.hsync_len = 15,
.vsync_len = 15,
- .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
},
};
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
}
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+ mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+ MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static void __init mxs_machine_init(void)
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index b554879bd31..3c475d6267c 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
.attrs = power7_events_attr,
};
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+struct attribute_group power7_pmu_format_group = {
+ .name = "format",
+ .attrs = power7_pmu_format_attr,
+};
+
static const struct attribute_group *power7_pmu_attr_groups[] = {
+ &power7_pmu_format_group,
&power7_pmu_events_group,
NULL,
};
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 8c5eff6d6df..47684815e5c 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index e7a3dfcbcda..dd2b8f0c631 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d3ddd17405d..5a6d2873f80 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -77,6 +77,7 @@ struct arch_specific_insn {
* a post_handler or break_handler).
*/
int boostable;
+ bool if_modifier;
};
struct arch_optimized_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 635a74d2240..4979778cc7f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
unsigned int hw_tsc_khz;
- unsigned int time_offset;
- struct page *time_page;
+ struct gfn_to_hva_cache pv_time;
+ bool pv_time_enabled;
/* set guest stopped flag in pvclock flags field */
bool pvclock_set_guest_stopped_request;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 529c8931fc0..dab7580c47a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+ INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+ INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f06e614998..7bfe318d3d8 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
else
p->ainsn.boostable = -1;
+ /* Check whether the instruction modifies Interrupt Flag or not */
+ p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+
/* Also, displacement change doesn't affect the first byte */
p->opcode = p->ainsn.insn[0];
}
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- if (is_IF_modifier(p->ainsn.insn))
+ if (p->ainsn.if_modifier)
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index 7890bc83895..d893e8ed8ac 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
struct microcode_intel ***mc_saved;
mc_saved = (struct microcode_intel ***)
- __pa_symbol(&mc_saved_data->mc_saved);
+ __pa_nodebug(&mc_saved_data->mc_saved);
for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
struct microcode_intel *p;
p = *(struct microcode_intel **)
- __pa(mc_saved_data->mc_saved + i);
- mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
+ __pa_nodebug(mc_saved_data->mc_saved + i);
+ mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
}
}
#endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
struct cpio_data cd;
long offset = 0;
#ifdef CONFIG_X86_32
- char *p = (char *)__pa_symbol(ucode_name);
+ char *p = (char *)__pa_nodebug(ucode_name);
#else
char *p = ucode_name;
#endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
if (mc_intel == NULL)
return;
- delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
+ delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+ current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
*delay_ucode_info_p = 1;
*current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
}
#endif
-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
- struct ucode_cpu_info *uci)
+static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
+ struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
#ifdef CONFIG_X86_32
struct boot_params *boot_params_p;
- boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
+ boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
ramdisk_image = boot_params_p->hdr.ramdisk_image;
ramdisk_size = boot_params_p->hdr.ramdisk_size;
initrd_start_early = ramdisk_image;
initrd_end_early = initrd_start_early + ramdisk_size;
_load_ucode_intel_bsp(
- (struct mc_saved_data *)__pa_symbol(&mc_saved_data),
- (unsigned long *)__pa_symbol(&mc_saved_in_initrd),
+ (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
initrd_start_early, initrd_end_early, &uci);
#else
ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
unsigned long *initrd_start_p;
mc_saved_in_initrd_p =
- (unsigned long *)__pa_symbol(mc_saved_in_initrd);
- mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
- initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
- initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
+ (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+ initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+ initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f8..f19ac0aca60 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
- /* Keep irq disabled to prevent changes to the clock */
- local_irq_save(flags);
- this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
- if (unlikely(this_tsc_khz == 0)) {
- local_irq_restore(flags);
- kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
- return 1;
- }
-
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+ /* Keep irq disabled to prevent changes to the clock */
+ local_irq_save(flags);
+ this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+ if (unlikely(this_tsc_khz == 0)) {
+ local_irq_restore(flags);
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+ return 1;
+ }
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+ /* Check that the address is 32-byte aligned. */
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 05928aae911..906fea31579 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
char c;
unsigned zero_len;
- for (; len; --len) {
+ for (; len; --len, to++) {
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
- if (__put_user_nocheck(c, to++, sizeof(char)))
+ if (__put_user_nocheck(c, to, sizeof(char)))
break;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 3e751b74615..a5a3ebcbdd2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -59,15 +59,16 @@ config ATA_ACPI
option libata.noacpi=1
config SATA_ZPODD
- bool "SATA Zero Power ODD Support"
+ bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
depends on ATA_ACPI
default n
help
- This option adds support for SATA ZPODD. It requires both
- ODD and the platform support, and if enabled, will automatically
- power on/off the ODD when certain condition is satisfied. This
- does not impact user's experience of the ODD, only power is saved
- when ODD is not in use(i.e. no disc inside).
+ This option adds support for SATA Zero Power Optical Disc
+ Drive (ZPODD). It requires both the ODD and the platform
+ support, and if enabled, will automatically power on/off the
+ ODD when certain condition is satisfied. This does not impact
+ end user's experience of the ODD, only power is saved when
+ the ODD is not in use (i.e. no disc inside).
If unsure, say N.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a99112cfd8b..6a67b07de49 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d2ba439cfe5..ffdd32d2260 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
static int prefer_ms_hyperv = 1;
module_param(prefer_ms_hyperv, int, 0);
+MODULE_PARM_DESC(prefer_ms_hyperv,
+ "Prefer Hyper-V paravirtualization drivers instead of ATA, "
+ "0 - Use ATA drivers, "
+ "1 (Default) - Use the paravirtualization drivers.");
static void piix_ignore_devices_quirk(struct ata_host *host)
{
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index beea3115577..8a52dab412e 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
handle = ata_dev_acpi_handle(dev);
if (handle)
- acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
+ acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
}
static void ata_acpi_unregister_power_resource(struct ata_device *dev)
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 70b0e01372b..6ef27e98c50 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = {
},
};
-static int __init pata_s3c_init(void)
-{
- return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
-}
-
-static void __exit pata_s3c_exit(void)
-{
- platform_driver_unregister(&pata_s3c_driver);
-}
-
-module_init(pata_s3c_init);
-module_exit(pata_s3c_exit);
+module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 124b2c1d9c0..608f82fed63 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1511,8 +1511,7 @@ error_exit_with_cleanup:
if (hcr_base)
iounmap(hcr_base);
- if (host_priv)
- kfree(host_priv);
+ kfree(host_priv);
return retval;
}
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 07fb2dfaae1..9dcefe40380 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}
typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
*fn = special_completion;
return CMD_CTX_INVALID;
}
- *fn = info[cmdid].fn;
+ if (fn)
+ *fn = info[cmdid].fn;
ctx = info[cmdid].ctx;
info[cmdid].fn = special_completion;
info[cmdid].ctx = CMD_CTX_COMPLETED;
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
iod->offset = offsetof(struct nvme_iod, sg[nseg]);
iod->npages = -1;
iod->length = nbytes;
+ iod->nents = 0;
}
return iod;
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
struct bio *bio = iod->private;
u16 status = le16_to_cpup(&cqe->status) >> 1;
- dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ if (iod->nents)
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_free_iod(dev, iod);
if (status) {
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
if (result < 0)
- goto free_iod;
+ goto free_cmdid;
length = result;
cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return 0;
+ free_cmdid:
+ free_cmdid(nvmeq, cmdid, NULL);
free_iod:
nvme_free_iod(nvmeq->dev, iod);
nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
return nvme_submit_admin_cmd(dev, &c, NULL);
}
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
- unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+ dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
c.features.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- return nvme_submit_admin_cmd(dev, &c, NULL);
+ return nvme_submit_admin_cmd(dev, &c, result);
}
static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ bio_endio(bio, -EIO);
+ }
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
if (length != cmd.data_len)
status = -ENOMEM;
else
- status = nvme_submit_admin_cmd(dev, &c, NULL);
+ status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
if (cmd.data_len) {
nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
nvme_free_iod(dev, iod);
}
+
+ if (!status && copy_to_user(&ucmd->result, &cmd.result,
+ sizeof(cmd.result)))
+ status = -EFAULT;
+
return status;
}
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
continue;
res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
- dma_addr + 4096);
+ dma_addr + 4096, NULL);
if (res)
- continue;
+ memset(mem + 4096, 0, 4096);
ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
if (ns)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 910b0116c12..e1d13c463c9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, i);
- if (row_dct0)
+ if (row_dct0) {
nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ csrow->channels[0]->dimm->nr_pages = nr_pages;
+ }
/* K8 has only one DCT */
- if (boot_cpu_data.x86 != 0xf && row_dct1)
- nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
+ if (boot_cpu_data.x86 != 0xf && row_dct1) {
+ int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+
+ csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
+ nr_pages += row_dct1_pages;
+ }
mtype = amd64_determine_memory_type(pvt, i);
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci)
dimm = csrow->channels[j]->dimm;
dimm->mtype = mtype;
dimm->edac_mode = edac_mode;
- dimm->nr_pages = nr_pages;
}
- csrow->nr_pages = nr_pages;
}
return empty;
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2)
mci->pvt_info = pvt;
mci->pdev = &pvt->F2->dev;
- mci->csbased = 1;
setup_mci_misc_attrs(mci, fam_type);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index cdb81aa73ab..27e86d93826 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
edac_dimm_info_location(dimm, location, sizeof(location));
edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
- dimm->mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->mci->csbased ? "rank" : "dimm",
number, location, dimm->csrow, dimm->cschannel);
edac_dbg(4, " dimm = %p\n", dimm);
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
mci->nr_csrows = tot_csrows;
mci->num_cschannel = tot_channels;
- mci->mem_is_per_rank = per_rank;
+ mci->csbased = per_rank;
/*
* Alocate and fill the csrow/channels structs
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* incrementing the compat API counters
*/
edac_dbg(4, "%s csrows map: (%d,%d)\n",
- mci->mem_is_per_rank ? "rank" : "dimm",
+ mci->csbased ? "rank" : "dimm",
dimm->csrow, dimm->cschannel);
if (row == -1)
row = dimm->csrow;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4f4b6137d74..5899a76eec3 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -143,7 +143,7 @@ static const char *edac_caps[] = {
* and the per-dimm/per-rank one
*/
#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
- struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
+ static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
struct dev_ch_attribute {
struct device_attribute attr;
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev,
int i;
u32 nr_pages = 0;
- if (csrow->mci->csbased)
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
-
for (i = 0; i < csrow->nr_channels; i++)
nr_pages += csrow->channels[i]->dimm->nr_pages;
return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
device_initialize(&dimm->dev);
dimm->dev.parent = &mci->dev;
- if (mci->mem_is_per_rank)
+ if (mci->csbased)
dev_set_name(&dimm->dev, "rank%d", index);
else
dev_set_name(&dimm->dev, "dimm%d", index);
@@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev,
for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = mci->csrows[csrow_idx];
- if (csrow->mci->csbased) {
- total_pages += csrow->nr_pages;
- } else {
- for (j = 0; j < csrow->nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j]->dimm;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
- total_pages += dimm->nr_pages;
- }
+ total_pages += dimm->nr_pages;
}
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9b00072a020..42c759a4d04 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -53,6 +53,24 @@ config EFI_VARS
Subsequent efibootmgr releases may be found at:
<http://linux.dell.com/efibootmgr>
+config EFI_VARS_PSTORE
+ bool "Register efivars backend for pstore"
+ depends on EFI_VARS && PSTORE
+ default y
+ help
+ Say Y here to enable use efivars as a backend to pstore. This
+ will allow writing console messages, crash dumps, or anything
+ else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+ bool "Disable using efivars as a pstore backend by default"
+ depends on EFI_VARS_PSTORE
+ default n
+ help
+ Saying Y here will disable the use of efivars as a storage
+ backend for pstore by default. This setting can be overridden
+ using the efivars module's pstore_disable parameter.
+
config EFI_PCDP
bool "Console device selection via EFI PCDP or HCDP table"
depends on ACPI && EFI && IA64
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index fe62aa39223..7acafb80fd4 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
*/
#define GUID_LEN 36
+static bool efivars_pstore_disable =
+ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
static void efivar_update_sysfs_entries(struct work_struct *);
static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+static bool efivar_wq_enabled = true;
/* Return the number of unicode characters in data */
static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
.create = efivarfs_create,
};
-static struct pstore_info efi_pstore_info;
-
-#ifdef CONFIG_PSTORE
+#ifdef CONFIG_EFI_VARS_PSTORE
static int efi_pstore_open(struct pstore_info *psi)
{
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
spin_unlock_irqrestore(&efivars->lock, flags);
- if (reason == KMSG_DUMP_OOPS)
+ if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
schedule_work(&efivar_work);
*id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
return 0;
}
-#else
-static int efi_pstore_open(struct pstore_info *psi)
-{
- return 0;
-}
-
-static int efi_pstore_close(struct pstore_info *psi)
-{
- return 0;
-}
-
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
- struct timespec *timespec,
- char **buf, struct pstore_info *psi)
-{
- return -1;
-}
-
-static int efi_pstore_write(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, size_t size,
- struct pstore_info *psi)
-{
- return 0;
-}
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
- struct timespec time, struct pstore_info *psi)
-{
- return 0;
-}
-#endif
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
+static void efivar_pstore_register(struct efivars *efivars)
+{
+ efivars->efi_pstore_info = efi_pstore_info;
+ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (efivars->efi_pstore_info.buf) {
+ efivars->efi_pstore_info.bufsize = 1024;
+ efivars->efi_pstore_info.data = efivars;
+ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+ pstore_register(&efivars->efi_pstore_info);
+ }
+}
+#else
+static void efivar_pstore_register(struct efivars *efivars)
+{
+ return;
+}
+#endif
+
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
return found;
}
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+ unsigned long variable_name_size)
+{
+ unsigned long len;
+ efi_char16_t c;
+
+ /*
+ * The variable name is, by definition, a NULL-terminated
+ * string, so make absolutely sure that variable_name_size is
+ * the value we expect it to be. If not, return the real size.
+ */
+ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+ c = variable_name[(len / sizeof(c)) - 1];
+ if (!c)
+ break;
+ }
+
+ return min(len, variable_name_size);
+}
+
static void efivar_update_sysfs_entries(struct work_struct *work)
{
struct efivars *efivars = &__efivars;
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
if (!found) {
kfree(variable_name);
break;
- } else
+ } else {
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
efivar_create_sysfs_entry(efivars,
variable_name_size,
variable_name, &vendor);
+ }
}
}
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
}
EXPORT_SYMBOL_GPL(unregister_efivars);
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
+ unsigned long len16)
+{
+ size_t i, len8 = len16 / sizeof(efi_char16_t);
+ char *s8;
+
+ /*
+ * Disable the workqueue since the algorithm it uses for
+ * detecting new variables won't work with this buggy
+ * implementation of GetNextVariableName().
+ */
+ efivar_wq_enabled = false;
+
+ s8 = kzalloc(len8, GFP_KERNEL);
+ if (!s8)
+ return;
+
+ for (i = 0; i < len8; i++)
+ s8[i] = s16[i];
+
+ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+ s8, vendor_guid);
+ kfree(s8);
+}
+
int register_efivars(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
&vendor_guid);
switch (status) {
case EFI_SUCCESS:
+ variable_name_size = var_name_strnsize(variable_name,
+ variable_name_size);
+
+ /*
+ * Some firmware implementations return the
+ * same variable name on multiple calls to
+ * get_next_variable(). Terminate the loop
+ * immediately as there is no guarantee that
+ * we'll ever see a different variable name,
+ * and may end up looping here forever.
+ */
+ if (variable_is_present(variable_name, &vendor_guid)) {
+ dup_variable_bug(variable_name, &vendor_guid,
+ variable_name_size);
+ status = EFI_NOT_FOUND;
+ break;
+ }
+
efivar_create_sysfs_entry(efivars,
variable_name_size,
variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
if (error)
unregister_efivars(efivars);
- efivars->efi_pstore_info = efi_pstore_info;
-
- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
- if (efivars->efi_pstore_info.buf) {
- efivars->efi_pstore_info.bufsize = 1024;
- efivars->efi_pstore_info.data = efivars;
- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
- pstore_register(&efivars->efi_pstore_info);
- }
+ if (!efivars_pstore_disable)
+ efivar_pstore_register(efivars);
register_filesystem(&efivarfs_type);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a71a54a3e3f..5150df6cba0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!np)
return;
- do {
+ for (;; index++) {
ret = of_parse_phandle_with_args(np, "gpio-ranges",
"#gpio-range-cells", index, &pinspec);
if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (ret)
break;
-
- } while (index++);
+ }
}
#else
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c194f4e680a..e2acfdbf7d3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
@@ -1715,6 +1715,7 @@ set_size:
}
mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
return mode;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 36493ce71f9..98cc14725ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -38,11 +38,12 @@
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
-/* size control register for hardware window 0. */
-#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08)
-/* alpha control register for hardware window 1 ~ 4. */
-#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16)
-/* size control register for hardware window 1 ~ 4. */
+/*
+ * size control register for hardware windows 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
/* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8))
+#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8))
/* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8))
+#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
@@ -109,9 +110,9 @@ struct fimd_context {
#ifdef CONFIG_OF
static const struct of_device_id fimd_driver_dt_match[] = {
- { .compatible = "samsung,exynos4-fimd",
+ { .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
- { .compatible = "samsung,exynos5-fimd",
+ { .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{},
};
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
if (win != 3 && win != 4) {
u32 offset = VIDOSD_D(win);
if (win == 0)
- offset = VIDOSD_C_SIZE_W0;
+ offset = VIDOSD_C(win);
val = win_data->ovl_width * win_data->ovl_height;
writel(val, ctx->regs + offset);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3b0da0378ac..47a493c8a71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,8 +48,14 @@
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_COLOR_MODE 0x030C
+#define G2D_SRC_LEFT_TOP 0x0310
+#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_COLOR_MODE 0x040C
+#define G2D_DST_LEFT_TOP 0x0410
+#define G2D_DST_RIGHT_BOTTOM 0x0414
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
#define G2D_PAT_BASE_ADDR 0x0500
#define G2D_MSK_BASE_ADDR 0x0520
@@ -82,7 +88,7 @@
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
/* G2D_DMA_HOLD_CMD */
-#define G2D_USET_HOLD (1 << 2)
+#define G2D_USER_HOLD (1 << 2)
#define G2D_LIST_HOLD (1 << 1)
#define G2D_BITBLT_HOLD (1 << 0)
@@ -91,13 +97,27 @@
#define G2D_START_NHOLT (1 << 1)
#define G2D_START_BITBLT (1 << 0)
+/* buffer color format */
+#define G2D_FMT_XRGB8888 0
+#define G2D_FMT_ARGB8888 1
+#define G2D_FMT_RGB565 2
+#define G2D_FMT_XRGB1555 3
+#define G2D_FMT_ARGB1555 4
+#define G2D_FMT_XRGB4444 5
+#define G2D_FMT_ARGB4444 6
+#define G2D_FMT_PACKED_RGB888 7
+#define G2D_FMT_A8 11
+#define G2D_FMT_L8 12
+
+/* buffer valid length */
+#define G2D_LEN_MIN 1
+#define G2D_LEN_MAX 8000
+
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM 64
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
-#define MAX_BUF_ADDR_NR 6
-
/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL (64 * 1024 * 1024)
@@ -106,6 +126,17 @@ enum {
BUF_TYPE_USERPTR,
};
+enum g2d_reg_type {
+ REG_TYPE_NONE = -1,
+ REG_TYPE_SRC,
+ REG_TYPE_SRC_PLANE2,
+ REG_TYPE_DST,
+ REG_TYPE_DST_PLANE2,
+ REG_TYPE_PAT,
+ REG_TYPE_MSK,
+ MAX_REG_TYPE_NR
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
@@ -113,6 +144,42 @@ struct g2d_cmdlist {
u32 last; /* last data offset */
};
+/*
+ * A structure of buffer description
+ *
+ * @format: color format
+ * @left_x: the x coordinates of left top corner
+ * @top_y: the y coordinates of left top corner
+ * @right_x: the x coordinates of right bottom corner
+ * @bottom_y: the y coordinates of right bottom corner
+ *
+ */
+struct g2d_buf_desc {
+ unsigned int format;
+ unsigned int left_x;
+ unsigned int top_y;
+ unsigned int right_x;
+ unsigned int bottom_y;
+};
+
+/*
+ * A structure holding buffer information
+ *
+ * @map_nr: the number of mapped buffers
+ * @reg_types: stores the register type in the order of the requested commands
+ * @handles: stores the buffer handle at its reg_type position
+ * @types: stores the buffer type at its reg_type position
+ * @descs: stores the buffer description at its reg_type position
+ *
+ */
+struct g2d_buf_info {
+ unsigned int map_nr;
+ enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
+ unsigned long handles[MAX_REG_TYPE_NR];
+ unsigned int types[MAX_REG_TYPE_NR];
+ struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
+};
+
struct drm_exynos_pending_g2d_event {
struct drm_pending_event base;
struct drm_exynos_g2d_event event;
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
bool in_pool;
bool out_of_list;
};
-
struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
- unsigned int map_nr;
- unsigned long handles[MAX_BUF_ADDR_NR];
- unsigned int obj_type[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
+ struct g2d_buf_info buf_info;
struct drm_exynos_pending_g2d_event *event;
};
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
+ struct g2d_buf_info *buf_info;
init_dma_attrs(&g2d->cmdlist_dma_attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
}
for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+ unsigned int i;
+
node[nr].cmdlist =
g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
node[nr].dma_addr =
g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+ buf_info = &node[nr].buf_info;
+ for (i = 0; i < MAX_REG_TYPE_NR; i++)
+ buf_info->reg_types[i] = REG_TYPE_NONE;
+
list_add_tail(&node[nr].list, &g2d->free_cmdlist);
}
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
DMA_BIDIRECTIONAL);
if (ret < 0) {
DRM_ERROR("failed to map sgt with dma region.\n");
- goto err_free_sgt;
+ goto err_sg_free_table;
}
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return &g2d_userptr->dma_addr;
-err_free_sgt:
+err_sg_free_table:
sg_free_table(sgt);
+
+err_free_sgt:
kfree(sgt);
sgt = NULL;
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
g2d->current_pool = 0;
}
+static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+{
+ enum g2d_reg_type reg_type;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_COLOR_MODE:
+ case G2D_SRC_LEFT_TOP:
+ case G2D_SRC_RIGHT_BOTTOM:
+ reg_type = REG_TYPE_SRC;
+ break;
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ reg_type = REG_TYPE_SRC_PLANE2;
+ break;
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_COLOR_MODE:
+ case G2D_DST_LEFT_TOP:
+ case G2D_DST_RIGHT_BOTTOM:
+ reg_type = REG_TYPE_DST;
+ break;
+ case G2D_DST_PLANE2_BASE_ADDR:
+ reg_type = REG_TYPE_DST_PLANE2;
+ break;
+ case G2D_PAT_BASE_ADDR:
+ reg_type = REG_TYPE_PAT;
+ break;
+ case G2D_MSK_BASE_ADDR:
+ reg_type = REG_TYPE_MSK;
+ break;
+ default:
+ reg_type = REG_TYPE_NONE;
+ DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+ break;
+ }
+
+ return reg_type;
+}
+
+static unsigned long g2d_get_buf_bpp(unsigned int format)
+{
+ unsigned long bpp;
+
+ switch (format) {
+ case G2D_FMT_XRGB8888:
+ case G2D_FMT_ARGB8888:
+ bpp = 4;
+ break;
+ case G2D_FMT_RGB565:
+ case G2D_FMT_XRGB1555:
+ case G2D_FMT_ARGB1555:
+ case G2D_FMT_XRGB4444:
+ case G2D_FMT_ARGB4444:
+ bpp = 2;
+ break;
+ case G2D_FMT_PACKED_RGB888:
+ bpp = 3;
+ break;
+ default:
+ bpp = 1;
+ break;
+ }
+
+ return bpp;
+}
+
+static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
+ enum g2d_reg_type reg_type,
+ unsigned long size)
+{
+ unsigned int width, height;
+ unsigned long area;
+
+ /*
+ * Check source and destination buffers only;
+ * the others are always considered valid.
+ */
+ if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
+ return true;
+
+ width = buf_desc->right_x - buf_desc->left_x;
+ if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
+ DRM_ERROR("width[%u] is out of range!\n", width);
+ return false;
+ }
+
+ height = buf_desc->bottom_y - buf_desc->top_y;
+ if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
+ DRM_ERROR("height[%u] is out of range!\n", height);
+ return false;
+ }
+
+ area = (unsigned long)width * (unsigned long)height *
+ g2d_get_buf_bpp(buf_desc->format);
+ if (area > size) {
+ DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+ return false;
+ }
+
+ return true;
+}
+
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_device *drm_dev,
struct drm_file *file)
{
struct g2d_cmdlist *cmdlist = node->cmdlist;
+ struct g2d_buf_info *buf_info = &node->buf_info;
int offset;
+ int ret;
int i;
- for (i = 0; i < node->map_nr; i++) {
+ for (i = 0; i < buf_info->map_nr; i++) {
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ int reg_pos;
unsigned long handle;
dma_addr_t *addr;
- offset = cmdlist->last - (i * 2 + 1);
- handle = cmdlist->data[offset];
+ reg_pos = cmdlist->last - 2 * (i + 1);
+
+ offset = cmdlist->data[reg_pos];
+ handle = cmdlist->data[reg_pos + 1];
+
+ reg_type = g2d_get_reg_type(offset);
+ if (reg_type == REG_TYPE_NONE) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ buf_desc = &buf_info->descs[reg_type];
+
+ if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
+ unsigned long size;
+
+ size = exynos_drm_gem_get_size(drm_dev, handle, file);
+ if (!size) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+ size)) {
+ ret = -EFAULT;
+ goto err;
+ }
- if (node->obj_type[i] == BUF_TYPE_GEM) {
addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
file);
if (IS_ERR(addr)) {
- node->map_nr = i;
- return -EFAULT;
+ ret = -EFAULT;
+ goto err;
}
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
if (copy_from_user(&g2d_userptr, (void __user *)handle,
sizeof(struct drm_exynos_g2d_userptr))) {
- node->map_nr = i;
- return -EFAULT;
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+ g2d_userptr.size)) {
+ ret = -EFAULT;
+ goto err;
}
addr = g2d_userptr_get_dma_addr(drm_dev,
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
file,
&handle);
if (IS_ERR(addr)) {
- node->map_nr = i;
- return -EFAULT;
+ ret = -EFAULT;
+ goto err;
}
}
- cmdlist->data[offset] = *addr;
- node->handles[i] = handle;
+ cmdlist->data[reg_pos + 1] = *addr;
+ buf_info->reg_types[i] = reg_type;
+ buf_info->handles[reg_type] = handle;
}
return 0;
+
+err:
+ buf_info->map_nr = i;
+ return ret;
}
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct drm_file *filp)
{
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+ struct g2d_buf_info *buf_info = &node->buf_info;
int i;
- for (i = 0; i < node->map_nr; i++) {
- unsigned long handle = node->handles[i];
+ for (i = 0; i < buf_info->map_nr; i++) {
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ unsigned long handle;
+
+ reg_type = buf_info->reg_types[i];
+
+ buf_desc = &buf_info->descs[reg_type];
+ handle = buf_info->handles[reg_type];
- if (node->obj_type[i] == BUF_TYPE_GEM)
+ if (buf_info->types[reg_type] == BUF_TYPE_GEM)
exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
filp);
else
g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
false);
- node->handles[i] = 0;
+ buf_info->reg_types[i] = REG_TYPE_NONE;
+ buf_info->handles[reg_type] = 0;
+ buf_info->types[reg_type] = 0;
+ memset(buf_desc, 0x00, sizeof(*buf_desc));
}
- node->map_nr = 0;
+ buf_info->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
pm_runtime_get_sync(g2d->dev);
clk_enable(g2d->gate_clk);
- /* interrupt enable */
- writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
- g2d->regs + G2D_INTEN);
-
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
@@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
struct g2d_data *g2d = container_of(work, struct g2d_data,
runqueue_work);
-
mutex_lock(&g2d->runqueue_mutex);
clk_disable(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
int i;
for (i = 0; i < nr; i++) {
- index = cmdlist->last - 2 * (i + 1);
+ struct g2d_buf_info *buf_info = &node->buf_info;
+ struct g2d_buf_desc *buf_desc;
+ enum g2d_reg_type reg_type;
+ unsigned long value;
- if (for_addr) {
- /* check userptr buffer type. */
- reg_offset = (cmdlist->data[index] &
- ~0x7fffffff) >> 31;
- if (reg_offset) {
- node->obj_type[i] = BUF_TYPE_USERPTR;
- cmdlist->data[index] &= ~G2D_BUF_USERPTR;
- }
- }
+ index = cmdlist->last - 2 * (i + 1);
reg_offset = cmdlist->data[index] & ~0xfffff000;
-
if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
goto err;
if (reg_offset % 4)
@@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev,
if (!for_addr)
goto err;
- if (node->obj_type[i] != BUF_TYPE_USERPTR)
- node->obj_type[i] = BUF_TYPE_GEM;
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ /* check userptr buffer type. */
+ if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
+ buf_info->types[reg_type] = BUF_TYPE_USERPTR;
+ cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+ } else
+ buf_info->types[reg_type] = BUF_TYPE_GEM;
+ break;
+ case G2D_SRC_COLOR_MODE:
+ case G2D_DST_COLOR_MODE:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->format = value & 0xf;
+ break;
+ case G2D_SRC_LEFT_TOP:
+ case G2D_DST_LEFT_TOP:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->left_x = value & 0x1fff;
+ buf_desc->top_y = (value & 0x1fff0000) >> 16;
+ break;
+ case G2D_SRC_RIGHT_BOTTOM:
+ case G2D_DST_RIGHT_BOTTOM:
+ if (for_addr)
+ goto err;
+
+ reg_type = g2d_get_reg_type(reg_offset);
+ if (reg_type == REG_TYPE_NONE)
+ goto err;
+
+ buf_desc = &buf_info->descs[reg_type];
+ value = cmdlist->data[index + 1];
+
+ buf_desc->right_x = value & 0x1fff;
+ buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
break;
default:
if (for_addr)
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
cmdlist->data[cmdlist->last++] = 0;
+ /*
+ * The 'LIST_HOLD' command should be written to the DMA_HOLD_CMD
+ * register and the GCF bit set in the INTEN register if the user
+ * wants a G2D interrupt event once the current command list has
+ * finished executing.
+ * Otherwise only the ACF bit should be set in the INTEN register
+ * so that a single interrupt occurs after all command lists have
+ * been completed.
+ */
if (node->event) {
+ cmdlist->data[cmdlist->last++] = G2D_INTEN;
+ cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+ } else {
+ cmdlist->data[cmdlist->last++] = G2D_INTEN;
+ cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
/* Check size of cmdlist: the last 2 entries are for G2D_BITBLT_START */
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
if (ret < 0)
goto err_free_event;
- node->map_nr = req->cmd_buf_nr;
+ node->buf_info.map_nr = req->cmd_buf_nr;
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 67e17ce112b..0e6fe000578 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -164,6 +164,27 @@ out:
exynos_gem_obj = NULL;
}
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return 0;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return exynos_gem_obj->buffer->size;
+}
+
+
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 35ebac47dc2..468766bee45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get the buffer size for a gem handle. */
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv);
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 13ccbd4bcfa..9504b0cd825 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
- edid = kzalloc(edid_len, GFP_KERNEL);
+ edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEBUG_KMS("failed to allocate edid\n");
return ERR_PTR(-ENOMEM);
}
- memcpy(edid, ctx->raw_edid, edid_len);
return edid;
}
@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
- ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
+ ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
return -ENOMEM;
}
- memcpy(ctx->raw_edid, raw_edid, edid_len);
} else {
/*
* with connection = 0, free raw_edid
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e919aba29b3..2f4f72f0704 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
mixer_ctx->win_data[win].enabled = false;
}
-int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
{
struct mixer_context *mixer_ctx = ctx;
u32 w, h;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aae31489c89..7299ea45dd0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+ seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0a8eceb7590..e9b57893db2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
"Enable Haswell and ValleyView Support. "
"(default: false)");
+int i915_disable_power_well __read_mostly = 0;
+module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+ "Disable the power well when possible (default: false)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e95337c9745..01769e2a995 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
+extern int i915_disable_power_well __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2f2daebd0ee..3b11ab0fbc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
+ int relocs_total = 0;
+ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
return -EINVAL;
- /* First check for malicious input causing overflow */
- if (exec[i].relocation_count >
- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ /* First check for malicious input causing overflow in
+ * the worst case where we need to allocate the entire
+ * relocation tree as a single array.
+ */
+ if (exec[i].relocation_count > relocs_max - relocs_total)
return -EINVAL;
+ relocs_total += exec[i].relocation_count;
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 287b42c9d1a..b20d50192fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
+ if (is_cpu_edp)
+ intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+ else
+ intel_crtc->cpu_transcoder = pipe;
+
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
- if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
- intel_crtc->cpu_transcoder = TRANSCODER_EDP;
- else
- intel_crtc->cpu_transcoder = pipe;
-
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6f728e5ee79..d7d4afe0134 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+ int target_clock;
/*
* Find the lane count in the intel_encoder private
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
+ target_clock = mode->clock;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ target_clock = intel_edp_target_clock(intel_encoder,
+ mode);
+ break;
+ }
+ }
+
/*
* Compute the GMCH and Link ratios. The '3' here is
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
intel_link_compute_m_n(intel_crtc->bpp, lane_count,
- mode->clock, adjusted_mode->clock, &m_n);
+ target_clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count && voltage_tries == 5) {
+ if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index acf8aec9ada..ef4744e1bf0 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+/*
+ * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
+ * mode. This results in spurious interrupt warnings if the legacy irq no. is
+ * shared with another device. The kernel then disables that interrupt source
+ * and so prevents the other device from working properly.
+ */
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
+ if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ gmbus4_irq_en = 0;
+
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves. */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a3730e0289e..bee8cb6108a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
- dev_priv->backlight_enabled = true;
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
@@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
}
set_level:
- /* Check the current backlight level and try to set again if it's zero.
- * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
- * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
+ /* Set the backlight level below, after BLC_PWM_CPU_CTL2 and
+ * BLC_PWM_PCH_CTL1 have been written. BLC_PWM_CPU_CTL may be
+ * cleared to zero automatically when these registers are set.
*/
- if (!intel_panel_get_backlight(dev))
- intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+ dev_priv->backlight_enabled = true;
+ intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a1794c6df1b..adca00783e6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
if (!IS_HASWELL(dev))
return;
+ if (!i915_disable_power_well && !enable)
+ return;
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a274b9906ef..fe22bb780e1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
m = n = p = 0;
vcomax = 800000;
vcomin = 400000;
- pllreffreq = 3333;
+ pllreffreq = 33333;
delta = 0xffffffff;
permitteddelta = clock * 5 / 1000;
- for (testp = 16; testp > 0; testp--) {
+ for (testp = 16; testp > 0; testp >>= 1) {
if (clock * testp > vcomax)
continue;
if (clock * testp < vcomin)
continue;
for (testm = 1; testm < 33; testm++) {
- for (testn = 1; testn < 257; testn++) {
+ for (testn = 17; testn < 257; testn++) {
computed = (pllreffreq * testn) /
(testm * testp);
if (computed > clock)
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
if (tmpdelta < delta) {
delta = tmpdelta;
n = testn - 1;
- m = (testm - 1) | ((n >> 1) & 0x80);
+ m = (testm - 1);
p = testp - 1;
}
if ((clock * testp) >= 600000)
- p |= 80;
+ p |= 0x80;
}
}
}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 0daab62ea14..3b2e7b6304d 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
struct nouveau_object *parent = NULL;
struct nouveau_object *namedb = NULL;
struct nouveau_handle *handle = NULL;
- int ret = -EINVAL;
parent = nouveau_handle_ref(client, _parent);
if (!parent)
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
}
nouveau_object_ref(NULL, &parent);
- return ret;
+ return handle ? 0 : -EINVAL;
}
int
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index 6b17b614629..0b20fc0d19c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,7 +4,7 @@
#include <core/device.h>
#include <core/subdev.h>
-enum nouveau_therm_mode {
+enum nouveau_therm_fan_mode {
NOUVEAU_THERM_CTRL_NONE = 0,
NOUVEAU_THERM_CTRL_MANUAL = 1,
NOUVEAU_THERM_CTRL_AUTO = 2,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f794dc89a3b..a00a5a76e2d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm)
}
int
-nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
{
struct nouveau_therm_priv *priv = (void *)therm;
struct nouveau_device *device = nv_device(therm);
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode)
(mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
return -EINVAL;
+ /* do not allow automatic fan management if the thermal sensor is
+ * not available */
+ if (priv->mode == 2 && therm->temp_get(therm) < 0)
+ return -EINVAL;
+
if (priv->mode == mode)
return 0;
- nv_info(therm, "Thermal management: %s\n", name[mode]);
+ nv_info(therm, "fan management: %s\n", name[mode]);
nouveau_therm_update(therm, mode);
return 0;
}
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
priv->fan->bios.max_duty = value;
return 0;
case NOUVEAU_THERM_ATTR_FAN_MODE:
- return nouveau_therm_mode(therm, value);
+ return nouveau_therm_fan_mode(therm, value);
case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
priv->bios_sensor.thrs_fan_boost.temp = value;
priv->sensor.program_alarms(therm);
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object)
return ret;
if (priv->suspend >= 0)
- nouveau_therm_mode(therm, priv->mode);
+ nouveau_therm_fan_mode(therm, priv->mode);
priv->sensor.program_alarms(therm);
return 0;
}
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent,
int
nouveau_therm_preinit(struct nouveau_therm *therm)
{
- nouveau_therm_ic_ctor(therm);
nouveau_therm_sensor_ctor(therm);
+ nouveau_therm_ic_ctor(therm);
nouveau_therm_fan_ctor(therm);
- nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+ nouveau_therm_sensor_preinit(therm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e24090bac19..8b3adec5fbb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
struct i2c_board_info *info)
{
struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+ struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
struct i2c_client *client;
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
}
nv_info(priv,
- "Found an %s at address 0x%x (controlled by lm_sensors)\n",
- info->type, info->addr);
+ "Found an %s at address 0x%x (controlled by lm_sensors, "
+ "temp offset %+i C)\n",
+ info->type, info->addr, sensor->offset_constant);
priv->ic = client;
return true;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index 0f5363edb96..a70d1b7e397 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -29,54 +29,83 @@ struct nv40_therm_priv {
struct nouveau_therm_priv base;
};
+enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
+
+static enum nv40_sensor_style
+nv40_sensor_style(struct nouveau_therm *therm)
+{
+ struct nouveau_device *device = nv_device(therm);
+
+ switch (device->chipset) {
+ case 0x43:
+ case 0x44:
+ case 0x4a:
+ case 0x47:
+ return OLD_STYLE;
+
+ case 0x46:
+ case 0x49:
+ case 0x4b:
+ case 0x4e:
+ case 0x4c:
+ case 0x67:
+ case 0x68:
+ case 0x63:
+ return NEW_STYLE;
+ default:
+ return INVALID_STYLE;
+ }
+}
+
static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
- struct nouveau_device *device = nv_device(therm);
+ enum nv40_sensor_style style = nv40_sensor_style(therm);
/* enable ADC readout and disable the ALARM threshold */
- if (device->chipset >= 0x46) {
+ if (style == NEW_STYLE) {
nv_mask(therm, 0x15b8, 0x80000000, 0);
nv_wr32(therm, 0x15b0, 0x80003fff);
- mdelay(10); /* wait for the temperature to stabilize */
+ mdelay(20); /* wait for the temperature to stabilize */
return nv_rd32(therm, 0x15b4) & 0x3fff;
- } else {
+ } else if (style == OLD_STYLE) {
nv_wr32(therm, 0x15b0, 0xff);
+ mdelay(20); /* wait for the temperature to stabilize */
return nv_rd32(therm, 0x15b4) & 0xff;
- }
+ } else
+ return -ENODEV;
}
static int
nv40_temp_get(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- struct nouveau_device *device = nv_device(therm);
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+ enum nv40_sensor_style style = nv40_sensor_style(therm);
int core_temp;
- if (device->chipset >= 0x46) {
+ if (style == NEW_STYLE) {
nv_wr32(therm, 0x15b0, 0x80003fff);
core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
- } else {
+ } else if (style == OLD_STYLE) {
nv_wr32(therm, 0x15b0, 0xff);
core_temp = nv_rd32(therm, 0x15b4) & 0xff;
- }
-
- /* Setup the sensor if the temperature is 0 */
- if (core_temp == 0)
- core_temp = nv40_sensor_setup(therm);
+ } else
+ return -ENODEV;
- if (sensor->slope_div == 0)
- sensor->slope_div = 1;
- if (sensor->offset_den == 0)
- sensor->offset_den = 1;
- if (sensor->slope_mult < 1)
- sensor->slope_mult = 1;
+ /* if the slope or the offset is unset, do not use the sensor */
+ if (!sensor->slope_div || !sensor->slope_mult ||
+ !sensor->offset_num || !sensor->offset_den)
+ return -ENODEV;
core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
core_temp = core_temp + sensor->offset_num / sensor->offset_den;
core_temp = core_temp + sensor->offset_constant - 8;
+ /* reserve negative temperatures for errors */
+ if (core_temp < 0)
+ core_temp = 0;
+
return core_temp;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 06b98706b3f..438d9824b77 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -102,7 +102,7 @@ struct nouveau_therm_priv {
struct i2c_client *ic;
};
-int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
+int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_attr_get(struct nouveau_therm *therm,
enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
int nouveau_therm_preinit(struct nouveau_therm *);
+void nouveau_therm_sensor_preinit(struct nouveau_therm *);
void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
enum nouveau_therm_thrs thrs,
enum nouveau_therm_thrs_state st);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b37624af829..470f6a47b65 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
- priv->bios_sensor.slope_mult = 1;
- priv->bios_sensor.slope_div = 1;
- priv->bios_sensor.offset_num = 0;
- priv->bios_sensor.offset_den = 1;
priv->bios_sensor.offset_constant = 0;
priv->bios_sensor.thrs_fan_boost.temp = 90;
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
struct nouveau_therm_priv *priv = (void *)therm;
struct nvbios_therm_sensor *s = &priv->bios_sensor;
- if (!priv->bios_sensor.slope_div)
- priv->bios_sensor.slope_div = 1;
- if (!priv->bios_sensor.offset_den)
- priv->bios_sensor.offset_den = 1;
-
/* enforce a minimum hysteresis on thresholds */
s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
const char *thresolds[] = {
"fanboost", "downclock", "critical", "shutdown"
};
- uint8_t temperature = therm->temp_get(therm);
+ int temperature = therm->temp_get(therm);
if (thrs < 0 || thrs > 3)
return;
if (dir == NOUVEAU_THERM_THRS_FALLING)
- nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
+ nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
temperature, thresolds[thrs]);
else
- nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
+ nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
temperature, thresolds[thrs]);
active = (dir == NOUVEAU_THERM_THRS_RISING);
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
case NOUVEAU_THERM_THRS_FANBOOST:
if (active) {
nouveau_therm_fan_set(therm, true, 100);
- nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+ nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
}
break;
case NOUVEAU_THERM_THRS_DOWNCLOCK:
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
NOUVEAU_THERM_THRS_SHUTDOWN);
/* schedule the next poll in one second */
- if (list_empty(&alarm->head))
+ if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
+void
+nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
+{
+ const char *sensor_avail = "yes";
+
+ if (therm->temp_get(therm) < 0)
+ sensor_avail = "no";
+
+ nv_info(therm, "internal sensor: %s\n", sensor_avail);
+}
+
int
nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index bb54098c6d9..936b442a6ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_therm *therm = nouveau_therm(drm->device);
+ int temp = therm->temp_get(therm);
- return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
+ if (temp < 0)
+ return temp;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
NULL, 0);
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
nouveau_hwmon_get_pwm1_max,
nouveau_hwmon_set_pwm1_max, 0);
-static struct attribute *hwmon_attributes[] = {
+static struct attribute *hwmon_default_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_update_rate.dev_attr.attr,
+ NULL
+};
+static struct attribute *hwmon_temp_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_emergency.dev_attr.attr,
&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
- &sensor_dev_attr_name.dev_attr.attr,
- &sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
static struct attribute *hwmon_fan_rpm_attributes[] = {
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
NULL
};
-static const struct attribute_group hwmon_attrgroup = {
- .attrs = hwmon_attributes,
+static const struct attribute_group hwmon_default_attrgroup = {
+ .attrs = hwmon_default_attributes,
+};
+static const struct attribute_group hwmon_temp_attrgroup = {
+ .attrs = hwmon_temp_attributes,
};
static const struct attribute_group hwmon_fan_rpm_attrgroup = {
.attrs = hwmon_fan_rpm_attributes,
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev)
}
dev_set_drvdata(hwmon_dev, dev);
- /* default sysfs entries */
- ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
+ /* set the default attributes */
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
if (ret) {
if (ret)
goto error;
}
+ /* if the card has a working thermal sensor */
+ if (therm->temp_get(therm) >= 0) {
+ ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+ if (ret) {
+ if (ret)
+ goto error;
+ }
+ }
+
/* if the card has a pwm fan */
/*XXX: incorrect, need better detection for this, some boards have
* the gpio entries for pwm fan control even when there's no
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
struct nouveau_pm *pm = nouveau_pm(dev);
if (pm->hwmon) {
- sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
- sysfs_remove_group(&pm->hwmon->kobj,
- &hwmon_pwm_fan_attrgroup);
- sysfs_remove_group(&pm->hwmon->kobj,
- &hwmon_fan_rpm_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
hwmon_device_unregister(pm->hwmon);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2db57990f65..7f0e6c3f37d 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
+ if (chan == NULL)
+ evo_sync(crtc->dev);
push = evo_wait(sync, 128);
if (unlikely(push == NULL))
@@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
sync->addr ^= 0x10;
sync->data++;
FIRE_RING (chan);
- } else {
- evo_sync(crtc->dev);
}
/* queue the flip */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d4c633e1286..27769e724b6 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x9907) ||
(rdev->pdev->device == 0x9908) ||
(rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x990B) ||
+ (rdev->pdev->device == 0x990C) ||
+ (rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
- (rdev->pdev->device == 0x9917)) {
+ (rdev->pdev->device == 0x9917) ||
+ (rdev->pdev->device == 0x9999)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x990D) ||
+ (rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
(rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
@@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
(rdev->pdev->device == 0x9994) ||
+ (rdev->pdev->device == 0x9995) ||
+ (rdev->pdev->device == 0x9996) ||
+ (rdev->pdev->device == 0x999A) ||
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
@@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp,
- rdev->config.cayman.max_backends_per_se *
- rdev->config.cayman.max_shader_engines,
- CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.cayman.max_backends_per_se == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp,
+ rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines,
+ CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);
cgts_tcc_disable = 0xffff0000;
@@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index bedda9caadd..6e05a2e75a4 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
goto out_cleanup;
}
- /* r100 doesn't have dma engine so skip the test */
- /* also, VRAM-to-VRAM test doesn't make much sense for DMA */
- /* skip it as well if domains are the same */
- if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
+ if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
@@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
sdomain, ddomain, "dma");
}
- time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_BLIT, n);
- if (time < 0)
- goto out_cleanup;
- if (time > 0)
- radeon_benchmark_log_results(n, size, time,
- sdomain, ddomain, "blit");
+ if (rdev->asic->copy.blit) {
+ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ RADEON_BENCHMARK_COPY_BLIT, n);
+ if (time < 0)
+ goto out_cleanup;
+ if (time > 0)
+ radeon_benchmark_log_results(n, size, time,
+ sdomain, ddomain, "blit");
+ }
out_cleanup:
if (sobj) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9128120da04..bafbe321695 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
si_irq_suspend(rdev);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index e9205ee8cf9..130f02cc9d9 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -80,6 +80,7 @@
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
@@ -185,6 +186,7 @@ struct ismt_priv {
static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 36704e3ab3f..b714776b6dd 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
u32 clk_divisor;
- tegra_i2c_clock_enable(i2c_dev);
+ err = tegra_i2c_clock_enable(i2c_dev);
+ if (err < 0) {
+ dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
+ return err;
+ }
tegra_periph_reset_assert(i2c_dev->div_clk);
udelay(2);
@@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (i2c_dev->is_suspended)
return -EBUSY;
- tegra_i2c_clock_enable(i2c_dev);
+ ret = tegra_i2c_clock_enable(i2c_dev);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
+ return ret;
+ }
+
for (i = 0; i < num; i++) {
enum msg_end_type end_type = MSG_END_STOP;
if (i < (num - 1)) {
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index f3b8f9a6a89..966a18a5d12 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2010 Ericsson AB.
*
- * Author: Guenter Roeck <guenter.roeck@ericsson.com>
+ * Author: Guenter Roeck <linux@roeck-us.net>
*
* Derived from:
* pca954x.c
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 9fe6f1e8437..5b059e2d80c 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -194,8 +194,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
wq->rq.memsize, &(wq->rq.dma_addr),
GFP_KERNEL);
- if (!wq->rq.queue)
+ if (!wq->rq.queue) {
+ ret = -ENOMEM;
goto free_sq;
+ }
PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
__func__, wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 439c35d4a66..ea93870266e 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
goto bail;
}
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+ opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
dev->opstats[opcode].n_bytes += tlen;
dev->opstats[opcode].n_packets++;
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 8349f9c5064..1e603a37506 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,7 +1,7 @@
config INFINIBAND_QIB
- tristate "QLogic PCIe HCA support"
+ tristate "Intel PCIe HCA support"
depends on 64BIT
---help---
- This is a low-level driver for QLogic PCIe QLE InfiniBand host
- channel adapters. This driver does not support the QLogic
+ This is a low-level driver for Intel PCIe QLE InfiniBand host
+ channel adapters. This driver does not support the Intel
HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5423edcab51..216092477df 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
"Attempt pre-IBTA 1.2 DDR speed negotiation");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic IB driver");
+MODULE_AUTHOR("Intel <ibsupport@intel.com>");
+MODULE_DESCRIPTION("Intel IB driver");
MODULE_VERSION(QIB_DRIVER_VERSION);
/*
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a099ac171e2..0232ae56b1f 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
/*
* This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
*
*/
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 50e33aa0b4e..173f805790d 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dd = qib_init_iba6120_funcs(pdev, ent);
#else
qib_early_err(&pdev->dev,
- "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+ "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
ent->device);
dd = ERR_PTR(-ENODEV);
#endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
qib_early_err(&pdev->dev,
- "Failing on unknown QLogic deviceid 0x%x\n",
+ "Failing on unknown Intel deviceid 0x%x\n",
ent->device);
ret = -ENODEV;
}
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 50a8a0d4fe6..08a6c6d39e5 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -44,7 +44,7 @@
#include "qib.h"
#include "qib_7220.h"
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
+#define SD7220_FW_NAME "intel/sd7220.fw"
MODULE_FIRMWARE(SD7220_FW_NAME);
/*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ba51a4715a1..7c0ab16a2fe 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
ibdev->dma_ops = &qib_dma_mapping_ops;
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
- "QLogic Infiniband HCA %s", init_utsname()->nodename);
+ "Intel Infiniband HCA %s", init_utsname()->nodename);
ret = ib_register_device(ibdev, qib_create_port_files);
if (ret)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 67b0c1d2367..1ef880de3a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
- ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
+ rc = ib_req_notify_cq(priv->send_cq,
+ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+ if (rc < 0)
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+ else if (rc)
+ ipoib_send_comp_handler(priv->send_cq, dev);
}
}
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c514d0711d..c332fb98480 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -130,7 +130,7 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP2PLUS
select IOMMU_API
config OMAP_IOVMM
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98f555dafb5..b287ca33833 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
/* allocate a protection domain if a device is added */
dma_domain = find_protection_domain(devid);
- if (dma_domain)
- goto out;
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain)
- goto out;
- dma_domain->target_dev = devid;
-
- spin_lock_irqsave(&iommu_pd_list_lock, flags);
- list_add_tail(&dma_domain->list, &iommu_pd_list);
- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-
- dev_data = get_dev_data(dev);
+ if (!dma_domain) {
+ dma_domain = dma_ops_domain_alloc();
+ if (!dma_domain)
+ goto out;
+ dma_domain->target_dev = devid;
+
+ spin_lock_irqsave(&iommu_pd_list_lock, flags);
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ }
dev->archdata.dma_ops = &amd_iommu_dma_ops;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b6ecddb63cd..e3c2d74b768 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
* BIOS should disable L2B miscellaneous clock gating by setting
* L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
*/
-static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
u32 value;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index d56f8c17c5f..7c11ff368d0 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -2,7 +2,6 @@
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/irq.h>
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 3c955e10a61..c6083132c4b 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
{
struct blk_plug plug;
+ BUG_ON(dm_bufio_in_request());
+
blk_start_plug(&plug);
dm_bufio_lock(c);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index fbd3625f274..83e995fece8 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -83,6 +83,8 @@ struct cache_disk_superblock {
__le32 read_misses;
__le32 write_hits;
__le32 write_misses;
+
+ __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
} __packed;
struct dm_cache_metadata {
@@ -109,6 +111,7 @@ struct dm_cache_metadata {
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
+ unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
};
@@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
disk_super->version = cpu_to_le32(CACHE_VERSION);
- memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
@@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
disk_super->cache_blocks = cpu_to_le32(0);
- memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
disk_super->read_hits = cpu_to_le32(0);
disk_super->read_misses = cpu_to_le32(0);
@@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+ cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
+ cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
+ cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
@@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+ disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+ disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+ disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -854,18 +863,43 @@ struct thunk {
bool hints_valid;
};
+static bool policy_unchanged(struct dm_cache_metadata *cmd,
+ struct dm_cache_policy *policy)
+{
+ const char *policy_name = dm_cache_policy_get_name(policy);
+ const unsigned *policy_version = dm_cache_policy_get_version(policy);
+ size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
+
+ /*
+ * Ensure policy names match.
+ */
+ if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
+ return false;
+
+ /*
+ * Ensure policy major versions match.
+ */
+ if (cmd->policy_version[0] != policy_version[0])
+ return false;
+
+ /*
+ * Ensure policy hint sizes match.
+ */
+ if (cmd->policy_hint_size != policy_hint_size)
+ return false;
+
+ return true;
+}
+
static bool hints_array_initialized(struct dm_cache_metadata *cmd)
{
return cmd->hint_root && cmd->policy_hint_size;
}
static bool hints_array_available(struct dm_cache_metadata *cmd,
- const char *policy_name)
+ struct dm_cache_policy *policy)
{
- bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
- sizeof(cmd->policy_name));
-
- return cmd->clean_when_opened && policy_names_match &&
+ return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
hints_array_initialized(cmd);
}
@@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
return r;
}
-static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+static int __load_mappings(struct dm_cache_metadata *cmd,
+ struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
struct thunk thunk;
@@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam
thunk.cmd = cmd;
thunk.respect_dirty_flags = cmd->clean_when_opened;
- thunk.hints_valid = hints_array_available(cmd, policy_name);
+ thunk.hints_valid = hints_array_available(cmd, policy);
return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
}
-int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
int r;
down_read(&cmd->root_lock);
- r = __load_mappings(cmd, policy_name, fn, context);
+ r = __load_mappings(cmd, policy, fn, context);
up_read(&cmd->root_lock);
return r;
@@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
/* nothing to be done */
return 0;
- value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+ value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
@@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
__le32 value;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
+ const unsigned *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
return -EINVAL;
- if (strcmp(cmd->policy_name, policy_name)) {
+ if (!policy_unchanged(cmd, policy)) {
strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+ memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
hint_size = dm_cache_policy_get_hint_size(policy);
if (!hint_size)
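
The checks added above tie the saved per-block hints to three properties of the policy: its name, its major version number, and its hint size; a change in any of them means the on-disk hints can no longer be trusted. A minimal standalone sketch of that compatibility rule (struct and function names here are illustrative, not the real dm-cache ones):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* What the cache superblock remembers about the policy that wrote it. */
    struct saved_policy {
        char name[16];
        unsigned version[3];    /* {major, minor, patch} */
        size_t hint_size;
    };

    /* Mirrors the logic of policy_unchanged(): only the major version
     * matters, so a 1.0.0 -> 1.2.0 upgrade keeps the hints, while a
     * 1.x -> 2.x change (or a different hint size) discards them.
     */
    static bool hints_still_usable(const struct saved_policy *saved,
                                   const char *name,
                                   const unsigned *version,
                                   size_t hint_size)
    {
        if (strncmp(saved->name, name, sizeof(saved->name)))
            return false;           /* different policy module */
        if (saved->version[0] != version[0])
            return false;           /* hint format may have changed */
        if (saved->hint_size != hint_size)
            return false;           /* stored hint records would not line up */
        return true;
    }
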
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 135864ea0ee..f45cef21f3d 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
dm_cblock_t cblock, bool dirty,
uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
- const char *policy_name,
+ struct dm_cache_policy *policy,
load_mapping_fn fn,
void *context);
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index cc05d70b3cb..b04d1f904d0 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -17,7 +17,6 @@
/*----------------------------------------------------------------*/
#define DM_MSG_PREFIX "cache cleaner"
-#define CLEANER_VERSION "1.0.0"
/* Cache entry struct. */
struct wb_cache_entry {
@@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
static struct dm_cache_policy_type wb_policy_type = {
.name = "cleaner",
+ .version = {1, 0, 0},
.hint_size = 0,
.owner = THIS_MODULE,
.create = wb_create
@@ -446,7 +446,10 @@ static int __init wb_init(void)
if (r < 0)
DMERR("register failed %d", r);
else
- DMINFO("version " CLEANER_VERSION " loaded");
+ DMINFO("version %u.%u.%u loaded",
+ wb_policy_type.version[0],
+ wb_policy_type.version[1],
+ wb_policy_type.version[2]);
return r;
}
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 52a75beeced..0928abdc49f 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
*/
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 96415325507..dc112a7137f 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#define DM_MSG_PREFIX "cache-policy-mq"
-#define MQ_VERSION "1.0.0"
static struct kmem_cache *mq_entry_cache;
@@ -1133,6 +1132,7 @@ bad_cache_alloc:
static struct dm_cache_policy_type mq_policy_type = {
.name = "mq",
+ .version = {1, 0, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
@@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = {
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
+ .version = {1, 0, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
@@ -1164,7 +1165,10 @@ static int __init mq_init(void)
r = dm_cache_policy_register(&default_policy_type);
if (!r) {
- DMINFO("version " MQ_VERSION " loaded");
+ DMINFO("version %u.%u.%u loaded",
+ mq_policy_type.version[0],
+ mq_policy_type.version[1],
+ mq_policy_type.version[2]);
return 0;
}
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 2cbf5fdaac5..21c03c570c0 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->version;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_version);
+
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index f0f51b26054..558bdfdabf5 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -196,6 +196,7 @@ struct dm_cache_policy {
* We maintain a little register of the different policy types.
*/
#define CACHE_POLICY_NAME_SIZE 16
+#define CACHE_POLICY_VERSION_SIZE 3
struct dm_cache_policy_type {
/* For use by the register code only. */
@@ -206,6 +207,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
+ unsigned version[CACHE_POLICY_VERSION_SIZE];
/*
* Policies may store a hint for each cache block.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0f4e84b15c3..66120bd46d1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -142,6 +142,7 @@ struct cache {
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
+ struct bio_list deferred_writethrough_bios;
struct list_head quiesced_migrations;
struct list_head completed_migrations;
struct list_head need_commit_migrations;
@@ -158,7 +159,7 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- sector_t discard_block_size; /* a power of 2 times sectors per block */
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
@@ -199,6 +200,11 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+
+ /* writethrough fields */
+ struct cache *cache;
+ dm_cblock_t cblock;
+ bio_end_io_t *saved_bi_end_io;
};
struct dm_cache_migration {
@@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
return cache->sectors_per_block_shift >= 0;
}
+static dm_block_t block_div(dm_block_t b, uint32_t n)
+{
+ do_div(b, n);
+
+ return b;
+}
+
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
- sector_t discard_blocks = cache->discard_block_size;
+ uint32_t discard_blocks = cache->discard_block_size;
dm_block_t b = from_oblock(oblock);
if (!block_size_is_power_of_two(cache))
- (void) sector_div(discard_blocks, cache->sectors_per_block);
+ discard_blocks = discard_blocks / cache->sectors_per_block;
else
discard_blocks >>= cache->sectors_per_block_shift;
- (void) sector_div(b, discard_blocks);
+ b = block_div(b, discard_blocks);
return to_dblock(b);
}
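
block_div() is introduced because discard_block_size is now a plain uint32_t rather than a sector_t, so sector_div() no longer fits; the helper wraps do_div(), which divides a 64-bit dividend in place by a 32-bit divisor and returns the remainder. A rough model of that contract (not the kernel macro itself, which is architecture-specific):

    #include <stdint.h>

    /* Model of do_div(): the 64-bit dividend is replaced by the quotient
     * and the 32-bit remainder is returned.
     */
    static uint32_t model_do_div(uint64_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
    }

    /* Model of block_div(): keep only the quotient, which is all that
     * oblock_to_dblock() and the size calculations in this file need.
     */
    static uint64_t model_block_div(uint64_t b, uint32_t n)
    {
        (void)model_do_div(&b, n);      /* remainder intentionally dropped */
        return b;
    }
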
@@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio)
spin_unlock_irqrestore(&cache->lock, flags);
}
+static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_add(&cache->deferred_writethrough_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void writethrough_endio(struct bio *bio, int err)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+ bio->bi_end_io = pb->saved_bi_end_io;
+
+ if (err) {
+ bio_endio(bio, err);
+ return;
+ }
+
+ remap_to_cache(pb->cache, bio, pb->cblock);
+
+ /*
+ * We can't issue this bio directly, since we're in interrupt
+ * context. So it gets put on a bio list for processing by the
+ * worker thread.
+ */
+ defer_writethrough_bio(pb->cache, bio);
+}
+
+/*
+ * When running in writethrough mode we need to send writes to clean blocks
+ * to both the cache and origin devices. In future we'd like to clone the
+ * bio and send them in parallel, but for now we're doing them in
+ * series as this is easier.
+ */
+static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ pb->cache = cache;
+ pb->cblock = cblock;
+ pb->saved_bi_end_io = bio->bi_end_io;
+ bio->bi_end_io = writethrough_endio;
+
+ remap_to_origin_clear_discard(pb->cache, bio, oblock);
+}
+
/*----------------------------------------------------------------
* Migration processing
*
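
In outline, the new writethrough path chains the two writes through the bio's completion callback: the bio is first sent to the origin device, and only when that write completes does writethrough_endio() re-target the same bio at the cache and push it onto a deferred list, because a bio cannot be submitted from interrupt context. A schematic of that ordering using stand-in types, not the real dm structures:

    #include <stdio.h>

    struct fake_bio {
        void (*end_io)(struct fake_bio *);          /* completion callback */
        void (*saved_end_io)(struct fake_bio *);
        const char *target;                         /* "origin" or "cache" */
    };

    static void defer_for_worker(struct fake_bio *bio)
    {
        /* the real code queues the bio and wakes the worker thread */
        printf("deferred resubmission to %s\n", bio->target);
    }

    static void writethrough_done(struct fake_bio *bio)
    {
        bio->end_io = bio->saved_end_io;    /* restore the caller's endio */
        bio->target = "cache";              /* same bio, now aimed at the cache */
        defer_for_worker(bio);              /* cannot submit from irq context */
    }

    static void remap_to_origin_then_cache(struct fake_bio *bio)
    {
        bio->saved_end_io = bio->end_io;
        bio->end_io = writethrough_done;
        bio->target = "origin";             /* first write goes to the origin */
    }

    int main(void)
    {
        struct fake_bio bio = { .end_io = NULL, .target = NULL };

        remap_to_origin_then_cache(&bio);
        bio.end_io(&bio);                   /* simulate origin write completion */
        return 0;
    }
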
@@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
dm_block_t b;
- (void) sector_div(end_block, cache->discard_block_size);
+ end_block = block_div(end_block, cache->discard_block_size);
for (b = start_block; b < end_block; b++)
set_discard(cache, to_dblock(b));
@@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
- if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
- /*
- * No need to mark anything dirty in write through mode.
- */
- pb->req_nr == 0 ?
- remap_to_cache(cache, bio, lookup_result.cblock) :
- remap_to_origin_clear_discard(cache, bio, block);
- } else
+ if (is_writethrough_io(cache, bio, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
issue(cache, bio);
@@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
case POLICY_MISS:
inc_miss_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
- if (pb->req_nr != 0) {
- /*
- * This is a duplicate writethrough io that is no
- * longer needed because the block has been demoted.
- */
- bio_endio(bio, 0);
- } else {
- remap_to_origin_clear_discard(cache, bio, block);
- issue(cache, bio);
- }
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
break;
case POLICY_NEW:
@@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}
+static void process_deferred_writethrough_bios(struct cache *cache)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+ bio_list_init(&cache->deferred_writethrough_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ generic_make_request(bio);
+}
+
static void writeback_some_dirty_blocks(struct cache *cache)
{
int r = 0;
@@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache)
else
return !bio_list_empty(&cache->deferred_bios) ||
!bio_list_empty(&cache->deferred_flush_bios) ||
+ !bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
!list_empty(&cache->need_commit_migrations);
@@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws)
writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
+
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
@@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
}
r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
- if (r)
+ if (r) {
+ *error = "Error setting cache policy's config values";
dm_cache_policy_destroy(cache->policy);
+ cache->policy = NULL;
+ }
return r;
}
@@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
-
static int cache_create(struct cache_args *ca, struct cache **result)
{
int r = 0;
@@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
memcpy(&cache->features, &ca->features, sizeof(cache->features));
- if (cache->features.write_through)
- ti->num_write_bios = cache_num_write_bios;
-
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
/* FIXME: factor out this whole section */
origin_blocks = cache->origin_sectors = ca->origin_sectors;
- (void) sector_div(origin_blocks, ca->block_size);
+ origin_blocks = block_div(origin_blocks, ca->block_size);
cache->origin_blocks = to_oblock(origin_blocks);
cache->sectors_per_block = ca->block_size;
@@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
dm_block_t cache_size = ca->cache_sectors;
cache->sectors_per_block_shift = -1;
- (void) sector_div(cache_size, ca->block_size);
+ cache_size = block_div(cache_size, ca->block_size);
cache->cache_size = to_cblock(cache_size);
} else {
cache->sectors_per_block_shift = __ffs(ca->block_size);
@@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
+ bio_list_init(&cache->deferred_writethrough_bios);
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
@@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;
r = cache_create(ca, &cache);
+ if (r)
+ goto out;
r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
if (r) {
@@ -2016,20 +2086,6 @@ out:
return r;
}
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
-{
- int r;
- struct cache *cache = ti->private;
- dm_oblock_t block = get_bio_block(cache, bio);
- dm_cblock_t cblock;
-
- r = policy_lookup(cache->policy, block, &cblock);
- if (r < 0)
- return 2; /* assume the worst */
-
- return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
-}
-
static int cache_map(struct dm_target *ti, struct bio *bio)
{
struct cache *cache = ti->private;
@@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
- if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
- /*
- * No need to mark anything dirty in write through mode.
- */
- pb->req_nr == 0 ?
- remap_to_cache(cache, bio, lookup_result.cblock) :
- remap_to_origin_clear_discard(cache, bio, block);
- cell_defer(cache, cell, false);
- } else {
+ if (is_writethrough_io(cache, bio, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
- cell_defer(cache, cell, false);
- }
+
+ cell_defer(cache, cell, false);
break;
case POLICY_MISS:
@@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti)
}
if (!cache->loaded_mappings) {
- r = dm_cache_load_mappings(cache->cmd,
- dm_cache_policy_get_name(cache->policy),
+ r = dm_cache_load_mappings(cache->cmd, cache->policy,
load_mapping, cache);
if (r) {
DMERR("could not load cache mappings");
@@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 009339d6282..004ad1652b7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt)
return q && blk_queue_discard(q);
}
+static bool is_factor(sector_t block_size, uint32_t n)
+{
+ return !sector_div(block_size, n);
+}
+
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not.
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
else if (data_limits->discard_granularity > block_size)
reason = "discard granularity larger than a block";
- else if (block_size & (data_limits->discard_granularity - 1))
+ else if (!is_factor(block_size, data_limits->discard_granularity))
reason = "discard granularity not a factor of block size";
if (reason) {
@@ -2544,7 +2549,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 6, 1},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 7, 1},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
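
The switch from the bitmask test to is_factor() matters because block_size & (granularity - 1) only detects divisibility when the granularity is a power of two; for other granularities it can falsely report "is a factor". A small self-contained illustration, with arbitrary example values rather than real device limits:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool old_check_says_factor(uint64_t block_size, uint32_t gran)
    {
        return !(block_size & (gran - 1));  /* valid only for powers of two */
    }

    static bool is_factor(uint64_t block_size, uint32_t gran)
    {
        return !(block_size % gran);        /* what sector_div() computes */
    }

    int main(void)
    {
        /* granularity 6 is not a power of two: 26 & 5 == 0, yet 26 % 6 != 0 */
        assert(old_check_says_factor(26, 6));   /* false positive */
        assert(!is_factor(26, 6));              /* correct answer */
        return 0;
    }
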
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 6ad538375c3..a746f1d21c6 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -93,6 +93,13 @@ struct dm_verity_io {
*/
};
+struct dm_verity_prefetch_work {
+ struct work_struct work;
+ struct dm_verity *v;
+ sector_t block;
+ unsigned n_blocks;
+};
+
static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
{
return (struct shash_desc *)(io + 1);
@@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error)
* The root buffer is not prefetched, it is assumed that it will be cached
* all the time.
*/
-static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_prefetch_io(struct work_struct *work)
{
+ struct dm_verity_prefetch_work *pw =
+ container_of(work, struct dm_verity_prefetch_work, work);
+ struct dm_verity *v = pw->v;
int i;
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
- verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
- verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
+ verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+ verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
@@ -452,6 +462,25 @@ no_prefetch_cluster:
dm_bufio_prefetch(v->bufio, hash_block_start,
hash_block_end - hash_block_start + 1);
}
+
+ kfree(pw);
+}
+
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+{
+ struct dm_verity_prefetch_work *pw;
+
+ pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (!pw)
+ return;
+
+ INIT_WORK(&pw->work, verity_prefetch_io);
+ pw->v = v;
+ pw->block = io->block;
+ pw->n_blocks = io->n_blocks;
+ queue_work(v->verify_wq, &pw->work);
}
/*
@@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
memcpy(io->io_vec, bio_iovec(bio),
io->io_vec_size * sizeof(struct bio_vec));
- verity_prefetch_io(v, io);
+ verity_submit_prefetch(v, io);
generic_make_request(bio);
@@ -858,7 +887,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
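
The prefetch change follows a common deferral pattern: the I/O path allocates a small work item with non-blocking, no-warning allocation flags (prefetch is best effort, so a failed allocation is simply dropped), fills in the parameters it needs, and lets the workqueue handler recover the item with container_of() before freeing it. A userspace model of that pattern, with made-up names rather than the dm-verity ones:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_item {
        void (*fn)(struct work_item *);
    };

    struct prefetch_work {
        struct work_item work;      /* embedded, as in the kernel pattern */
        unsigned long block;
        unsigned n_blocks;
    };

    static void prefetch_handler(struct work_item *w)
    {
        struct prefetch_work *pw = container_of(w, struct prefetch_work, work);

        printf("prefetch %u blocks starting at %lu\n", pw->n_blocks, pw->block);
        free(pw);                   /* the handler owns the item */
    }

    static void submit_prefetch(unsigned long block, unsigned n_blocks)
    {
        struct prefetch_work *pw = malloc(sizeof(*pw));

        if (!pw)
            return;                 /* best effort: drop on allocation failure */
        pw->work.fn = prefetch_handler;
        pw->block = block;
        pw->n_blocks = n_blocks;
        pw->work.fn(&pw->work);     /* stands in for queue_work() */
    }

    int main(void)
    {
        submit_prefetch(1024, 8);
        return 0;
    }
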
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fcb878f8879..aeceedfc530 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev)
removed++;
}
}
- if (removed)
- sysfs_notify(&mddev->kobj, NULL,
- "degraded");
-
+ if (removed && mddev->kobj.sd)
+ sysfs_notify(&mddev->kobj, NULL, "degraded");
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eca59c3074e..d90fb1a879e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags)) {
+ if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else
@@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags)) {
+ if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index c4f28133ef8..b88757cd0d1 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -139,15 +139,8 @@ struct child {
struct btree_node *n;
};
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
-static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+ struct btree_node *parent,
unsigned index, struct child *result)
{
int r, inc;
@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
result->n = dm_block_data(result->block);
if (inc)
- inc_children(info->tm, result->n, &le64_type);
+ inc_children(info->tm, result->n, vt);
*((__le64 *) value_ptr(parent, index)) =
cpu_to_le64(dm_block_location(result->block));
@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
- unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned left_index)
{
int r;
struct btree_node *parent;
@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
parent = dm_block_data(shadow_current(s));
- r = init_child(info, parent, left_index, &left);
+ r = init_child(info, vt, parent, left_index, &left);
if (r)
return r;
- r = init_child(info, parent, left_index + 1, &right);
+ r = init_child(info, vt, parent, left_index + 1, &right);
if (r) {
exit_child(info, &left);
return r;
@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
- unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned left_index)
{
int r;
struct btree_node *parent = dm_block_data(shadow_current(s));
@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
/*
* FIXME: fill out an array?
*/
- r = init_child(info, parent, left_index, &left);
+ r = init_child(info, vt, parent, left_index, &left);
if (r)
return r;
- r = init_child(info, parent, left_index + 1, &center);
+ r = init_child(info, vt, parent, left_index + 1, &center);
if (r) {
exit_child(info, &left);
return r;
}
- r = init_child(info, parent, left_index + 2, &right);
+ r = init_child(info, vt, parent, left_index + 2, &right);
if (r) {
exit_child(info, &left);
exit_child(info, &center);
@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
}
static int rebalance_children(struct shadow_spine *s,
- struct dm_btree_info *info, uint64_t key)
+ struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, uint64_t key)
{
int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;
@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
if (!has_left_sibling)
- r = rebalance2(s, info, i);
+ r = rebalance2(s, info, vt, i);
else if (!has_right_sibling)
- r = rebalance2(s, info, i - 1);
+ r = rebalance2(s, info, vt, i - 1);
else
- r = rebalance3(s, info, i - 1);
+ r = rebalance3(s, info, vt, i - 1);
return r;
}
@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
if (le32_to_cpu(n->header.flags) & LEAF_NODE)
return do_leaf(n, key, index);
- r = rebalance_children(s, info, key);
+ r = rebalance_children(s, info, vt, key);
if (r)
break;
@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
+static struct dm_btree_value_type le64_type = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL
+};
+
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3ee2912889e..24909eb13fe 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -671,9 +671,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
- trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
- bi, disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ bi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(bi);
}
if (rrdev) {
@@ -701,9 +703,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
- trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
- rbi, disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ if (conf->mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ rbi, disk_devt(conf->mddev->gendisk),
+ sh->dev[i].sector);
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
@@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;
if (rcw) {
- /* if we are not expanding this is a proper write request, and
- * there will be bios with new data to be drained into the
- * stripe cache
- */
- if (!expand) {
- sh->reconstruct_state = reconstruct_state_drain_run;
- set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
- } else
- sh->reconstruct_state = reconstruct_state_run;
-
- set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked++;
}
}
+ /* if we are not expanding this is a proper write request, and
+ * there will be bios with new data to be drained into the
+ * stripe cache
+ */
+ if (!expand) {
+ if (!s->locked)
+ /* False alarm, nothing to do */
+ return;
+ sh->reconstruct_state = reconstruct_state_drain_run;
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ } else
+ sh->reconstruct_state = reconstruct_state_run;
+
+ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
+
if (s->locked + conf->max_degraded == disks)
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);
@@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
- sh->reconstruct_state = reconstruct_state_prexor_drain_run;
- set_bit(STRIPE_OP_PREXOR, &s->ops_request);
- set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
- set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
-
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (i == pd_idx)
@@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked++;
}
}
+ if (!s->locked)
+ /* False alarm - nothing to do */
+ return;
+ sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+ set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
}
/* keep the parity disk(s) locked while asynchronous operations
@@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int i;
clear_bit(STRIPE_SYNCING, &sh->state);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+ wake_up(&conf->wait_for_overlap);
s->syncing = 0;
s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
@@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
{
int i;
struct r5dev *dev;
+ int discard_pending = 0;
for (i = disks; i--; )
if (sh->dev[i].written) {
@@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
- }
- } else if (test_bit(R5_Discard, &sh->dev[i].flags))
- clear_bit(R5_Discard, &sh->dev[i].flags);
+ } else if (test_bit(R5_Discard, &dev->flags))
+ discard_pending = 1;
+ }
+ if (!discard_pending &&
+ test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+ clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+ if (sh->qd_idx >= 0) {
+ clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
+ clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
+ }
+ /* now that discard is done we can proceed with any sync */
+ clear_bit(STRIPE_DISCARD, &sh->state);
+ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
+ set_bit(STRIPE_HANDLE, &sh->state);
+
+ }
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -2826,8 +2852,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if (rmw < rcw && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
- (unsigned long long)sh->sector, rmw);
+ if (conf->mddev->queue)
+ blk_add_trace_msg(conf->mddev->queue,
+ "raid5 rmw %llu %d",
+ (unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
- if (rcw)
+ if (rcw && conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector,
rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3417,9 +3445,15 @@ static void handle_stripe(struct stripe_head *sh)
return;
}
- if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
- set_bit(STRIPE_SYNCING, &sh->state);
- clear_bit(STRIPE_INSYNC, &sh->state);
+ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ spin_lock(&sh->stripe_lock);
+ /* Cannot process 'sync' concurrently with 'discard' */
+ if (!test_bit(STRIPE_DISCARD, &sh->state) &&
+ test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ set_bit(STRIPE_SYNCING, &sh->state);
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ }
+ spin_unlock(&sh->stripe_lock);
}
clear_bit(STRIPE_DELAYED, &sh->state);
@@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+ wake_up(&conf->wait_for_overlap);
}
/* If the failed drives are just a ReadError, then we might need
@@ -3982,9 +4018,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);
- trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
- align_bi, disk_devt(mddev->gendisk),
- raid_bio->bi_sector);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ align_bi, disk_devt(mddev->gendisk),
+ raid_bio->bi_sector);
generic_make_request(align_bi);
return 1;
} else {
@@ -4078,7 +4115,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
}
spin_unlock_irq(&conf->device_lock);
}
- trace_block_unplug(mddev->queue, cnt, !from_schedule);
+ if (mddev->queue)
+ trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
}
@@ -4141,6 +4179,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
+ set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
+ if (test_bit(STRIPE_SYNCING, &sh->state)) {
+ release_stripe(sh);
+ schedule();
+ goto again;
+ }
+ clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
spin_lock_irq(&sh->stripe_lock);
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
@@ -4153,6 +4198,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
goto again;
}
}
+ set_bit(STRIPE_DISCARD, &sh->state);
finish_wait(&conf->wait_for_overlap, &w);
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
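
These raid5 hunks add a STRIPE_DISCARD state bit so that sync and discard never run on the same stripe at once: sync only starts if no discard is in flight, and a discard releases the stripe and waits while sync is in progress. A simplified model of that mutual exclusion, with locking and the overlap waitqueue elided:

    #include <stdbool.h>

    struct stripe_state {
        bool syncing;       /* stands in for STRIPE_SYNCING */
        bool discard;       /* stands in for STRIPE_DISCARD */
    };

    bool try_start_sync(struct stripe_state *s)
    {
        if (s->discard)
            return false;   /* retry after the discard completes */
        s->syncing = true;
        return true;
    }

    bool try_start_discard(struct stripe_state *s)
    {
        if (s->syncing)
            return false;   /* caller releases the stripe and waits */
        s->discard = true;
        return true;
    }
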
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 18b2c4a8a1f..b0b663b119a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -221,10 +221,6 @@ struct stripe_head {
struct stripe_operations {
int target, target2;
enum sum_check_flags zero_sum_result;
- #ifdef CONFIG_MULTICORE_RAID456
- unsigned long request;
- wait_queue_head_t wait_for_ops;
- #endif
} ops;
struct r5dev {
/* rreq and rvec are used for the replacement device when
@@ -323,6 +319,7 @@ enum {
STRIPE_COMPUTE_RUN,
STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
+ STRIPE_DISCARD,
};
/*
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1c9e09fbdff..db103e03ba0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
sprintf(linkname, "slave_%s", slave->name);
ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
linkname);
+
+ /* free the master link created earlier in case of error */
+ if (ret)
+ sysfs_remove_link(&(slave->dev.kobj), "master");
+
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 568205436a1..91ecd6a00d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
break;
default:
BNX2X_ERR("Non valid capability ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
break;
default:
BNX2X_ERR("Non valid TC-ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
return -EINVAL;
}
-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
break;
default:
BNX2X_ERR("Non valid featrue-ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "DCB disabled\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
break;
default:
BNX2X_ERR("Non valid featrue-ID\n");
- rval = -EINVAL;
+ rval = 1;
break;
}
} else {
DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
- rval = -EINVAL;
+ rval = 1;
}
return rval;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b64542acfa3..12b1d848080 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1818,27 +1818,32 @@ out:
**/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
- u32 dtxswc;
+ u32 reg_val, reg_offset;
switch (hw->mac.type) {
case e1000_82576:
+ reg_offset = E1000_DTXSWC;
+ break;
case e1000_i350:
- dtxswc = rd32(E1000_DTXSWC);
- if (enable) {
- dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- /* The PF can spoof - it has to in order to
- * support emulation mode NICs */
- dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
- } else {
- dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
- E1000_DTXSWC_VLAN_SPOOF_MASK);
- }
- wr32(E1000_DTXSWC, dtxswc);
+ reg_offset = E1000_TXSWC;
break;
default:
- break;
+ return;
+ }
+
+ reg_val = rd32(reg_offset);
+ if (enable) {
+ reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs
+ */
+ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
}
+ wr32(reg_offset, reg_val);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 4623502054d..0478a1abe54 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,7 +39,7 @@
#include <linux/pci.h>
#ifdef CONFIG_IGB_HWMON
-struct i2c_board_info i350_sensor_info = {
+static struct i2c_board_info i350_sensor_info = {
I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4dbd62968c7..8496adfc6a6 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
- igb_enable_sriov(pdev, max_vfs);
pci_sriov_set_totalvfs(pdev, 7);
+ igb_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
if (max_vfs > 7) {
dev_warn(&pdev->dev,
"Maximum of 7 VFs per PF, using max\n");
- adapter->vfs_allocated_count = 7;
+ max_vfs = adapter->vfs_allocated_count = 7;
} else
adapter->vfs_allocated_count = max_vfs;
if (adapter->vfs_allocated_count)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0987822359f..0a237507ee8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
case e1000_82576:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
- adapter->ptp_caps.max_adj = 1000000000;
+ adapter->ptp_caps.max_adj = 999999881;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ac0c315659d..5563250883a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -950,9 +950,17 @@ free_queue_irqs:
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
+ /* This failure is non-recoverable - it indicates the system is
+ * out of MSIX vector resources and the VF driver cannot run
+ * without them. Set the number of msix vectors to zero
+ * indicating that not enough can be allocated. The error
+ * will be returned to the user indicating device open failed.
+ * Any further attempts to force the driver to open will also
+ * fail. The only way to recover is to unload the driver and
+ * reload it again. If the system has recovered some MSIX
+ * vectors then it may succeed.
+ */
+ adapter->num_msix_vectors = 0;
return err;
}
@@ -2575,6 +2583,15 @@ static int ixgbevf_open(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ /* A previous failure to open the device because of a lack of
+ * available MSIX vector resources may have reset the number
+ * of msix vectors variable to zero. The only way to recover
+ * is to unload/reload the driver and hope that the system has
+ * been able to recover some MSIX vector resources.
+ */
+ if (!adapter->num_msix_vectors)
+ return -ENOMEM;
+
/* disallow open during test */
if (test_bit(__IXGBEVF_TESTING, &adapter->state))
return -EBUSY;
@@ -2631,7 +2648,6 @@ static int ixgbevf_open(struct net_device *netdev)
err_req_irq:
ixgbevf_down(adapter);
- ixgbevf_free_irq(adapter);
err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
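
The two comments above describe a latch: once MSI-X vector allocation fails, num_msix_vectors is zeroed and every later open() refuses early rather than retrying a request that cannot succeed until the driver is reloaded. A minimal model of that latch, with illustrative names only:

    #include <errno.h>
    #include <stdio.h>

    static unsigned num_msix_vectors;   /* 0 means allocation already failed */

    static int request_vectors(unsigned want, unsigned available)
    {
        if (available < want) {
            num_msix_vectors = 0;       /* latch the failure */
            return -ENOMEM;
        }
        num_msix_vectors = want;
        return 0;
    }

    static int device_open(void)
    {
        if (!num_msix_vectors)          /* fail fast until driver reload */
            return -ENOMEM;
        return 0;
    }

    int main(void)
    {
        request_vectors(4, 2);          /* simulate vector exhaustion */
        printf("open -> %d\n", device_open());
        return 0;
    }
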
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index abd5fba09b8..60eb890800e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1724,9 +1724,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
- skb->ip_summed = CHECKSUM_NONE;
- else
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
napi_gro_receive(&adapter->napi, skb);
(*work_done)++;
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index c689c04a4f5..2d2f0a43d36 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
/* special soc specific control */
if (ctrl->mpp_get || ctrl->mpp_set) {
- if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) {
+ if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
dev_err(&pdev->dev, "wrong soc control info\n");
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index ac8d382a79b..d611ecfcbf7 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = {
static int pinconf_dbg_state_print(struct seq_file *s, void *d)
{
if (strlen(dbg_state_name))
- seq_printf(s, "%s\n", dbg_pinname);
+ seq_printf(s, "%s\n", dbg_state_name);
else
seq_printf(s, "No pin state set\n");
return 0;
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index e3ed8cb072a..bfda73d64ee 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
* pin config.
*/
-#ifdef CONFIG_GENERIC_PINCONF
+#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin);
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index caecdd37306..c542a97c82f 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
}
/* check if pin use AlternateFunction register */
- if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED))
+ if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
return mode;
/*
* if pin GPIOSEL bit is set and pin supports alternate function,
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 1a00658b3ea..bd83c8b01cd 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
}
if (!gpio_range) {
+ /*
+ * A pin should not be freed more times than allocated.
+ */
+ if (WARN_ON(!desc->mux_usecount))
+ return NULL;
desc->mux_usecount--;
if (desc->mux_usecount)
return NULL;
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 434ebc3a99d..0a9f27e094e 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
+static u32 at91_rtc_imr;
/*
* Decode time/date into rtc_time structure
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+ at91_rtc_imr |= AT91_RTC_ACKUPD;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_imr &= ~AT91_RTC_ACKUPD;
at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
? 1 : 0;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_sec = alrm->time.tm_sec;
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_imr &= ~AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
- } else
+ } else {
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_imr &= ~AT91_RTC_ALARM;
+ }
return 0;
}
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
-
seq_printf(seq, "update_IRQ\t: %s\n",
- (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
+ (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
seq_printf(seq, "periodic_IRQ\t: %s\n",
- (imr & AT91_RTC_SECEV) ? "yes" : "no");
+ (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
return 0;
}
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
+ at91_rtc_imr = 0;
ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
+ at91_rtc_imr = 0;
free_irq(irq, pdev);
rtc_device_unregister(rtc);
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
/* AT91RM9200 RTC Power management control */
-static u32 at91_rtc_imr;
+static u32 at91_rtc_bkpimr;
+
static int at91_rtc_suspend(struct device *dev)
{
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
- & (AT91_RTC_ALARM|AT91_RTC_SECEV);
- if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
+ if (at91_rtc_bkpimr) {
+ if (device_may_wakeup(dev)) {
enable_irq_wake(irq);
- else
- at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
- }
+ } else {
+ at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
+ at91_rtc_imr &= ~at91_rtc_bkpimr;
+ }
+}
return 0;
}
static int at91_rtc_resume(struct device *dev)
{
- if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ if (at91_rtc_bkpimr) {
+ if (device_may_wakeup(dev)) {
disable_irq_wake(irq);
- else
- at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+ } else {
+ at91_rtc_imr |= at91_rtc_bkpimr;
+ at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
+ }
}
return 0;
}
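
The pattern running through this file's hunks is a software shadow of the interrupt mask: every write to AT91_RTC_IER or AT91_RTC_IDR is paired with an update of at91_rtc_imr, and readers consult the shadow instead of the hardware IMR register, whose definition is dropped from the header that follows. A stripped-down sketch of keeping such a shadow coherent, using hypothetical helper names:

    #include <stdint.h>

    static uint32_t rtc_imr_shadow;     /* mirrors the enabled-interrupt bits */

    /* Stand-ins for at91_rtc_write(AT91_RTC_IER/IDR, mask). */
    static void hw_write_ier(uint32_t mask) { (void)mask; }
    static void hw_write_idr(uint32_t mask) { (void)mask; }

    void rtc_irq_enable(uint32_t mask)
    {
        rtc_imr_shadow |= mask;         /* shadow updated alongside hardware */
        hw_write_ier(mask);
    }

    void rtc_irq_disable(uint32_t mask)
    {
        hw_write_idr(mask);
        rtc_imr_shadow &= ~mask;
    }

    uint32_t rtc_irq_pending(uint32_t status_reg)
    {
        return status_reg & rtc_imr_shadow;  /* replaces reading AT91_RTC_IMR */
    }
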
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h
index da1945e5f71..5f940b6844c 100644
--- a/drivers/rtc/rtc-at91rm9200.h
+++ b/drivers/rtc/rtc-at91rm9200.h
@@ -64,7 +64,6 @@
#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
-#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
#define AT91_RTC_VER 0x2c /* Valid Entry Register */
#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 0dde688ca09..969abbad7fe 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
- rtc->irq = platform_get_irq_byname(pdev, "ALM");
- ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
- da9052_rtc_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ALM", rtc);
+ rtc->irq = DA9052_IRQ_ALARM;
+ ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
+ da9052_rtc_irq, rtc);
if (ret != 0) {
rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index db0cf7c8add..a0fc7b9eea6 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -166,6 +166,7 @@ static int chap_server_compute_md5(
{
char *endptr;
unsigned long id;
+ unsigned char id_as_uchar;
unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
@@ -355,7 +356,9 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, &id, 1);
+ /* To handle both endiannesses */
+ id_as_uchar = id;
+ sg_init_one(&sg, &id_as_uchar, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index bc02b018ae4..37ffc5bd239 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,7 @@
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 1024
+#define FD_MAX_SECTORS 2048
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 82e78d72fdb..e992b27aa09 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
- while (len > 0 && data_len > 0) {
+ /*
+ * We only have one page of data in each sg element,
+ * we can not cross a page boundary.
+ */
+ if (off + len > PAGE_SIZE)
+ goto fail;
+
+ if (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
@@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio = NULL;
}
- len -= bytes;
data_len -= bytes;
- off = 0;
}
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 290230de2c5..60d4b5185f3 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
- if (!ops->execute_sync_cache)
- return TCM_UNSUPPORTED_SCSI_OPCODE;
+ if (!ops->execute_sync_cache) {
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
+ break;
+ }
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 9169d6a5d7e..aac9d2727e3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -711,7 +711,8 @@ int core_tpg_register(
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
- kfree(se_tpg);
+ array_free(se_tpg->tpg_lun_list,
+ TRANSPORT_MAX_LUNS_PER_TPG);
return -ENOMEM;
}
}
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 7b0bfa0e7a9..3078c403b42 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->sensor) {
- dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
- return -EADDRNOTAVAIL;
- }
+ priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->sensor))
+ return PTR_ERR(priv->sensor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
dev_err(&pdev->dev, "Failed to get platform resource\n");
return -ENODEV;
}
- priv->control = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->control) {
- dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
- return -EADDRNOTAVAIL;
- }
+ priv->control = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->control))
+ return PTR_ERR(priv->control);
ret = dove_init_sensor(priv);
if (ret) {
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index e04ebd8671a..46568c078de 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
if (IS_ERR(th_zone->therm_dev)) {
pr_err("Failed to register thermal zone device\n");
- ret = -EINVAL;
+ ret = PTR_ERR(th_zone->therm_dev);
goto err_unregister;
}
th_zone->mode = THERMAL_DEVICE_ENABLED;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 65cb4f09e8f..e5500edb528 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->sensor) {
- dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
- return -EADDRNOTAVAIL;
- }
+ priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->sensor))
+ return PTR_ERR(priv->sensor);
thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
priv, &ops, NULL, 0, 0);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 28f09199401..2cc5b6115e3 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
struct device *dev = rcar_priv_to_dev(priv);
int i;
int ctemp, old, new;
+ int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
if (!ctemp) {
dev_err(dev, "thermal sensor was broken\n");
- return -EINVAL;
+ goto err_out_unlock;
}
/*
@@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
priv->ctemp = ctemp;
-
+ ret = 0;
+err_out_unlock:
mutex_unlock(&priv->lock);
-
- return 0;
+ return ret;
}
static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
@@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
struct resource *res, *irq;
int mres = 0;
int i;
+ int ret = -ENODEV;
int idle = IDLE_INTERVAL;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
@@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/*
* rcar_has_irq_support() will be enabled
*/
- common->base = devm_request_and_ioremap(dev, res);
- if (!common->base) {
- dev_err(dev, "Unable to ioremap thermal register\n");
- return -ENOMEM;
- }
+ common->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(common->base))
+ return PTR_ERR(common->base);
/* enable temperature comparation */
rcar_thermal_common_write(common, ENR, 0x00030303);
@@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->base = devm_request_and_ioremap(dev, res);
- if (!priv->base) {
- dev_err(dev, "Unable to ioremap priv register\n");
- return -ENOMEM;
- }
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
priv->common = common;
priv->id = i;
@@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
idle);
if (IS_ERR(priv->zone)) {
dev_err(dev, "can't register thermal zone\n");
+ ret = PTR_ERR(priv->zone);
goto error_unregister;
}
@@ -460,7 +459,7 @@ error_unregister:
rcar_thermal_for_each_priv(priv, common)
thermal_zone_device_unregister(priv->zone);
- return -ENODEV;
+ return ret;
}
static int rcar_thermal_remove(struct platform_device *pdev)
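/*
 * The dove, kirkwood and rcar thermal hunks above all replace
 * devm_request_and_ioremap() with devm_ioremap_resource(), which prints
 * its own diagnostics and reports failures as ERR_PTR() values. A minimal
 * sketch of the resulting probe pattern; the function name below is
 * illustrative and not taken from any of those drivers.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_thermal_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		/* the helper already logged the problem; pass on its error
		 * code instead of a hard-coded -ENOMEM or -EADDRNOTAVAIL */
		return PTR_ERR(base);

	return 0;
}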
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index e4ca345873c..d7799deacb2 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
static struct vcs_poll_data *
vcs_poll_data_get(struct file *file)
{
- struct vcs_poll_data *poll = file->private_data;
+ struct vcs_poll_data *poll = file->private_data, *kill = NULL;
if (poll)
return poll;
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
file->private_data = poll;
} else {
/* someone else raced ahead of us */
- vcs_poll_data_free(poll);
+ kill = poll;
poll = file->private_data;
}
spin_unlock(&file->f_lock);
+ if (kill)
+ vcs_poll_data_free(kill);
return poll;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8ac25adf31b..387dc6c8ad2 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port)
dev_dbg(&acm->control->dev, "%s\n", __func__);
- tty_unregister_device(acm_tty_driver, acm->minor);
acm_release_minor(acm);
usb_put_intf(acm->control);
kfree(acm->country_codes);
@@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf,
int num_rx_buf;
int i;
int combined_interfaces = 0;
+ struct device *tty_dev;
+ int rv = -ENOMEM;
/* normal quirks */
quirks = (unsigned long)id->driver_info;
@@ -1339,11 +1340,24 @@ skip_countries:
usb_set_intfdata(data_interface, acm);
usb_get_intf(control_interface);
- tty_port_register_device(&acm->port, acm_tty_driver, minor,
+ tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
+ if (IS_ERR(tty_dev)) {
+ rv = PTR_ERR(tty_dev);
+ goto alloc_fail8;
+ }
return 0;
+alloc_fail8:
+ if (acm->country_codes) {
+ device_remove_file(&acm->control->dev,
+ &dev_attr_wCountryCodes);
+ device_remove_file(&acm->control->dev,
+ &dev_attr_iCountryCodeRelDate);
+ }
+ device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
alloc_fail7:
+ usb_set_intfdata(intf, NULL);
for (i = 0; i < ACM_NW; i++)
usb_free_urb(acm->wb[i].urb);
alloc_fail6:
@@ -1359,7 +1373,7 @@ alloc_fail2:
acm_release_minor(acm);
kfree(acm);
alloc_fail:
- return -ENOMEM;
+ return rv;
}
static void stop_data_traffic(struct acm *acm)
@@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf)
stop_data_traffic(acm);
+ tty_unregister_device(acm_tty_driver, acm->minor);
+
usb_free_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_free_urb(acm->wb[i].urb);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a48e73..2b487d4797b 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct hc_driver *driver;
struct usb_hcd *hcd;
int retval;
+ int hcd_irq = 0;
if (usb_disabled())
return -ENODEV;
@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
dev->current_state = PCI_D0;
- /* The xHCI driver supports MSI and MSI-X,
- * so don't fail if the BIOS doesn't provide a legacy IRQ.
+ /*
+ * The xHCI driver has its own irq management
+ * make sure irq setup is not touched for xhci in generic hcd code
*/
- if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
- dev_err(&dev->dev,
- "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
- pci_name(dev));
- retval = -ENODEV;
- goto disable_pci;
+ if ((driver->flags & HCD_MASK) != HCD_USB3) {
+ if (!dev->irq) {
+ dev_err(&dev->dev,
+ "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+ pci_name(dev));
+ retval = -ENODEV;
+ goto disable_pci;
+ }
+ hcd_irq = dev->irq;
}
hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_master(dev);
- retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
+ retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
if (retval != 0)
goto unmap_registers;
set_hs_companion(dev, hcd);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 71beeb83355..cc9c49c57c8 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_rndis *rndis = req->context;
- struct usb_composite_dev *cdev = rndis->port.func.config->cdev;
int status;
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
if (status < 0)
- ERROR(cdev, "RNDIS command error %d, %d/%d\n",
+ pr_err("RNDIS command error %d, %d/%d\n",
status, req->actual, req->length);
// spin_unlock(&dev->lock);
}
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 3953dd4d718..3b343b23e4b 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev)
goto error;
gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
- for (i = func_num; --i; ) {
+ for (i = func_num; i--; ) {
ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
if (unlikely(ret < 0)) {
while (++i < func_num)
@@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
gether_cleanup();
gfs_ether_setup = false;
- for (i = func_num; --i; )
+ for (i = func_num; i--; )
if (ffs_tab[i].ffs_data)
functionfs_unbind(ffs_tab[i].ffs_data);
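/*
 * The g_ffs hunks above change "for (i = func_num; --i; )" to
 * "for (i = func_num; i--; )". The stand-alone snippet below shows the
 * difference: the old form tests the value after decrementing, so it
 * stops before ever reaching index 0, while the fixed form walks every
 * index from func_num - 1 down to 0.
 */
#include <stdio.h>

int main(void)
{
	int func_num = 3;
	int i;

	for (i = func_num; --i; )	/* old loop: visits 2, 1 and skips 0 */
		printf("old visits %d\n", i);

	for (i = func_num; i--; )	/* fixed loop: visits 2, 1, 0 */
		printf("new visits %d\n", i);

	return 0;
}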
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index d226058e3b8..32524b63195 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -59,7 +59,7 @@ static const char * const ep_name[] = {
};
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-#ifdef CONFIG_USB_GADGET_NET2272_DMA
+#ifdef CONFIG_USB_NET2272_DMA
/*
* use_dma: the NET2272 can use an external DMA controller.
* Note that since there is no generic DMA api, some functions,
@@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
for (i = 0; i < 4; ++i)
net2272_dequeue_all(&dev->ep[i]);
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
net2272_usb_reinit(dev);
}
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index a1b650e1133..3bd0f992fb4 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget,
err_func:
device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
- driver->unbind (&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
return retval;
@@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
for (i = 0; i < 7; i++)
nuke (&dev->ep [i]);
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
usb_reinit (dev);
}
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index c5034d9c946..b369292d4b9 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -136,7 +136,7 @@ static struct portmaster {
pr_debug(fmt, ##arg)
#endif /* pr_vdebug */
#else
-#ifndef pr_vdebig
+#ifndef pr_vdebug
#define pr_vdebug(fmt, arg...) \
({ if (0) pr_debug(fmt, ##arg); })
#endif /* pr_vdebug */
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 2a9cd369f71..f8f62c3ed65 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
usb_gadget_disconnect(udc->gadget);
udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc->gadget, udc->driver);
+ usb_gadget_udc_stop(udc->gadget, NULL);
udc->driver = NULL;
udc->dev.driver = NULL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5726cb144ab..416a6dce5e1 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
static void end_unlink_async(struct ehci_hcd *ehci);
static void unlink_empty_async(struct ehci_hcd *ehci);
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4d3b294f203..7d06e77f6c4 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->rh_state = EHCI_RH_SUSPENDED;
end_unlink_async(ehci);
- unlink_empty_async(ehci);
+ unlink_empty_async_suspended(ehci);
ehci_handle_intr_unlinks(ehci);
end_free_itds(ehci);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 5464665f0b6..23d13690428 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
}
}
+/* The root hub is suspended; unlink all the async QHs */
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+
+ while (ehci->async->qh_next.qh) {
+ qh = ehci->async->qh_next.qh;
+ WARN_ON(!list_empty(&qh->qtd_list));
+ single_unlink_async(ehci, qh);
+ }
+ start_iaa_cycle(ehci, false);
+}
+
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 20dbdcbe9b0..c3fa1305f83 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
* (a) SMP races against real IAA firing and retriggering, and
* (b) clean HC shutdown, when IAA watchdog was pending.
*/
- if (ehci->async_iaa) {
+ if (1) {
u32 cmd, status;
/* If we get here, IAA is *REALLY* late. It's barely
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f1f01a834ba..849470b1883 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
* generate interrupts. Don't even try to enable MSI.
*/
if (xhci->quirks & XHCI_BROKEN_MSI)
- return 0;
+ goto legacy_irq;
/* unregister the legacy interrupt */
if (hcd->irq)
@@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
return -EINVAL;
}
+ legacy_irq:
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f791bd0aee6..2c510e4a7d4 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -206,8 +206,8 @@ struct xhci_op_regs {
/* bits 12:31 are reserved (and should be preserved on writes). */
/* IMAN - Interrupt Management Register */
-#define IMAN_IP (1 << 1)
-#define IMAN_IE (1 << 0)
+#define IMAN_IE (1 << 1)
+#define IMAN_IP (1 << 0)
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 7c71769d71f..41613a2b35e 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err;
- err = musb->int_usb & USB_INTR_VBUSERROR;
+ err = musb->int_usb & MUSB_INTR_VBUSERROR;
if (err) {
/*
* The Mentor core doesn't debounce VBUS as needed
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index be18537c5f1..83eddedcd9b 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request,
static inline void unmap_dma_buffer(struct musb_request *request,
struct musb *musb)
{
- if (!is_buffer_mapped(request))
+ struct musb_ep *musb_ep = request->ep;
+
+ if (!is_buffer_mapped(request) || !musb_ep->dma)
return;
if (request->request.dma == DMA_ADDR_INVALID) {
@@ -195,7 +197,10 @@ __acquires(ep->musb->lock)
ep->busy = 1;
spin_unlock(&musb->lock);
- unmap_dma_buffer(req, musb);
+
+ if (!dma_mapping_error(&musb->g.dev, request->dma))
+ unmap_dma_buffer(req, musb);
+
if (request->status == 0)
dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index cbd904b8fba..4775f8209e5 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial)
}
struct ark3116_private {
- wait_queue_head_t delta_msr_wait;
struct async_icount icount;
int irda; /* 1 for irda device */
@@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port)
if (!priv)
return -ENOMEM;
- init_waitqueue_head(&priv->delta_msr_wait);
mutex_init(&priv->hw_lock);
spin_lock_init(&priv->status_lock);
@@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty,
case TIOCMIWAIT:
for (;;) {
struct async_icount prev = priv->icount;
- interruptible_sleep_on(&priv->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
if ((prev.rng == priv->icount.rng) &&
(prev.dsr == priv->icount.dsr) &&
(prev.dcd == priv->icount.dcd) &&
@@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
priv->icount.dcd++;
if (msr & UART_MSR_TERI)
priv->icount.rng++;
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
}
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d255f66e708..07d4650a32a 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
struct ch341_private {
spinlock_t lock; /* access lock */
- wait_queue_head_t delta_msr_wait; /* wait queue for modem status */
unsigned baud_rate; /* set baud rate */
u8 line_control; /* set line control value RTS/DTR */
u8 line_status; /* active status of modem control inputs */
@@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->delta_msr_wait);
priv->baud_rate = DEFAULT_BAUD_RATE;
priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
@@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on)
priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
spin_unlock_irqrestore(&priv->lock, flags);
ch341_set_handshake(port->serial->dev, priv->line_control);
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
}
static void ch341_close(struct usb_serial_port *port)
@@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb)
tty_kref_put(tty);
}
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
}
exit:
@@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->lock, flags);
while (!multi_change) {
- interruptible_sleep_on(&priv->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
status = priv->line_status;
multi_change = priv->multi_status_change;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 8efa19d0e9f..ba7352e4187 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -111,7 +111,6 @@ struct cypress_private {
int baud_rate; /* stores current baud rate in
integer form */
int isthrottled; /* if throttled, discard reads */
- wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */
char prev_status, diff_status; /* used for TIOCMIWAIT */
/* we pass a pointer to this as the argument sent to
cypress_set_termios old_termios */
@@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
kfree(priv);
return -ENOMEM;
}
- init_waitqueue_head(&priv->delta_msr_wait);
usb_reset_configuration(serial->dev);
@@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty,
switch (cmd) {
/* This code comes from drivers/char/serial.c and ftdi_sio.c */
case TIOCMIWAIT:
- while (priv != NULL) {
- interruptible_sleep_on(&priv->delta_msr_wait);
+ for (;;) {
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
- else {
+
+ if (port->serial->disconnected)
+ return -EIO;
+
+ {
char diff = priv->diff_status;
if (diff == 0)
return -EIO; /* no change => error */
@@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb)
if (priv->current_status != priv->prev_status) {
priv->diff_status |= priv->current_status ^
priv->prev_status;
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
priv->prev_status = priv->current_status;
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index b1b2dc64b50..a172ad5c5ce 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
struct f81232_private {
spinlock_t lock;
- wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
};
@@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb)
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
if (!urb->actual_length)
return;
@@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
- interruptible_sleep_on(&priv->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(port, priv);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index edd162df49c..d4809d55147 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -69,9 +69,7 @@ struct ftdi_private {
int flags; /* some ASYNC_xxxx flags are supported */
unsigned long last_dtr_rts; /* saved modem control outputs */
struct async_icount icount;
- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
char prev_status; /* Used for TIOCMIWAIT */
- bool dev_gone; /* Used to abort TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
__u16 interface; /* FT2232C, FT2232H or FT4232H port interface
(0 for FT232/245) */
@@ -1691,10 +1689,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
kref_init(&priv->kref);
mutex_init(&priv->cfg_lock);
- init_waitqueue_head(&priv->delta_msr_wait);
priv->flags = ASYNC_LOW_LATENCY;
- priv->dev_gone = false;
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
@@ -1840,8 +1836,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- priv->dev_gone = true;
- wake_up_interruptible_all(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
remove_sysfs_attrs(port);
@@ -1989,7 +1984,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
if (diff_status & FTDI_RS0_RLSD)
priv->icount.dcd++;
- wake_up_interruptible_all(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
priv->prev_status = status;
}
@@ -2440,11 +2435,15 @@ static int ftdi_ioctl(struct tty_struct *tty,
*/
case TIOCMIWAIT:
cprev = priv->icount;
- while (!priv->dev_gone) {
- interruptible_sleep_on(&priv->delta_msr_wait);
+ for (;;) {
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
cnow = priv->icount;
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
@@ -2454,8 +2453,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
}
cprev = cnow;
}
- return -EIO;
- break;
case TIOCSERGETLSR:
return get_lsr_info(port, (struct serial_struct __user *)arg);
break;
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 1a07b12ef34..81caf5623ee 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port)
if (!serial)
return;
- mutex_lock(&port->serial->disc_mutex);
-
- if (!port->serial->disconnected)
- garmin_clear(garmin_data_p);
+ garmin_clear(garmin_data_p);
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
@@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port)
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
garmin_data_p->state = STATE_DISCONNECTED;
-
- mutex_unlock(&port->serial->disc_mutex);
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b00e5cbf741..efd8b978128 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -110,7 +110,6 @@ struct edgeport_port {
wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */
wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
struct async_icount icount;
struct usb_serial_port *port; /* loop back to the owner of this object */
@@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our wait queues */
init_waitqueue_head(&edge_port->wait_open);
init_waitqueue_head(&edge_port->wait_chase);
- init_waitqueue_head(&edge_port->delta_msr_wait);
init_waitqueue_head(&edge_port->wait_command);
/* initialize our icount structure */
@@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty,
dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number);
cprev = edge_port->icount;
while (1) {
- prepare_to_wait(&edge_port->delta_msr_wait,
+ prepare_to_wait(&port->delta_msr_wait,
&wait, TASK_INTERRUPTIBLE);
schedule();
- finish_wait(&edge_port->delta_msr_wait, &wait);
+ finish_wait(&port->delta_msr_wait, &wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
cnow = edge_port->icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
icount->dcd++;
if (newMsr & EDGEPORT_MSR_DELTA_RI)
icount->rng++;
- wake_up_interruptible(&edge_port->delta_msr_wait);
+ wake_up_interruptible(&edge_port->port->delta_msr_wait);
}
/* Save the new modem status */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c23776679f7..7777172206d 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -87,9 +87,6 @@ struct edgeport_port {
int close_pending;
int lsr_event;
struct async_icount icount;
- wait_queue_head_t delta_msr_wait; /* for handling sleeping while
- waiting for msr change to
- happen */
struct edgeport_serial *edge_serial;
struct usb_serial_port *port;
__u8 bUartMode; /* Port type, 0: RS232, etc. */
@@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
icount->dcd++;
if (msr & EDGEPORT_MSR_DELTA_RI)
icount->rng++;
- wake_up_interruptible(&edge_port->delta_msr_wait);
+ wake_up_interruptible(&edge_port->port->delta_msr_wait);
}
/* Save the new modem status */
@@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
dev = port->serial->dev;
memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
- init_waitqueue_head(&edge_port->delta_msr_wait);
/* turn off loopback */
status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
@@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty,
dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
cprev = edge_port->icount;
while (1) {
- interruptible_sleep_on(&edge_port->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
cnow = edge_port->icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
+ .get_icount = edge_get_icount,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index a64d420f687..06d5a60be2c 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -114,8 +114,6 @@ struct mct_u232_private {
unsigned char last_msr; /* Modem Status Register */
unsigned int rx_flags; /* Throttling flags */
struct async_icount icount;
- wait_queue_head_t msr_wait; /* for handling sleeping while waiting
- for msr change to happen */
};
#define THROTTLED 0x01
@@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->msr_wait);
usb_set_serial_port_data(port, priv);
@@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
tty_kref_put(tty);
}
#endif
- wake_up_interruptible(&priv->msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
spin_unlock_irqrestore(&priv->lock, flags);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_struct *tty,
cprev = mct_u232_port->icount;
spin_unlock_irqrestore(&mct_u232_port->lock, flags);
for ( ; ; ) {
- prepare_to_wait(&mct_u232_port->msr_wait,
+ prepare_to_wait(&port->delta_msr_wait,
&wait, TASK_INTERRUPTIBLE);
schedule();
- finish_wait(&mct_u232_port->msr_wait, &wait);
+ finish_wait(&port->delta_msr_wait, &wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&mct_u232_port->lock, flags);
cnow = mct_u232_port->icount;
spin_unlock_irqrestore(&mct_u232_port->lock, flags);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 809fb329eca..b8051fa6191 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -219,7 +219,6 @@ struct moschip_port {
char open;
char open_ports;
wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
int delta_msr_cond;
struct async_icount icount;
struct usb_serial_port *port; /* loop back to the owner of this object */
@@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
icount->rng++;
smp_wmb();
}
+
+ mos7840_port->delta_msr_cond = 1;
+ wake_up_interruptible(&port->port->delta_msr_wait);
}
}
@@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our wait queues */
init_waitqueue_head(&mos7840_port->wait_chase);
- init_waitqueue_head(&mos7840_port->delta_msr_wait);
/* initialize our icount structure */
memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));
@@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
mos7840_port->read_urb_busy = false;
}
}
- wake_up(&mos7840_port->delta_msr_wait);
- mos7840_port->delta_msr_cond = 1;
dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__,
mos7840_port->shadowLCR);
}
@@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty,
while (1) {
/* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */
mos7840_port->delta_msr_cond = 0;
- wait_event_interruptible(mos7840_port->delta_msr_wait,
- (mos7840_port->
+ wait_event_interruptible(port->delta_msr_wait,
+ (port->serial->disconnected ||
+ mos7840_port->
delta_msr_cond == 1));
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
cnow = mos7840_port->icount;
smp_rmb();
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a958fd41b5b..87c71ccfee8 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -188,7 +188,6 @@ struct oti6858_private {
u8 setup_done;
struct delayed_work delayed_setup_work;
- wait_queue_head_t intr_wait;
struct usb_serial_port *port; /* USB port with which associated */
};
@@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->intr_wait);
priv->port = port;
INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
@@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
- wait_event_interruptible(priv->intr_wait,
+ wait_event_interruptible(port->delta_msr_wait,
+ port->serial->disconnected ||
priv->status.pin_state != prev);
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
status = priv->status.pin_state & PIN_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb)
if (!priv->transient) {
if (xs->pin_state != priv->status.pin_state)
- wake_up_interruptible(&priv->intr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 54adc9125e5..3b10018d89a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -139,7 +139,6 @@ struct pl2303_serial_private {
struct pl2303_private {
spinlock_t lock;
- wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
};
@@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(port, priv);
@@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
- interruptible_sleep_on(&priv->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->line_status & UART_BREAK_ERROR)
usb_serial_handle_break(port);
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
tty = tty_port_tty_get(&port->port);
if (!tty)
@@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb)
line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
if (!urb->actual_length)
return;
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index d643a4d4d77..75f125ddb0c 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -128,7 +128,6 @@ struct qt2_port_private {
u8 shadowLSR;
u8 shadowMSR;
- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
struct async_icount icount;
struct usb_serial_port *port;
@@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->lock, flags);
while (1) {
- wait_event_interruptible(priv->delta_msr_wait,
- ((priv->icount.rng != prev.rng) ||
+ wait_event_interruptible(port->delta_msr_wait,
+ (port->serial->disconnected ||
+ (priv->icount.rng != prev.rng) ||
(priv->icount.dsr != prev.dsr) ||
(priv->icount.dcd != prev.dcd) ||
(priv->icount.cts != prev.cts)));
@@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
cur = priv->icount;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port)
spin_lock_init(&port_priv->lock);
spin_lock_init(&port_priv->urb_lock);
- init_waitqueue_head(&port_priv->delta_msr_wait);
port_priv->port = port;
port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
if (newMSR & UART_MSR_TERI)
port_priv->icount.rng++;
- wake_up_interruptible(&port_priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
}
}
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 91ff8e3bddb..549ef68ff5f 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -149,7 +149,6 @@ enum spcp8x5_type {
struct spcp8x5_private {
spinlock_t lock;
enum spcp8x5_type type;
- wait_queue_head_t delta_msr_wait;
u8 line_control;
u8 line_status;
};
@@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->lock);
- init_waitqueue_head(&priv->delta_msr_wait);
priv->type = type;
usb_set_serial_port_data(port , priv);
@@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb)
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
/* wake up the wait for termios */
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
if (!urb->actual_length)
return;
@@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
while (1) {
/* wake up in bulk read */
- interruptible_sleep_on(&priv->delta_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->lock, flags);
status = priv->line_status;
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index b57cf841c5b..4b2a19757b4 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -61,7 +61,6 @@ struct ssu100_port_private {
spinlock_t status_lock;
u8 shadowLSR;
u8 shadowMSR;
- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
struct async_icount icount;
};
@@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
spin_unlock_irqrestore(&priv->status_lock, flags);
while (1) {
- wait_event_interruptible(priv->delta_msr_wait,
- ((priv->icount.rng != prev.rng) ||
+ wait_event_interruptible(port->delta_msr_wait,
+ (port->serial->disconnected ||
+ (priv->icount.rng != prev.rng) ||
(priv->icount.dsr != prev.dsr) ||
(priv->icount.dcd != prev.dcd) ||
(priv->icount.cts != prev.cts)));
@@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
if (signal_pending(current))
return -ERESTARTSYS;
+ if (port->serial->disconnected)
+ return -EIO;
+
spin_lock_irqsave(&priv->status_lock, flags);
cur = priv->icount;
spin_unlock_irqrestore(&priv->status_lock, flags);
@@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port)
return -ENOMEM;
spin_lock_init(&priv->status_lock);
- init_waitqueue_head(&priv->delta_msr_wait);
usb_set_serial_port_data(port, priv);
@@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
priv->icount.dcd++;
if (msr & UART_MSR_TERI)
priv->icount.rng++;
- wake_up_interruptible(&priv->delta_msr_wait);
+ wake_up_interruptible(&port->delta_msr_wait);
}
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 39cb9b807c3..73deb029fc0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -74,7 +74,6 @@ struct ti_port {
int tp_flags;
int tp_closing_wait;/* in .01 secs */
struct async_icount tp_icount;
- wait_queue_head_t tp_msr_wait; /* wait for msr change */
wait_queue_head_t tp_write_wait;
struct ti_device *tp_tdev;
struct usb_serial_port *tp_port;
@@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port)
else
tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
tport->tp_closing_wait = closing_wait;
- init_waitqueue_head(&tport->tp_msr_wait);
init_waitqueue_head(&tport->tp_write_wait);
if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
kfree(tport);
@@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty,
dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
cprev = tport->tp_icount;
while (1) {
- interruptible_sleep_on(&tport->tp_msr_wait);
+ interruptible_sleep_on(&port->delta_msr_wait);
if (signal_pending(current))
return -ERESTARTSYS;
+
+ if (port->serial->disconnected)
+ return -EIO;
+
cnow = tport->tp_icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
icount->dcd++;
if (msr & TI_MSR_DELTA_RI)
icount->rng++;
- wake_up_interruptible(&tport->tp_msr_wait);
+ wake_up_interruptible(&tport->tp_port->delta_msr_wait);
spin_unlock_irqrestore(&tport->tp_lock, flags);
}
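/*
 * The usb-serial hunks above (ark3116 through ti_usb_3410_5052) converge
 * on one TIOCMIWAIT pattern: sleep on the tty_port's shared
 * delta_msr_wait queue instead of a driver-private one, and bail out with
 * -EIO once the device has been disconnected so the ioctl cannot sleep
 * forever after unplug. A condensed sketch of that loop follows; the
 * private structure and function name are illustrative, not taken from
 * any one driver.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/serial.h>
#include <linux/usb/serial.h>
#include <linux/wait.h>

struct example_port_private {
	struct async_icount icount;	/* updated from the status/interrupt URB */
};

static int example_wait_modem_info(struct usb_serial_port *port,
				   struct example_port_private *priv)
{
	struct async_icount prev = priv->icount;

	while (1) {
		wait_event_interruptible(port->delta_msr_wait,
					 port->serial->disconnected ||
					 priv->icount.rng != prev.rng ||
					 priv->icount.dsr != prev.dsr ||
					 priv->icount.dcd != prev.dcd ||
					 priv->icount.cts != prev.cts);
		if (signal_pending(current))
			return -ERESTARTSYS;
		if (port->serial->disconnected)
			return -EIO;	/* device went away while we slept */

		if (priv->icount.rng != prev.rng ||
		    priv->icount.dsr != prev.dsr ||
		    priv->icount.dcd != prev.dcd ||
		    priv->icount.cts != prev.cts)
			return 0;	/* one of the modem-status lines changed */
	}
}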
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index a19ed74d770..2e70efa08b7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref)
}
}
+ usb_put_intf(serial->interface);
usb_put_dev(serial->dev);
kfree(serial);
}
@@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
}
serial->dev = usb_get_dev(dev);
serial->type = driver;
- serial->interface = interface;
+ serial->interface = usb_get_intf(interface);
kref_init(&serial->kref);
mutex_init(&serial->disc_mutex);
serial->minor = SERIAL_TTY_NO_MINOR;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index da04a074e79..1799335288b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -496,6 +496,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
+/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
+UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
+ "Samsung",
+ "YP-Z3",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
* Device uses standards-violating 32-byte Bulk Command Block Wrappers and
* reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 964ff22bf28..aeb00fc2d3b 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
+#include <linux/slab.h>
#include "vfio_pci_private.h"
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3639371fa69..a96509187de 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -22,6 +22,7 @@
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/slab.h>
#include "vfio_pci_private.h"
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 9951297b242..43fb11ee2e8 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -850,7 +850,7 @@ static int vhost_scsi_clear_endpoint(
for (index = 0; index < vs->dev.nvqs; ++index) {
if (!vhost_vq_access_ok(&vs->vqs[index])) {
ret = -EFAULT;
- goto err;
+ goto err_dev;
}
}
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -860,10 +860,11 @@ static int vhost_scsi_clear_endpoint(
if (!tv_tpg)
continue;
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
tv_tport = tv_tpg->tport;
if (!tv_tport) {
ret = -ENODEV;
- goto err;
+ goto err_tpg;
}
if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -872,16 +873,19 @@ static int vhost_scsi_clear_endpoint(
tv_tport->tport_name, tv_tpg->tport_tpgt,
t->vhost_wwpn, t->vhost_tpgt);
ret = -EINVAL;
- goto err;
+ goto err_tpg;
}
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg[target] = NULL;
vs->vs_endpoint = false;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
mutex_unlock(&vs->dev.mutex);
return 0;
-err:
+err_tpg:
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+err_dev:
mutex_unlock(&vs->dev.mutex);
return ret;
}
@@ -937,6 +941,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
vhost_scsi_flush_vq(vs, i);
+ vhost_work_flush(&vs->dev, &vs->vs_completion_work);
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index 3f2519d3071..e06cd5d90c9 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/fb.h>
+#include <linux/io.h>
#include <linux/platform_data/video-ep93xx.h>
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 755556ca5b2..45169cbaba6 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -169,6 +169,7 @@ struct mxsfb_info {
unsigned dotclk_delay;
const struct mxsfb_devdata *devdata;
int mapped;
+ u32 sync;
};
#define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info)
vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
- if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+ if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+ if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&fb_info->modelist);
+ host->sync = pdata->sync;
+
ret = mxsfb_init_fbinfo(host);
if (ret != 0)
goto error_init_fb;
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index e3b8f757d2d..0e9d8c479c3 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -40,13 +40,12 @@
#include "sp5100_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.03"
+#define TCO_VERSION "0.05"
#define TCO_MODULE_NAME "SP5100 TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
/* internal variables */
static u32 tcobase_phys;
-static u32 resbase_phys;
static u32 tco_wdt_fired;
static void __iomem *tcobase;
static unsigned int pm_iobase;
@@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
static unsigned long timer_alive;
static char tco_expect_close;
static struct pci_dev *sp5100_tco_pci;
-static struct resource wdt_res = {
- .name = "Watchdog Timer",
- .flags = IORESOURCE_MEM,
-};
/* the watchdog platform device */
static struct platform_device *sp5100_tco_platform_device;
@@ -75,12 +70,6 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned int force_addr;
-module_param(force_addr, uint, 0);
-MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
- " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
- " WHAT YOU ARE DOING (default=none)");
-
/*
* Some TCO specific functions
*/
@@ -176,39 +165,6 @@ static void tco_timer_enable(void)
}
}
-static void tco_timer_disable(void)
-{
- int val;
-
- if (sp5100_tco_pci->revision >= 0x40) {
- /* For SB800 or later */
- /* Enable watchdog decode bit and Disable watchdog timer */
- outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
- val = inb(SB800_IO_PM_DATA_REG);
- val |= SB800_PCI_WATCHDOG_DECODE_EN;
- val |= SB800_PM_WATCHDOG_DISABLE;
- outb(val, SB800_IO_PM_DATA_REG);
- } else {
- /* For SP5100 or SB7x0 */
- /* Enable watchdog decode bit */
- pci_read_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- &val);
-
- val |= SP5100_PCI_WATCHDOG_DECODE_EN;
-
- pci_write_config_dword(sp5100_tco_pci,
- SP5100_PCI_WATCHDOG_MISC_REG,
- val);
-
- /* Disable Watchdog timer */
- outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
- val = inb(SP5100_IO_PM_DATA_REG);
- val |= SP5100_PM_WATCHDOG_DISABLE;
- outb(val, SP5100_IO_PM_DATA_REG);
- }
-}
-
/*
* /dev/watchdog handling
*/
@@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void)
{
struct pci_dev *dev = NULL;
const char *dev_name = NULL;
- u32 val, tmp_val;
+ u32 val;
u32 index_reg, data_reg, base_addr;
/* Match the PCI device */
@@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void)
} else
pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
- /*
- * Lastly re-programming the watchdog timer MMIO address,
- * This method is a last resort...
- *
- * Before re-programming, to ensure that the watchdog timer
- * is disabled, disable the watchdog timer.
- */
- tco_timer_disable();
-
- if (force_addr) {
- /*
- * Force the use of watchdog timer MMIO address, and aligned to
- * 8byte boundary.
- */
- force_addr &= ~0x7;
- val = force_addr;
-
- pr_info("Force the use of 0x%04x as MMIO address\n", val);
- } else {
- /*
- * Get empty slot into the resource tree for watchdog timer.
- */
- if (allocate_resource(&iomem_resource,
- &wdt_res,
- SP5100_WDT_MEM_MAP_SIZE,
- 0xf0000000,
- 0xfffffff8,
- 0x8,
- NULL,
- NULL)) {
- pr_err("MMIO allocation failed\n");
- goto unreg_region;
- }
-
- val = resbase_phys = wdt_res.start;
- pr_debug("Got 0x%04x from resource tree\n", val);
- }
-
- /* Restore to the low three bits */
- outb(base_addr+0, index_reg);
- tmp_val = val | (inb(data_reg) & 0x7);
-
- /* Re-programming the watchdog timer base address */
- outb(base_addr+0, index_reg);
- outb((tmp_val >> 0) & 0xff, data_reg);
- outb(base_addr+1, index_reg);
- outb((tmp_val >> 8) & 0xff, data_reg);
- outb(base_addr+2, index_reg);
- outb((tmp_val >> 16) & 0xff, data_reg);
- outb(base_addr+3, index_reg);
- outb((tmp_val >> 24) & 0xff, data_reg);
-
- if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
- dev_name)) {
- pr_err("MMIO address 0x%04x already in use\n", val);
- goto unreg_resource;
- }
+ pr_notice("failed to find MMIO address, giving up.\n");
+ goto unreg_region;
setup_wdt:
tcobase_phys = val;
@@ -555,9 +456,6 @@ setup_wdt:
unreg_mem_region:
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-unreg_resource:
- if (resbase_phys)
- release_resource(&wdt_res);
unreg_region:
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
exit:
@@ -567,7 +465,6 @@ exit:
static int sp5100_tco_init(struct platform_device *dev)
{
int ret;
- char addr_str[16];
/*
* Check whether or not the hardware watchdog is there. If found, then
@@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev)
clear_bit(0, &timer_alive);
/* Show module parameters */
- if (force_addr == tcobase_phys)
- /* The force_addr is vaild */
- sprintf(addr_str, "0x%04x", force_addr);
- else
- strcpy(addr_str, "none");
-
- pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
- "force_addr=%s)\n",
- tcobase, heartbeat, nowayout, addr_str);
+ pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
+ tcobase, heartbeat, nowayout);
return 0;
exit:
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
- if (resbase_phys)
- release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
return ret;
}
@@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void)
misc_deregister(&sp5100_tco_miscdev);
iounmap(tcobase);
release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
- if (resbase_phys)
- release_resource(&wdt_res);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
}
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 71594a0c14b..2b28c00da0d 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -57,7 +57,7 @@
#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
#define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
#define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
-#define SB800_ACPI_MMIO_SEL (1 << 2)
+#define SB800_ACPI_MMIO_SEL (1 << 1)
#define SB800_PM_WDT_MMIO_OFFSET 0xB00
diff --git a/firmware/Makefile b/firmware/Makefile
index cbb09ce9730..5d8ee1319b5 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
qlogic/12160.bin
fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
-fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
+fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw
fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
ess/maestro3_assp_minisrc.fw
diff --git a/firmware/qlogic/sd7220.fw.ihex b/firmware/intel/sd7220.fw.ihex
index a3363631911..a3363631911 100644
--- a/firmware/qlogic/sd7220.fw.ihex
+++ b/firmware/intel/sd7220.fw.ihex
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index cfd1ce34e0b..1d36db11477 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
}
- /* mechlistMIC */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- /* Check if we have reached the end of the blob, but with
- no mechListMic (e.g. NTLMSSP instead of KRB5) */
- if (ctx.error == ASN1_ERR_DEC_EMPTY)
- goto decode_negtoken_exit;
- cFYI(1, "Error decoding last part negTokenInit exit3");
- return 0;
- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
- /* tag = 3 indicating mechListMIC */
- cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end);
- return 0;
- }
-
- /* sequence */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, "Error decoding last part negTokenInit exit5");
- return 0;
- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
- || (tag != ASN1_SEQ)) {
- cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end);
- }
-
- /* sequence of */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, "Error decoding last part negTokenInit exit 7");
- return 0;
- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
- cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end);
- return 0;
- }
-
- /* general string */
- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, "Error decoding last part negTokenInit exit9");
- return 0;
- } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
- || (tag != ASN1_GENSTR)) {
- cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
- cls, con, tag, end, *end);
- return 0;
- }
- cFYI(1, "Need to call asn1_octets_decode() function for %s",
- ctx.pointer); /* is this UTF-8 or ASCII? */
-decode_negtoken_exit:
+ /*
+ * We currently ignore anything at the end of the SPNEGO blob after
+ * the mechTypes have been parsed, since none of that info is
+ * used at the moment.
+ */
return 1;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3cf8a15af91..345fc89c428 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq;
__u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
#endif
+/*
+ * Bumps the refcount for the cifs super block.
+ * Note that it should only be called if a reference to the VFS super block is
+ * already held, e.g. in the context of open-type syscalls. Otherwise it can
+ * race with atomic_dec_and_test in deactivate_locked_super.
+ */
+void
+cifs_sb_active(struct super_block *sb)
+{
+ struct cifs_sb_info *server = CIFS_SB(sb);
+
+ if (atomic_inc_return(&server->active) == 1)
+ atomic_inc(&sb->s_active);
+}
+
+void
+cifs_sb_deactive(struct super_block *sb)
+{
+ struct cifs_sb_info *server = CIFS_SB(sb);
+
+ if (atomic_dec_and_test(&server->active))
+ deactivate_super(sb);
+}
+
static int
cifs_read_super(struct super_block *sb)
{
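Editorial aside (not part of the patch): the comment above describes an "activate on first reference, release on last" pattern for the cifs super block. A minimal user-space sketch of that pattern, using C11 atomics and hypothetical names (sb_model, sb_active, sb_deactive), is shown below; the real code of course operates on struct super_block and kernel atomic_t.

#include <stdatomic.h>
#include <stdio.h>

struct sb_model {
	atomic_int active;	/* models cifs_sb_info->active            */
	atomic_int s_active;	/* models the VFS super_block refcount    */
};

static void sb_active(struct sb_model *sb)
{
	/* First local reference pins the super block exactly once. */
	if (atomic_fetch_add(&sb->active, 1) == 0)
		atomic_fetch_add(&sb->s_active, 1);
}

static void sb_deactive(struct sb_model *sb)
{
	/* Last local reference drops the extra super block pin. */
	if (atomic_fetch_sub(&sb->active, 1) == 1)
		atomic_fetch_sub(&sb->s_active, 1);
}

int main(void)
{
	struct sb_model sb = { 0 };

	sb_active(&sb);		/* e.g. cifs_new_fileinfo()  */
	sb_deactive(&sb);	/* e.g. cifsFileInfo_put()   */
	printf("active=%d s_active=%d\n",
	       atomic_load(&sb.active), atomic_load(&sb.s_active));
	return 0;
}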
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 7163419cecd..0e32c3446ce 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
+/* Functions related to super block operations */
+extern void cifs_sb_active(struct super_block *sb);
+extern void cifs_sb_deactive(struct super_block *sb);
+
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8c0d8557731..7a0dd99e450 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
mutex_init(&cfile->fh_mutex);
+ cifs_sb_active(inode->i_sb);
+
/*
* If the server returned a read oplock and we have mandatory brlocks,
* set oplock level to None.
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct super_block *sb = inode->i_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsLockInfo *li, *tmp;
struct cifs_fid fid;
struct cifs_pending_open open;
@@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
+ cifs_sb_deactive(sb);
kfree(cifs_file);
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0079696305c..20887bf6312 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc != 0) {
- rc = -ETXTBSY;
+ rc = -EBUSY;
goto undo_setattr;
}
@@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
if (rc == -ENOENT)
rc = 0;
else if (rc != 0) {
- rc = -ETXTBSY;
+ rc = -EBUSY;
goto undo_rename;
}
cifsInode->delete_pending = true;
@@ -1169,15 +1169,13 @@ psx_del_no_retry:
cifs_drop_nlink(inode);
} else if (rc == -ENOENT) {
d_drop(dentry);
- } else if (rc == -ETXTBSY) {
+ } else if (rc == -EBUSY) {
if (server->ops->rename_pending_delete) {
rc = server->ops->rename_pending_delete(full_path,
dentry, xid);
if (rc == 0)
cifs_drop_nlink(inode);
}
- if (rc == -ETXTBSY)
- rc = -EBUSY;
} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (attrs == NULL) {
@@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
* source. Note that cross directory moves do not work with
* rename by filehandle to various Windows servers.
*/
- if (rc == 0 || rc != -ETXTBSY)
+ if (rc == 0 || rc != -EBUSY)
goto do_rename_exit;
/* open-file renames don't work across directories */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index a82bc51fdc8..c0b25b28be6 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
{ERRdiffdevice, -EXDEV},
{ERRnofiles, -ENOENT},
{ERRwriteprot, -EROFS},
- {ERRbadshare, -ETXTBSY},
+ {ERRbadshare, -EBUSY},
{ERRlock, -EACCES},
{ERRunsup, -EINVAL},
{ERRnosuchshare, -ENXIO},
diff --git a/fs/dcache.c b/fs/dcache.c
index fbfae008ba4..e8bc3420d63 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
bool slash = false;
int error = 0;
- br_read_lock(&vfsmount_lock);
while (dentry != root->dentry || vfsmnt != root->mnt) {
struct dentry * parent;
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
if (!error && !slash)
error = prepend(buffer, buflen, "/", 1);
-out:
- br_read_unlock(&vfsmount_lock);
return error;
global_root:
@@ -2590,7 +2587,7 @@ global_root:
error = prepend(buffer, buflen, "/", 1);
if (!error)
error = is_mounted(vfsmnt) ? 1 : 2;
- goto out;
+ return error;
}
/**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
int error;
prepend(&res, &buflen, "\0", 1);
+ br_read_lock(&vfsmount_lock);
write_seqlock(&rename_lock);
error = prepend_path(path, root, &res, &buflen);
write_sequnlock(&rename_lock);
+ br_read_unlock(&vfsmount_lock);
if (error < 0)
return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
int error;
prepend(&res, &buflen, "\0", 1);
+ br_read_lock(&vfsmount_lock);
write_seqlock(&rename_lock);
error = prepend_path(path, &root, &res, &buflen);
write_sequnlock(&rename_lock);
+ br_read_unlock(&vfsmount_lock);
if (error > 1)
error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
get_fs_root(current->fs, &root);
+ br_read_lock(&vfsmount_lock);
write_seqlock(&rename_lock);
error = path_with_deleted(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
+ br_read_unlock(&vfsmount_lock);
if (error < 0)
res = ERR_PTR(error);
- write_sequnlock(&rename_lock);
path_put(&root);
return res;
}
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
get_fs_root_and_pwd(current->fs, &root, &pwd);
error = -ENOENT;
+ br_read_lock(&vfsmount_lock);
write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
prepend(&cwd, &buflen, "\0", 1);
error = prepend_path(&pwd, &root, &cwd, &buflen);
write_sequnlock(&rename_lock);
+ br_read_unlock(&vfsmount_lock);
if (error < 0)
goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
}
} else {
write_sequnlock(&rename_lock);
+ br_read_unlock(&vfsmount_lock);
}
out:
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 4a01ba31526..3b83cd60479 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -335,9 +335,9 @@ struct ext4_group_desc
*/
struct flex_groups {
- atomic_t free_inodes;
- atomic_t free_clusters;
- atomic_t used_dirs;
+ atomic64_t free_clusters;
+ atomic_t free_inodes;
+ atomic_t used_dirs;
};
#define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
@@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
extern int __init ext4_init_pageio(void);
extern void ext4_add_complete_io(ext4_io_end_t *io_end);
extern void ext4_exit_pageio(void);
-extern void ext4_ioend_wait(struct inode *);
+extern void ext4_ioend_shutdown(struct inode *);
extern void ext4_free_io_end(ext4_io_end_t *io);
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
extern void ext4_end_io_work(struct work_struct *work);
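Editorial aside (not part of the patch): free_clusters in struct flex_groups becomes an atomic64_t, presumably because a flex group's free-cluster count can exceed what a 32-bit atomic_t (a signed int) can hold on very large filesystems, and the field is moved to the front so the 64-bit member stays naturally aligned. A quick back-of-the-envelope check of the 32-bit ceiling, assuming a 4 KiB cluster size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cluster_bytes = 4096;			/* assumed 4 KiB clusters          */
	uint64_t limit = (uint64_t)INT32_MAX + 1;	/* atomic_t is a signed 32-bit int */

	/* 2^31 clusters of 4 KiB each is 2^43 bytes, i.e. 8 TiB. */
	printf("32-bit ceiling: %llu clusters = %llu TiB of free space\n",
	       (unsigned long long)limit,
	       (unsigned long long)((limit * cluster_bytes) >> 40));
	return 0;
}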
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 28dd8eeea6a..56efcaadf84 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
unsigned short ext1_ee_len, ext2_ee_len, max_len;
/*
- * Make sure that either both extents are uninitialized, or
- * both are _not_.
+ * Make sure that both extents are initialized. We don't merge
+ * uninitialized extents so that we can be sure that end_io code has
+ * the extent that was written properly split out and conversion to
+ * initialized is trivial.
*/
- if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+ if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
return 0;
if (ext4_ext_is_uninitialized(ex1))
@@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle,
{
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
- struct ext4_extent *ex, newex, orig_ex;
+ struct ext4_extent *ex, newex, orig_ex, zero_ex;
struct ext4_extent *ex2 = NULL;
unsigned int ee_len, depth;
int err = 0;
@@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle,
newblock = split - ee_block + ext4_ext_pblock(ex);
BUG_ON(split < ee_block || split >= (ee_block + ee_len));
+ BUG_ON(!ext4_ext_is_uninitialized(ex) &&
+ split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_MARK_UNINIT1 |
+ EXT4_EXT_MARK_UNINIT2));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -2990,12 +2996,26 @@ static int ext4_split_extent_at(handle_t *handle,
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
- if (split_flag & EXT4_EXT_DATA_VALID1)
+ if (split_flag & EXT4_EXT_DATA_VALID1) {
err = ext4_ext_zeroout(inode, ex2);
- else
+ zero_ex.ee_block = ex2->ee_block;
+ zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+ ext4_ext_store_pblock(&zero_ex,
+ ext4_ext_pblock(ex2));
+ } else {
err = ext4_ext_zeroout(inode, ex);
- } else
+ zero_ex.ee_block = ex->ee_block;
+ zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+ ext4_ext_store_pblock(&zero_ex,
+ ext4_ext_pblock(ex));
+ }
+ } else {
err = ext4_ext_zeroout(inode, &orig_ex);
+ zero_ex.ee_block = orig_ex.ee_block;
+ zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+ ext4_ext_store_pblock(&zero_ex,
+ ext4_ext_pblock(&orig_ex));
+ }
if (err)
goto fix_extent_len;
@@ -3003,6 +3023,12 @@ static int ext4_split_extent_at(handle_t *handle,
ex->ee_len = cpu_to_le16(ee_len);
ext4_ext_try_to_merge(handle, inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+ if (err)
+ goto fix_extent_len;
+
+ /* update extent status tree */
+ err = ext4_es_zeroout(inode, &zero_ex);
+
goto out;
} else if (err)
goto fix_extent_len;
@@ -3041,6 +3067,7 @@ static int ext4_split_extent(handle_t *handle,
int err = 0;
int uninitialized;
int split_flag1, flags1;
+ int allocated = map->m_len;
depth = ext_depth(inode);
ex = path[depth].p_ext;
@@ -3060,20 +3087,29 @@ static int ext4_split_extent(handle_t *handle,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
goto out;
+ } else {
+ allocated = ee_len - (map->m_lblk - ee_block);
}
-
+ /*
+ * Updating the path is required because the previous ext4_split_extent_at()
+ * may result in a split of the original leaf or an extent zeroout.
+ */
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode, map->m_lblk, path);
if (IS_ERR(path))
return PTR_ERR(path);
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ uninitialized = ext4_ext_is_uninitialized(ex);
+ split_flag1 = 0;
if (map->m_lblk >= ee_block) {
- split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
- EXT4_EXT_DATA_VALID2);
- if (uninitialized)
+ split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
+ if (uninitialized) {
split_flag1 |= EXT4_EXT_MARK_UNINIT1;
- if (split_flag & EXT4_EXT_MARK_UNINIT2)
- split_flag1 |= EXT4_EXT_MARK_UNINIT2;
+ split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_MARK_UNINIT2);
+ }
err = ext4_split_extent_at(handle, inode, path,
map->m_lblk, split_flag1, flags);
if (err)
@@ -3082,7 +3118,7 @@ static int ext4_split_extent(handle_t *handle,
ext4_ext_show_leaf(inode, path);
out:
- return err ? err : map->m_len;
+ return err ? err : allocated;
}
/*
@@ -3137,6 +3173,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
allocated = ee_len - (map->m_lblk - ee_block);
+ zero_ex.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
@@ -3227,13 +3264,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
if (EXT4_EXT_MAY_ZEROOUT & split_flag)
max_zeroout = sbi->s_extent_max_zeroout_kb >>
- inode->i_sb->s_blocksize_bits;
+ (inode->i_sb->s_blocksize_bits - 10);
/* If extent is less than s_max_zeroout_kb, zeroout directly */
if (max_zeroout && (ee_len <= max_zeroout)) {
err = ext4_ext_zeroout(inode, ex);
if (err)
goto out;
+ zero_ex.ee_block = ex->ee_block;
+ zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+ ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -3292,6 +3332,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
err = allocated;
out:
+ /* If we have hit an error, don't zero out the status tree */
+ if (!err)
+ err = ext4_es_zeroout(inode, &zero_ex);
return err ? err : allocated;
}
@@ -3374,8 +3417,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)ee_block, ee_len);
- /* If extent is larger than requested then split is required */
+ /* If the extent is larger than requested, it is a clear sign that we still
+ * have some extent state machine issues left. So extent_split is still
+ * required.
+ * TODO: Once all related issues are fixed, this situation should be
+ * illegal.
+ */
if (ee_block != map->m_lblk || ee_len > map->m_len) {
+#ifdef EXT4_DEBUG
+ ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+ " len %u; IO logical block %llu, len %u\n",
+ inode->i_ino, (unsigned long long)ee_block, ee_len,
+ (unsigned long long)map->m_lblk, map->m_len);
+#endif
err = ext4_split_unwritten_extents(handle, inode, map, path,
EXT4_GET_BLOCKS_CONVERT);
if (err < 0)
@@ -3626,6 +3680,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
path, map->m_len);
} else
err = ret;
+ map->m_flags |= EXT4_MAP_MAPPED;
+ if (allocated > map->m_len)
+ allocated = map->m_len;
+ map->m_len = allocated;
goto out2;
}
/* buffered IO case */
@@ -3675,6 +3733,7 @@ out:
allocated - map->m_len);
allocated = map->m_len;
}
+ map->m_len = allocated;
/*
* If we have done fallocate with the offset that is already
@@ -4106,9 +4165,6 @@ got_allocated_blocks:
}
} else {
BUG_ON(allocated_clusters < reserved_clusters);
- /* We will claim quota for all newly allocated blocks.*/
- ext4_da_update_reserve_space(inode, allocated_clusters,
- 1);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
int reservation = allocated_clusters -
@@ -4159,6 +4215,15 @@ got_allocated_blocks:
ei->i_reserved_data_blocks += reservation;
spin_unlock(&ei->i_block_reservation_lock);
}
+ /*
+ * We will claim quota for all newly allocated blocks.
+ * We're updating the reserved space *after* the
+ * correction above so we do not accidentally free
+ * all the metadata reservation because we might
+ * actually need it later on.
+ */
+ ext4_da_update_reserve_space(inode, allocated_clusters,
+ 1);
}
}
@@ -4368,8 +4433,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (len <= EXT_UNINIT_MAX_LEN << blkbits)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
- /* Prevent race condition between unwritten */
- ext4_flush_unwritten_io(inode);
retry:
while (ret >= 0 && ret < max_blocks) {
map.m_lblk = map.m_lblk + ret;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 95796a1b752..fe3337a85ed 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
static int ext4_es_can_be_merged(struct extent_status *es1,
struct extent_status *es2)
{
- if (es1->es_lblk + es1->es_len != es2->es_lblk)
+ if (ext4_es_status(es1) != ext4_es_status(es2))
return 0;
- if (ext4_es_status(es1) != ext4_es_status(es2))
+ if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
return 0;
- if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
- (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
+ if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
return 0;
- return 1;
+ if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
+ (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
+ return 1;
+
+ if (ext4_es_is_hole(es1))
+ return 1;
+
+ /* we need to check that the delayed extent is without the unwritten status */
+ if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
+ return 1;
+
+ return 0;
}
static struct extent_status *
@@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
return es;
}
+#ifdef ES_AGGRESSIVE_TEST
+static void ext4_es_insert_extent_ext_check(struct inode *inode,
+ struct extent_status *es)
+{
+ struct ext4_ext_path *path = NULL;
+ struct ext4_extent *ex;
+ ext4_lblk_t ee_block;
+ ext4_fsblk_t ee_start;
+ unsigned short ee_len;
+ int depth, ee_status, es_status;
+
+ path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
+ if (IS_ERR(path))
+ return;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+
+ if (ex) {
+
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_start = ext4_ext_pblock(ex);
+ ee_len = ext4_ext_get_actual_len(ex);
+
+ ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
+ es_status = ext4_es_is_unwritten(es) ? 1 : 0;
+
+ /*
+ * Make sure ex and es do not overlap when we try to insert
+ * a delayed/hole extent.
+ */
+ if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
+ if (in_range(es->es_lblk, ee_block, ee_len)) {
+ pr_warn("ES insert assertation failed for "
+ "inode: %lu we can find an extent "
+ "at block [%d/%d/%llu/%c], but we "
+ "want to add an delayed/hole extent "
+ "[%d/%d/%llu/%llx]\n",
+ inode->i_ino, ee_block, ee_len,
+ ee_start, ee_status ? 'u' : 'w',
+ es->es_lblk, es->es_len,
+ ext4_es_pblock(es), ext4_es_status(es));
+ }
+ goto out;
+ }
+
+ /*
+ * We don't check ee_block == es->es_lblk, etc. because es
+ * might be a part of a whole extent, and vice versa.
+ */
+ if (es->es_lblk < ee_block ||
+ ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
+ pr_warn("ES insert assertation failed for inode: %lu "
+ "ex_status [%d/%d/%llu/%c] != "
+ "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
+ ee_block, ee_len, ee_start,
+ ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
+ ext4_es_pblock(es), es_status ? 'u' : 'w');
+ goto out;
+ }
+
+ if (ee_status ^ es_status) {
+ pr_warn("ES insert assertation failed for inode: %lu "
+ "ex_status [%d/%d/%llu/%c] != "
+ "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
+ ee_block, ee_len, ee_start,
+ ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
+ ext4_es_pblock(es), es_status ? 'u' : 'w');
+ }
+ } else {
+ /*
+ * We can't find an extent on disk. So we need to make sure
+ * that we are not trying to add a written/unwritten extent.
+ */
+ if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
+ pr_warn("ES insert assertation failed for inode: %lu "
+ "can't find an extent at block %d but we want "
+ "to add an written/unwritten extent "
+ "[%d/%d/%llu/%llx]\n", inode->i_ino,
+ es->es_lblk, es->es_lblk, es->es_len,
+ ext4_es_pblock(es), ext4_es_status(es));
+ }
+ }
+out:
+ if (path) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ }
+}
+
+static void ext4_es_insert_extent_ind_check(struct inode *inode,
+ struct extent_status *es)
+{
+ struct ext4_map_blocks map;
+ int retval;
+
+ /*
+ * Here we call ext4_ind_map_blocks to look up a block mapping because
+ * the 'Indirect' structure is defined in indirect.c, so we can't
+ * access the direct/indirect tree from outside it. It would be too
+ * dirty to define this function in indirect.c.
+ */
+
+ map.m_lblk = es->es_lblk;
+ map.m_len = es->es_len;
+
+ retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
+ if (retval > 0) {
+ if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
+ /*
+ * We want to add a delayed/hole extent but this
+ * block has been allocated.
+ */
+ pr_warn("ES insert assertation failed for inode: %lu "
+ "We can find blocks but we want to add a "
+ "delayed/hole extent [%d/%d/%llu/%llx]\n",
+ inode->i_ino, es->es_lblk, es->es_len,
+ ext4_es_pblock(es), ext4_es_status(es));
+ return;
+ } else if (ext4_es_is_written(es)) {
+ if (retval != es->es_len) {
+ pr_warn("ES insert assertation failed for "
+ "inode: %lu retval %d != es_len %d\n",
+ inode->i_ino, retval, es->es_len);
+ return;
+ }
+ if (map.m_pblk != ext4_es_pblock(es)) {
+ pr_warn("ES insert assertation failed for "
+ "inode: %lu m_pblk %llu != "
+ "es_pblk %llu\n",
+ inode->i_ino, map.m_pblk,
+ ext4_es_pblock(es));
+ return;
+ }
+ } else {
+ /*
+ * We don't need to check for unwritten extents because an
+ * indirect-based file doesn't have them.
+ */
+ BUG_ON(1);
+ }
+ } else if (retval == 0) {
+ if (ext4_es_is_written(es)) {
+ pr_warn("ES insert assertation failed for inode: %lu "
+ "We can't find the block but we want to add "
+ "an written extent [%d/%d/%llu/%llx]\n",
+ inode->i_ino, es->es_lblk, es->es_len,
+ ext4_es_pblock(es), ext4_es_status(es));
+ return;
+ }
+ }
+}
+
+static inline void ext4_es_insert_extent_check(struct inode *inode,
+ struct extent_status *es)
+{
+ /*
+ * We don't need to worry about the race condition because
+ * the caller holds the i_data_sem lock.
+ */
+ BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ ext4_es_insert_extent_ext_check(inode, es);
+ else
+ ext4_es_insert_extent_ind_check(inode, es);
+}
+#else
+static inline void ext4_es_insert_extent_check(struct inode *inode,
+ struct extent_status *es)
+{
+}
+#endif
+
static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
@@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_es_store_status(&newes, status);
trace_ext4_es_insert_extent(inode, &newes);
+ ext4_es_insert_extent_check(inode, &newes);
+
write_lock(&EXT4_I(inode)->i_es_lock);
err = __es_remove_extent(inode, lblk, end);
if (err != 0)
@@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
return err;
}
+int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+ ext4_lblk_t ee_block;
+ ext4_fsblk_t ee_pblock;
+ unsigned int ee_len;
+
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+ ee_pblock = ext4_ext_pblock(ex);
+
+ if (ee_len == 0)
+ return 0;
+
+ return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+ EXTENT_STATUS_WRITTEN);
+}
+
static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
struct ext4_sb_info *sbi = container_of(shrink,
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index f190dfe969d..d8e2d4dc311 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -21,6 +21,12 @@
#endif
/*
+ * With ES_AGGRESSIVE_TEST defined, the result of es caching will be
+ * checked with old map_block's result.
+ */
+#define ES_AGGRESSIVE_TEST__
+
+/*
* These flags live in the high bits of extent_status.es_pblk
*/
#define EXTENT_STATUS_WRITTEN (1ULL << 63)
@@ -33,6 +39,8 @@
EXTENT_STATUS_DELAYED | \
EXTENT_STATUS_HOLE)
+struct ext4_extent;
+
struct extent_status {
struct rb_node rb_node;
ext4_lblk_t es_lblk; /* first logical block extent covers */
@@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
+extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
static inline int ext4_es_is_written(struct extent_status *es)
{
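Editorial aside (not part of the patch): the trailing underscores in ES_AGGRESSIVE_TEST__ are a common way to ship a debug switch compiled out by default; the #ifdef ES_AGGRESSIVE_TEST blocks added elsewhere in this series only build if a developer renames the macro. A trivial stand-alone illustration of the idiom, using a made-up SELF_CHECKS name:

#include <stdio.h>

#define SELF_CHECKS__		/* disabled: defines SELF_CHECKS__, not SELF_CHECKS */
/* #define SELF_CHECKS */	/* rename or uncomment to compile the checks in     */

int main(void)
{
#ifdef SELF_CHECKS
	printf("aggressive self-checks enabled\n");
#else
	printf("aggressive self-checks compiled out\n");
#endif
	return 0;
}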
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 32fd2b9075d..6c5bb8d993f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -324,8 +324,8 @@ error_return:
}
struct orlov_stats {
+ __u64 free_clusters;
__u32 free_inodes;
- __u32 free_clusters;
__u32 used_dirs;
};
@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
if (flex_size > 1) {
stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
- stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
+ stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
return;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9ea0cde3fa9..b3a5213bc73 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
- ext4_ioend_wait(inode);
-
if (inode->i_nlink) {
/*
* When journalling data dirty buffers are tracked only in the
@@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode)
* don't use page cache.
*/
if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
+ inode->i_ino != EXT4_JOURNAL_INO) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
@@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode)
filemap_write_and_wait(&inode->i_data);
}
truncate_inode_pages(&inode->i_data, 0);
+ ext4_ioend_shutdown(inode);
goto no_delete;
}
@@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode)
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages(&inode->i_data, 0);
+ ext4_ioend_shutdown(inode);
if (is_bad_inode(inode))
goto no_delete;
@@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
return num;
}
+#ifdef ES_AGGRESSIVE_TEST
+static void ext4_map_blocks_es_recheck(handle_t *handle,
+ struct inode *inode,
+ struct ext4_map_blocks *es_map,
+ struct ext4_map_blocks *map,
+ int flags)
+{
+ int retval;
+
+ map->m_flags = 0;
+ /*
+ * There is a race window in which the results may differ,
+ * e.g. xfstests #223 when dioread_nolock is enabled. The reason
+ * is that we look up a block mapping in the extent status tree
+ * without taking i_data_sem, so in the meantime the unwritten
+ * extent could be converted.
+ */
+ if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+ down_read((&EXT4_I(inode)->i_data_sem));
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+ retval = ext4_ext_map_blocks(handle, inode, map, flags &
+ EXT4_GET_BLOCKS_KEEP_SIZE);
+ } else {
+ retval = ext4_ind_map_blocks(handle, inode, map, flags &
+ EXT4_GET_BLOCKS_KEEP_SIZE);
+ }
+ if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+ up_read((&EXT4_I(inode)->i_data_sem));
+ /*
+ * Clear the EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flags
+ * because they shouldn't be marked in es_map->m_flags.
+ */
+ map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
+
+ /*
+ * We don't check m_len because the extent will be collapsed in the
+ * status tree, so the lengths might not be equal.
+ */
+ if (es_map->m_lblk != map->m_lblk ||
+ es_map->m_flags != map->m_flags ||
+ es_map->m_pblk != map->m_pblk) {
+ printk("ES cache assertation failed for inode: %lu "
+ "es_cached ex [%d/%d/%llu/%x] != "
+ "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
+ inode->i_ino, es_map->m_lblk, es_map->m_len,
+ es_map->m_pblk, es_map->m_flags, map->m_lblk,
+ map->m_len, map->m_pblk, map->m_flags,
+ retval, flags);
+ }
+}
+#endif /* ES_AGGRESSIVE_TEST */
+
/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
@@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
{
struct extent_status es;
int retval;
+#ifdef ES_AGGRESSIVE_TEST
+ struct ext4_map_blocks orig_map;
+
+ memcpy(&orig_map, map, sizeof(*map));
+#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
@@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
} else {
BUG_ON(1);
}
+#ifdef ES_AGGRESSIVE_TEST
+ ext4_map_blocks_es_recheck(handle, inode, map,
+ &orig_map, flags);
+#endif
goto found;
}
@@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
int ret;
unsigned long long status;
+#ifdef ES_AGGRESSIVE_TEST
+ if (retval != map->m_len) {
+ printk("ES len assertation failed for inode: %lu "
+ "retval %d != map->m_len %d "
+ "in %s (lookup)\n", inode->i_ino, retval,
+ map->m_len, __func__);
+ }
+#endif
+
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -643,6 +714,24 @@ found:
int ret;
unsigned long long status;
+#ifdef ES_AGGRESSIVE_TEST
+ if (retval != map->m_len) {
+ printk("ES len assertation failed for inode: %lu "
+ "retval %d != map->m_len %d "
+ "in %s (allocation)\n", inode->i_ino, retval,
+ map->m_len, __func__);
+ }
+#endif
+
+ /*
+ * If the extent has been zeroed out, we don't need to update
+ * extent status tree.
+ */
+ if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
+ ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+ if (ext4_es_is_written(&es))
+ goto has_zeroout;
+ }
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -655,6 +744,7 @@ found:
retval = ret;
}
+has_zeroout:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
int ret = check_block_validity(inode, map);
@@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(struct file *file,
}
/*
+ * Reserve metadata for a single block located at lblock
+ */
+static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+{
+ int retries = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+ ext4_lblk_t save_last_lblock;
+ int save_len;
+
+ /*
+ * Recalculate the number of metadata blocks to reserve
+ * in order to allocate nrblocks.
+ * The worst case is one extent per block.
+ */
+repeat:
+ spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+ * to be prepared to undo if we fail to claim space.
+ */
+ save_len = ei->i_da_metadata_calc_len;
+ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+ md_needed = EXT4_NUM_B2C(sbi,
+ ext4_calc_metadata_amount(inode, lblock));
+ trace_ext4_da_reserve_space(inode, md_needed);
+
+ /*
+ * We do still charge estimated metadata to the sb though;
+ * we cannot afford to run out of free blocks.
+ */
+ if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
+ if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ cond_resched();
+ goto repeat;
+ }
+ return -ENOSPC;
+ }
+ ei->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&ei->i_block_reservation_lock);
+
+ return 0; /* success */
+}
+
+/*
* Reserve a single cluster located at lblock
*/
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
@@ -1263,7 +1402,7 @@ repeat:
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- yield();
+ cond_resched();
goto repeat;
}
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
@@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
struct extent_status es;
int retval;
sector_t invalid_block = ~((sector_t) 0xffff);
+#ifdef ES_AGGRESSIVE_TEST
+ struct ext4_map_blocks orig_map;
+
+ memcpy(&orig_map, map, sizeof(*map));
+#endif
if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
invalid_block = ~0;
@@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
else
BUG_ON(1);
+#ifdef ES_AGGRESSIVE_TEST
+ ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
+#endif
return retval;
}
@@ -1843,8 +1990,11 @@ add_delayed:
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
- /* If the block was allocated from previously allocated cluster,
- * then we dont need to reserve it again. */
+ /*
+ * If the block was allocated from a previously allocated cluster,
+ * then we don't need to reserve it again. However, we still need
+ * to reserve metadata for every block we're going to write.
+ */
if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
ret = ext4_da_reserve_space(inode, iblock);
if (ret) {
@@ -1852,6 +2002,13 @@ add_delayed:
retval = ret;
goto out_unlock;
}
+ } else {
+ ret = ext4_da_reserve_metadata(inode, iblock);
+ if (ret) {
+ /* not enough space to reserve */
+ retval = ret;
+ goto out_unlock;
+ }
}
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -1873,6 +2030,15 @@ add_delayed:
int ret;
unsigned long long status;
+#ifdef ES_AGGRESSIVE_TEST
+ if (retval != map->m_len) {
+ printk("ES len assertation failed for inode: %lu "
+ "retval %d != map->m_len %d "
+ "in %s (lookup)\n", inode->i_ino, retval,
+ map->m_len, __func__);
+ }
+#endif
+
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
trace_ext4_releasepage(page);
- WARN_ON(PageChecked(page));
- if (!page_has_buffers(page))
+ /* Page has dirty journalled data -> cannot release */
+ if (PageChecked(page))
return 0;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page, wait);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 7bb713a46fe..ee6614bdb63 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group);
- atomic_sub(ac->ac_b_ex.fe_len,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic64_sub(ac->ac_b_ex.fe_len,
+ &sbi->s_flex_groups[flex_group].free_clusters);
}
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -3692,11 +3692,7 @@ repeat:
if (free < needed && busy) {
busy = 0;
ext4_unlock_group(sb, group);
- /*
- * Yield the CPU here so that we don't get soft lockup
- * in non preempt case.
- */
- yield();
+ cond_resched();
goto repeat;
}
@@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
/* let others to free the space */
- yield();
+ cond_resched();
ar->len = ar->len >> 1;
}
if (!ar->len) {
@@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb;
struct ext4_group_desc *gdp;
- unsigned long freed = 0;
unsigned int overflow;
ext4_grpblk_t bit;
struct buffer_head *gd_bh;
@@ -4666,14 +4661,12 @@ do_more:
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic_add(count_clusters,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic64_add(count_clusters,
+ &sbi->s_flex_groups[flex_group].free_clusters);
}
ext4_mb_unload_buddy(&e4b);
- freed += count;
-
if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
@@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
+ &sbi->s_flex_groups[flex_group].free_clusters);
}
ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4e81d47aa8c..33e1c086858 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -32,16 +32,18 @@
*/
static inline int
get_ext_path(struct inode *inode, ext4_lblk_t lblock,
- struct ext4_ext_path **path)
+ struct ext4_ext_path **orig_path)
{
int ret = 0;
+ struct ext4_ext_path *path;
- *path = ext4_ext_find_extent(inode, lblock, *path);
- if (IS_ERR(*path)) {
- ret = PTR_ERR(*path);
- *path = NULL;
- } else if ((*path)[ext_depth(inode)].p_ext == NULL)
+ path = ext4_ext_find_extent(inode, lblock, *orig_path);
+ if (IS_ERR(path))
+ ret = PTR_ERR(path);
+ else if (path[ext_depth(inode)].p_ext == NULL)
ret = -ENODATA;
+ else
+ *orig_path = path;
return ret;
}
@@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
{
struct ext4_ext_path *path = NULL;
struct ext4_extent *ext;
+ int ret = 0;
ext4_lblk_t last = from + count;
while (from < last) {
*err = get_ext_path(inode, from, &path);
if (*err)
- return 0;
+ goto out;
ext = path[ext_depth(inode)].p_ext;
- if (!ext) {
- ext4_ext_drop_refs(path);
- return 0;
- }
- if (uninit != ext4_ext_is_uninitialized(ext)) {
- ext4_ext_drop_refs(path);
- return 0;
- }
+ if (uninit != ext4_ext_is_uninitialized(ext))
+ goto out;
from += ext4_ext_get_actual_len(ext);
ext4_ext_drop_refs(path);
}
- return 1;
+ ret = 1;
+out:
+ if (path) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ }
+ return ret;
}
/**
@@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
int replaced_count = 0;
int dext_alen;
+ *err = ext4_es_remove_extent(orig_inode, from, count);
+ if (*err)
+ goto out;
+
+ *err = ext4_es_remove_extent(donor_inode, from, count);
+ if (*err)
+ goto out;
+
/* Get the original extent for the block "orig_off" */
*err = get_ext_path(orig_inode, orig_off, &orig_path);
if (*err)
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 809b31003ec..047a6de04a0 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -50,11 +50,21 @@ void ext4_exit_pageio(void)
kmem_cache_destroy(io_page_cachep);
}
-void ext4_ioend_wait(struct inode *inode)
+/*
+ * This function is called by ext4_evict_inode() to make sure there is
+ * no more pending I/O completion work left to do.
+ */
+void ext4_ioend_shutdown(struct inode *inode)
{
wait_queue_head_t *wq = ext4_ioend_wq(inode);
wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+ /*
+ * We need to make sure the work structure is finished being
+ * used before we let the inode get destroyed.
+ */
+ if (work_pending(&EXT4_I(inode)->i_unwritten_work))
+ cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
}
static void put_io_page(struct ext4_io_page *io_page)
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index b2c8ee56eb9..c169477a62c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb,
sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, group_data[0].group);
- atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+ &sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
&sbi->s_flex_groups[flex_group].free_inodes);
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b3818b48f41..5d6d5357812 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
flex_group = ext4_flex_group(sbi, i);
atomic_add(ext4_free_inodes_count(sb, gdp),
&sbi->s_flex_groups[flex_group].free_inodes);
- atomic_add(ext4_free_group_clusters(sb, gdp),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic64_add(ext4_free_group_clusters(sb, gdp),
+ &sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(ext4_used_dirs_count(sb, gdp),
&sbi->s_flex_groups[flex_group].used_dirs);
}
diff --git a/fs/internal.h b/fs/internal.h
index 507141fceb9..4be78237d89 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool);
* dcache.c
*/
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
+
+/*
+ * read_write.c
+ */
+extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index d6ee5aed56b..325bc019ed8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1065,9 +1065,12 @@ out:
void jbd2_journal_set_triggers(struct buffer_head *bh,
struct jbd2_buffer_trigger_type *type)
{
- struct journal_head *jh = bh2jh(bh);
+ struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
+ if (WARN_ON(!jh))
+ return;
jh->b_triggers = type;
+ jbd2_journal_put_journal_head(jh);
}
void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
@@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
- struct journal_head *jh = bh2jh(bh);
+ struct journal_head *jh;
int ret = 0;
- jbd_debug(5, "journal_head %p\n", jh);
- JBUFFER_TRACE(jh, "entry");
if (is_handle_aborted(handle))
goto out;
- if (!buffer_jbd(bh)) {
+ jh = jbd2_journal_grab_journal_head(bh);
+ if (!jh) {
ret = -EUCLEAN;
goto out;
}
+ jbd_debug(5, "journal_head %p\n", jh);
+ JBUFFER_TRACE(jh, "entry");
jbd_lock_bh_state(bh);
@@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
jbd_unlock_bh_state(bh);
+ jbd2_journal_put_journal_head(jh);
out:
JBUFFER_TRACE(jh, "exit");
WARN_ON(ret); /* All errors are bugs, so dump the stack */
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
index 737d839bc17..6fc7b5cae92 100644
--- a/fs/nfs/blocklayout/blocklayoutdm.c
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
bl_pipe_msg.bl_wq = &nn->bl_wq;
memset(msg, 0, sizeof(*msg));
- msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
+ msg->len = sizeof(bl_msg) + bl_msg.totallen;
+ msg->data = kzalloc(msg->len, GFP_NOFS);
if (!msg->data)
goto out;
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
memcpy(msg->data, &bl_msg, sizeof(bl_msg));
dataptr = (uint8_t *) msg->data;
memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
- msg->len = sizeof(bl_msg) + bl_msg.totallen;
add_wait_queue(&nn->bl_wq, &wq);
if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index dc0f98dfa71..c516da5873f 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -726,9 +726,9 @@ out1:
return ret;
}
-static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data)
+static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
{
- return key_instantiate_and_link(key, data, strlen(data) + 1,
+ return key_instantiate_and_link(key, data, datalen,
id_resolver_cache->thread_keyring,
authkey);
}
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
struct key *key, struct key *authkey)
{
char id_str[NFS_UINT_MAXLEN];
+ size_t len;
int ret = -ENOKEY;
/* ret = -ENOKEY */
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
case IDMAP_CONV_NAMETOID:
if (strcmp(upcall->im_name, im->im_name) != 0)
break;
- sprintf(id_str, "%d", im->im_id);
- ret = nfs_idmap_instantiate(key, authkey, id_str);
+ /* Note: here we store the NUL terminator too */
+ len = sprintf(id_str, "%d", im->im_id) + 1;
+ ret = nfs_idmap_instantiate(key, authkey, id_str, len);
break;
case IDMAP_CONV_IDTONAME:
if (upcall->im_id != im->im_id)
break;
- ret = nfs_idmap_instantiate(key, authkey, im->im_name);
+ len = strlen(im->im_name);
+ ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
break;
default:
ret = -EINVAL;
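Editorial aside (not part of the patch): the "+ 1" in the name-to-id case above works because sprintf() returns the number of characters written excluding the terminating NUL, so adding one stores the terminator as part of the key payload, while the id-to-name case passes strlen() without it. A quick stand-alone check of that arithmetic:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char id_str[16];
	size_t len = sprintf(id_str, "%d", 1000) + 1;	/* "1000" plus its NUL */

	printf("sprintf wrote %zu chars, payload with NUL is %zu bytes\n",
	       strlen(id_str), len);
	return 0;
}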
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 49eeb044c10..4fb234d3aef 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
{
if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
return;
- clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
pnfs_return_layout(inode);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b2671cb0f90..26431cf62dd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2632,7 +2632,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
int status;
if (pnfs_ld_layoutret_on_setattr(inode))
- pnfs_return_layout(inode);
+ pnfs_commit_and_return_layout(inode);
nfs_fattr_init(fattr);
@@ -6416,22 +6416,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
static void nfs4_layoutcommit_release(void *calldata)
{
struct nfs4_layoutcommit_data *data = calldata;
- struct pnfs_layout_segment *lseg, *tmp;
- unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
pnfs_cleanup_layoutcommit(data);
- /* Matched by references in pnfs_set_layoutcommit */
- list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
- list_del_init(&lseg->pls_lc_list);
- if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
- &lseg->pls_flags))
- pnfs_put_lseg(lseg);
- }
-
- clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
- smp_mb__after_clear_bit();
- wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
-
put_rpccred(data->cred);
kfree(data);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 48ac5aad625..4bdffe0ba02 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range,
lo_seg_intersecting(lseg_range, recall_range);
}
+static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
+ struct list_head *tmp_list)
+{
+ if (!atomic_dec_and_test(&lseg->pls_refcount))
+ return false;
+ pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
+ list_add(&lseg->pls_list, tmp_list);
+ return true;
+}
+
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list)
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
*/
dprintk("%s: lseg %p ref %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount));
- if (atomic_dec_and_test(&lseg->pls_refcount)) {
- pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
- list_add(&lseg->pls_list, tmp_list);
+ if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
rv = 1;
- }
}
return rv;
}
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
return lseg;
}
+static void pnfs_clear_layoutcommit(struct inode *inode,
+ struct list_head *head)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct pnfs_layout_segment *lseg, *tmp;
+
+ if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+ return;
+ list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
+ if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ continue;
+ pnfs_lseg_dec_and_remove_zero(lseg, head);
+ }
+}
+
/*
* Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
* when the layout segment list is empty.
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino)
/* Reference matched in nfs4_layoutreturn_release */
pnfs_get_layout_hdr(lo);
empty = list_empty(&lo->plh_segs);
+ pnfs_clear_layoutcommit(ino, &tmp_list);
pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
/* Don't send a LAYOUTRETURN if list was initially empty */
if (empty) {
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino)
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
- WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
-
lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
if (unlikely(lrp == NULL)) {
status = -ENOMEM;
@@ -845,6 +866,33 @@ out:
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
+int
+pnfs_commit_and_return_layout(struct inode *inode)
+{
+ struct pnfs_layout_hdr *lo;
+ int ret;
+
+ spin_lock(&inode->i_lock);
+ lo = NFS_I(inode)->layout;
+ if (lo == NULL) {
+ spin_unlock(&inode->i_lock);
+ return 0;
+ }
+ pnfs_get_layout_hdr(lo);
+ /* Block new layoutgets and read/write to ds */
+ lo->plh_block_lgets++;
+ spin_unlock(&inode->i_lock);
+ filemap_fdatawait(inode->i_mapping);
+ ret = pnfs_layoutcommit_inode(inode, true);
+ if (ret == 0)
+ ret = _pnfs_return_layout(inode);
+ spin_lock(&inode->i_lock);
+ lo->plh_block_lgets--;
+ spin_unlock(&inode->i_lock);
+ pnfs_put_layout_hdr(lo);
+ return ret;
+}
+
bool pnfs_roc(struct inode *ino)
{
struct pnfs_layout_hdr *lo;
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
dprintk("pnfs write error = %d\n", hdr->pnfs_error);
if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
PNFS_LAYOUTRET_ON_ERROR) {
- clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
pnfs_return_layout(hdr->inode);
}
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
dprintk("pnfs read error = %d\n", hdr->pnfs_error);
if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
PNFS_LAYOUTRET_ON_ERROR) {
- clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
pnfs_return_layout(hdr->inode);
}
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
if (lseg->pls_range.iomode == IOMODE_RW &&
- test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
list_add(&lseg->pls_lc_list, listp);
}
}
+static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
+{
+ struct pnfs_layout_segment *lseg, *tmp;
+ unsigned long *bitlock = &NFS_I(inode)->flags;
+
+ /* Matched by references in pnfs_set_layoutcommit */
+ list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
+ list_del_init(&lseg->pls_lc_list);
+ pnfs_put_lseg(lseg);
+ }
+
+ clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+ smp_mb__after_clear_bit();
+ wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+}
+
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
+ pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 94ba8041774..f5f8a470a64 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
int _pnfs_return_layout(struct inode *);
+int pnfs_commit_and_return_layout(struct inode *);
void pnfs_ld_write_done(struct nfs_write_data *);
void pnfs_ld_read_done(struct nfs_read_data *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino)
return 0;
}
+static inline int pnfs_commit_and_return_layout(struct inode *inode)
+{
+ return 0;
+}
+
static inline bool
pnfs_ld_layoutret_on_setattr(struct inode *inode)
{
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 62c1ee128ae..ca05f6dc354 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF)
kfree(rp->c_replvec.iov_base);
- hlist_del(&rp->c_hash);
+ if (!hlist_unhashed(&rp->c_hash))
+ hlist_del(&rp->c_hash);
list_del(&rp->c_lru);
--num_drc_entries;
kmem_cache_free(drc_slab, rp);
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
int nfsd_reply_cache_init(void)
{
+ INIT_LIST_HEAD(&lru_head);
+ max_drc_entries = nfsd_cache_size_limit();
+ num_drc_entries = 0;
+
register_shrinker(&nfsd_reply_cache_shrinker);
drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
0, 0, NULL);
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void)
if (!cache_hash)
goto out_nomem;
- INIT_LIST_HEAD(&lru_head);
- max_drc_entries = nfsd_cache_size_limit();
- num_drc_entries = 0;
-
return 0;
out_nomem:
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2a7eb536de0..2b2e2396a86 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
int host_err;
int stable = *stablep;
int use_wgather;
+ loff_t pos = offset;
dentry = file->f_path.dentry;
inode = dentry->d_inode;
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
+ host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
set_fs(oldfs);
if (host_err < 0)
goto out_nfserr;
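Editorial aside (not part of the patch): this hunk and the read_write.c/splice.c changes further down follow the same idiom -- hand the write primitive a local copy of the offset so the caller's value is not advanced behind its back. A tiny user-space model (fake_write stands in for vfs_writev()/__kernel_write()) of why that matters:

#include <stdio.h>

/* Stand-in for the write primitive: advances *pos like the real thing. */
static long fake_write(const char *buf, long len, long long *pos)
{
	(void)buf;
	*pos += len;
	return len;
}

int main(void)
{
	long long offset = 100;		/* caller-supplied offset, still needed afterwards */
	long long pos = offset;		/* scratch copy handed to the write primitive      */

	fake_write("data", 4, &pos);
	printf("pos advanced to %lld, offset still %lld\n", pos, offset);
	return 0;
}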
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index a86aebc9ba7..869116c2afb 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
- struct inode *inode = iget_locked(sb, de->low_ino);
+ struct inode *inode = new_inode_pseudo(sb);
- if (inode && (inode->i_state & I_NEW)) {
+ if (inode) {
+ inode->i_ino = de->low_ino;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
PROC_I(inode)->pde = de;
@@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_fop = de->proc_fops;
}
}
- unlock_new_inode(inode);
} else
pde_put(de);
return inode;
diff --git a/fs/read_write.c b/fs/read_write.c
index a698eff457f..e6ddc8dceb9 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
#include <linux/splice.h>
#include <linux/compat.h>
#include "read_write.h"
+#include "internal.h"
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
EXPORT_SYMBOL(do_sync_write);
+ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
+{
+ mm_segment_t old_fs;
+ const char __user *p;
+ ssize_t ret;
+
+ if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+ return -EINVAL;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ p = (__force const char __user *)buf;
+ if (count > MAX_RW_COUNT)
+ count = MAX_RW_COUNT;
+ if (file->f_op->write)
+ ret = file->f_op->write(file, p, count, pos);
+ else
+ ret = do_sync_write(file, p, count, pos);
+ set_fs(old_fs);
+ if (ret > 0) {
+ fsnotify_modify(file);
+ add_wchar(current, ret);
+ }
+ inc_syscw(current);
+ return ret;
+}
+
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
ssize_t ret;
diff --git a/fs/splice.c b/fs/splice.c
index 718bd005638..29e394e49dd 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -31,6 +31,7 @@
#include <linux/security.h>
#include <linux/gfp.h>
#include <linux/socket.h>
+#include "internal.h"
/*
* Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
{
int ret;
void *data;
+ loff_t tmp = sd->pos;
data = buf->ops->map(pipe, buf, 0);
- ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+ ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
buf->ops->unmap(pipe, buf, data);
return ret;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index a386b0b654c..918e8fe2f5e 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -581,7 +581,11 @@
{0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -592,6 +596,13 @@
{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 4fd4999ccb5..0b763276f61 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -561,7 +561,6 @@ struct csrow_info {
u32 ue_count; /* Uncorrectable Errors for this csrow */
u32 ce_count; /* Correctable Errors for this csrow */
- u32 nr_pages; /* combined pages count of all channels */
struct mem_ctl_info *mci; /* the parent */
@@ -676,11 +675,11 @@ struct mem_ctl_info {
* sees memory sticks ("dimms"), and the ones that sees memory ranks.
* All old memory controllers enumerate memories per rank, but most
* of the recent drivers enumerate memories per DIMM, instead.
- * When the memory controller is per rank, mem_is_per_rank is true.
+ * When the memory controller is per rank, csbased is true.
*/
unsigned n_layers;
struct edac_mc_layer *layers;
- bool mem_is_per_rank;
+ bool csbased;
/*
* DIMM info. Will eventually remove the entire csrows_info some day
@@ -741,8 +740,6 @@ struct mem_ctl_info {
u32 fake_inject_ue;
u16 fake_inject_count;
#endif
- __u8 csbased : 1, /* csrow-based memory controller */
- __resv : 7;
};
#endif
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 61c97ae22e0..f09a0ae4d85 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,6 +15,7 @@
*/
#include <asm/types.h>
+#include <linux/compiler.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
@@ -31,7 +32,7 @@
#error Wordsize not 32 or 64
#endif
-static inline u64 hash_64(u64 val, unsigned int bits)
+static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
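
The hash.h hunk marks hash_64() as __always_inline so the shift-and-add expansion of the golden-ratio multiply can be folded away when the bit count is a compile-time constant. Below is a minimal standalone sketch of a multiplicative hash in the same spirit — it is not the kernel's implementation: it uses a plain 64-bit multiply and an assumed constant rather than the kernel's shift/add sequence.

/* Illustrative only: a standalone multiplicative hash in the spirit of
 * hash_64(); GOLDEN_RATIO_64 here is an assumption for the sketch, and the
 * kernel of this era builds the multiply out of shifts and adds instead. */
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x9e37fffffffc0001ULL

static inline uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
{
	/* multiply by the constant and keep the top 'bits' bits */
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)hash_64_sketch(0xdeadbeefULL, 10));
	return 0;
}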
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index f5dbce50466..66017028dcb 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work);
#ifdef CONFIG_IRQ_WORK
bool irq_work_needs_cpu(void);
#else
-static bool irq_work_needs_cpu(void) { return false; }
+static inline bool irq_work_needs_cpu(void) { return false; }
#endif
#endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 80d36874689..79fdd80a42d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp);
unsigned long int_sqrt(unsigned long);
extern void bust_spinlocks(int yes);
-extern void wake_up_klogd(void);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
extern int panic_on_oops;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ede274957e0..c74092eebf5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
-static inline unsigned zone_end_pfn(const struct zone *zone)
+static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
}
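
The mmzone.h hunk widens the return type of zone_end_pfn() from unsigned to unsigned long because page frame numbers can exceed 32 bits on 64-bit machines, and the narrower type silently truncates the result. A small sketch of that truncation, assuming a 64-bit build; the function names are stand-ins, not kernel code.

/* Illustrative only: shows why returning 'unsigned' truncates a wide
 * page-frame number (assumes 64-bit unsigned long). */
#include <stdio.h>

static unsigned end_pfn_buggy(unsigned long start, unsigned long spanned)
{
	return start + spanned;          /* silently truncated to 32 bits */
}

static unsigned long end_pfn_fixed(unsigned long start, unsigned long spanned)
{
	return start + spanned;          /* full width preserved */
}

int main(void)
{
	unsigned long start = 0x100000000UL;   /* PFN above the 32-bit range */
	unsigned long spanned = 0x1000UL;

	printf("buggy: 0x%x\n",  end_pfn_buggy(start, spanned));
	printf("fixed: 0x%lx\n", end_pfn_fixed(start, spanned));
	return 0;
}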
diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h
index f14943d5531..f80af867434 100644
--- a/include/linux/mxsfb.h
+++ b/include/linux/mxsfb.h
@@ -24,8 +24,8 @@
#define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
-#define FB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define FB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */
struct mxsfb_platform_data {
struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
* allocated. If specified,fb_size must also be specified.
* fb_phys must be unused by Linux.
*/
+ u32 sync; /* sync mask, contains MXSFB specifics not
+ * carried in fb_info->var.sync
+ */
};
#endif /* __LINUX_MXSFB_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c25cccaa555..4fa3b0b9b07 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -137,6 +137,34 @@ enum {
NVME_LBAF_RP_DEGRADED = 3,
};
+struct nvme_smart_log {
+ __u8 critical_warning;
+ __u8 temperature[2];
+ __u8 avail_spare;
+ __u8 spare_thresh;
+ __u8 percent_used;
+ __u8 rsvd6[26];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 host_reads[16];
+ __u8 host_writes[16];
+ __u8 ctrl_busy_time[16];
+ __u8 power_cycles[16];
+ __u8 power_on_hours[16];
+ __u8 unsafe_shutdowns[16];
+ __u8 media_errors[16];
+ __u8 num_err_log_entries[16];
+ __u8 rsvd192[320];
+};
+
+enum {
+ NVME_SMART_CRIT_SPARE = 1 << 0,
+ NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
+ NVME_SMART_CRIT_RELIABILITY = 1 << 2,
+ NVME_SMART_CRIT_MEDIA = 1 << 3,
+ NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
+};
+
struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
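
The new nvme_smart_log structure mirrors the 512-byte SMART / Health Information log page. The following standalone sketch re-declares the same field layout with userspace types and checks that the sizes add up to 512 bytes; it is a layout check, not the kernel header.

/* Illustrative only: same field order as the hunk above, verified to total
 * 512 bytes (all members are byte arrays, so there is no padding). */
#include <stdio.h>

struct smart_log_sketch {
	unsigned char critical_warning;
	unsigned char temperature[2];
	unsigned char avail_spare;
	unsigned char spare_thresh;
	unsigned char percent_used;
	unsigned char rsvd6[26];
	unsigned char data_units_read[16];
	unsigned char data_units_written[16];
	unsigned char host_reads[16];
	unsigned char host_writes[16];
	unsigned char ctrl_busy_time[16];
	unsigned char power_cycles[16];
	unsigned char power_on_hours[16];
	unsigned char unsafe_shutdowns[16];
	unsigned char media_errors[16];
	unsigned char num_err_log_entries[16];
	unsigned char rsvd192[320];
};

_Static_assert(sizeof(struct smart_log_sketch) == 512,
	       "SMART log page layout should be exactly 512 bytes");

int main(void)
{
	printf("sizeof = %zu\n", sizeof(struct smart_log_sketch));
	return 0;
}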
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 1249a54d17e..822171fcb1c 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -134,6 +134,8 @@ extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
+extern void wake_up_klogd(void);
+
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
#else
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
return false;
}
+static inline void wake_up_klogd(void)
+{
+}
+
static inline void log_buf_kexec_setup(void)
{
}
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index ef9be7e1e19..1819b59aab2 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -66,6 +66,7 @@
* port.
* @flags: usb serial port flags
* @write_wait: a wait_queue_head_t used by the port.
+ * @delta_msr_wait: modem-status-change wait queue
* @work: work queue entry for the line discipline waking up.
* @throttled: nonzero if the read urb is inactive to throttle the device
* @throttle_req: nonzero if the tty wants to throttle us
@@ -112,6 +113,7 @@ struct usb_serial_port {
unsigned long flags;
wait_queue_head_t write_wait;
+ wait_queue_head_t delta_msr_wait;
struct work_struct work;
char throttled;
char throttle_req;
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 6f033a415ec..5c295c26ad3 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -181,8 +181,16 @@
/*-------------------------------------------------------------------------*/
+#if IS_ENABLED(CONFIG_USB_ULPI)
struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags);
+#else
+static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ return NULL;
+}
+#endif
#ifdef CONFIG_USB_ULPI_VIEWPORT
/* access ops for controllers with a viewport register */
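
The ulpi.h hunk keeps callers of otg_ulpi_create() compiling when CONFIG_USB_ULPI is disabled by supplying an inline stub that returns NULL. Below is a minimal sketch of the same real-declaration-or-stub pattern; HAVE_WIDGET_DRIVER and the widget names are made up for the example and stand in for IS_ENABLED(CONFIG_USB_ULPI) and struct usb_phy.

/* Illustrative only: real prototype when the feature is built, inline NULL
 * stub otherwise. This sketch builds and runs with the stub path. */
#include <stdio.h>
#include <stddef.h>

struct widget;   /* opaque handle, stand-in for struct usb_phy */

#ifdef HAVE_WIDGET_DRIVER
struct widget *widget_create(unsigned int flags);   /* provided by the driver */
#else
static inline struct widget *widget_create(unsigned int flags)
{
	(void)flags;
	return NULL;   /* feature compiled out: callers see "not available" */
}
#endif

int main(void)
{
	if (!widget_create(0))
		printf("widget support not built in\n");
	return 0;
}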
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e5c4f609f22..3953fda2e8b 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -840,7 +840,8 @@ out_putfd:
fd = error;
}
mutex_unlock(&root->d_inode->i_mutex);
- mnt_drop_write(mnt);
+ if (!ro)
+ mnt_drop_write(mnt);
out_putname:
putname(name);
return fd;
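
The mqueue.c fix calls mnt_drop_write() only when write access was actually taken (the mount was not read-only), so the release mirrors the conditional acquire. A tiny sketch of that pairing rule, using invented helper names.

/* Illustrative only: drop only what was actually acquired, as in the
 * "if (!ro) mnt_drop_write(mnt)" change above. */
#include <stdio.h>
#include <stdbool.h>

static int writers;

static bool get_write_access(bool read_only)
{
	if (read_only)
		return false;      /* never acquired */
	writers++;
	return true;
}

static void put_write_access(void)
{
	writers--;
}

static void do_operation(bool read_only)
{
	bool got_write = get_write_access(read_only);

	/* ... work that may or may not need write access ... */

	if (got_write)             /* release mirrors the conditional acquire */
		put_write_access();
}

int main(void)
{
	do_operation(true);
	do_operation(false);
	printf("writers after: %d (should be 0)\n", writers);
	return 0;
}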
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0cd86501c3..59412d037ee 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event)
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+ if (ctx)
+ perf_event_task_ctx(ctx, task_event);
}
- if (ctx)
- perf_event_task_ctx(ctx, task_event);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
+ if (task_event->task_ctx)
+ perf_event_task_ctx(task_event->task_ctx, task_event);
+
rcu_read_unlock();
}
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
event->attr.sample_period = NSEC_PER_SEC / freq;
hwc->sample_period = event->attr.sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
+ hwc->last_period = hwc->sample_period;
event->attr.freq = 0;
}
}
diff --git a/kernel/printk.c b/kernel/printk.c
index 0b31715f335..abbdd9e2ac8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
-
int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
@@ -224,6 +222,7 @@ struct log {
static DEFINE_RAW_SPINLOCK(logbuf_lock);
#ifdef CONFIG_PRINTK
+DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
@@ -1957,45 +1956,6 @@ int is_console_locked(void)
return console_locked;
}
-/*
- * Delayed printk version, for scheduler-internal messages:
- */
-#define PRINTK_BUF_SIZE 512
-
-#define PRINTK_PENDING_WAKEUP 0x01
-#define PRINTK_PENDING_SCHED 0x02
-
-static DEFINE_PER_CPU(int, printk_pending);
-static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
-
-static void wake_up_klogd_work_func(struct irq_work *irq_work)
-{
- int pending = __this_cpu_xchg(printk_pending, 0);
-
- if (pending & PRINTK_PENDING_SCHED) {
- char *buf = __get_cpu_var(printk_sched_buf);
- printk(KERN_WARNING "[sched_delayed] %s", buf);
- }
-
- if (pending & PRINTK_PENDING_WAKEUP)
- wake_up_interruptible(&log_wait);
-}
-
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
- .func = wake_up_klogd_work_func,
- .flags = IRQ_WORK_LAZY,
-};
-
-void wake_up_klogd(void)
-{
- preempt_disable();
- if (waitqueue_active(&log_wait)) {
- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
- irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
- }
- preempt_enable();
-}
-
static void console_cont_flush(char *text, size_t size)
{
unsigned long flags;
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void)
late_initcall(printk_late_init);
#if defined CONFIG_PRINTK
+/*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE 512
+
+#define PRINTK_PENDING_WAKEUP 0x01
+#define PRINTK_PENDING_SCHED 0x02
+
+static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
+{
+ int pending = __this_cpu_xchg(printk_pending, 0);
+
+ if (pending & PRINTK_PENDING_SCHED) {
+ char *buf = __get_cpu_var(printk_sched_buf);
+ printk(KERN_WARNING "[sched_delayed] %s", buf);
+ }
+
+ if (pending & PRINTK_PENDING_WAKEUP)
+ wake_up_interruptible(&log_wait);
+}
+
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+ .func = wake_up_klogd_work_func,
+ .flags = IRQ_WORK_LAZY,
+};
+
+void wake_up_klogd(void)
+{
+ preempt_disable();
+ if (waitqueue_active(&log_wait)) {
+ this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+ irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+ }
+ preempt_enable();
+}
int printk_sched(const char *fmt, ...)
{
diff --git a/kernel/sys.c b/kernel/sys.c
index 81f56445fba..39c9c4a2949 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
-static int __orderly_poweroff(void)
+static int __orderly_poweroff(bool force)
{
- int argc;
char **argv;
static char *envp[] = {
"HOME=/",
@@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void)
};
int ret;
- argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
- if (argv == NULL) {
+ argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+ if (argv) {
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ argv_free(argv);
+ } else {
printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
- __func__, poweroff_cmd);
- return -ENOMEM;
+ __func__, poweroff_cmd);
+ ret = -ENOMEM;
}
- ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
- NULL, NULL, NULL);
- argv_free(argv);
+ if (ret && force) {
+ printk(KERN_WARNING "Failed to start orderly shutdown: "
+ "forcing the issue\n");
+ /*
+ * I guess this should try to kick off some daemon to sync and
+ * poweroff asap. Or not even bother syncing if we're doing an
+ * emergency shutdown?
+ */
+ emergency_sync();
+ kernel_power_off();
+ }
return ret;
}
+static bool poweroff_force;
+
+static void poweroff_work_func(struct work_struct *work)
+{
+ __orderly_poweroff(poweroff_force);
+}
+
+static DECLARE_WORK(poweroff_work, poweroff_work_func);
+
/**
* orderly_poweroff - Trigger an orderly system poweroff
* @force: force poweroff if command execution fails
@@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void)
*/
int orderly_poweroff(bool force)
{
- int ret = __orderly_poweroff();
-
- if (ret && force) {
- printk(KERN_WARNING "Failed to start orderly shutdown: "
- "forcing the issue\n");
-
- /*
- * I guess this should try to kick off some daemon to sync and
- * poweroff asap. Or not even bother syncing if we're doing an
- * emergency shutdown?
- */
- emergency_sync();
- kernel_power_off();
- }
-
- return ret;
+ if (force) /* do not override the pending "true" */
+ poweroff_force = true;
+ schedule_work(&poweroff_work);
+ return 0;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 2fb8cb88df8..7f32fe0e52c 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
- if ((tick_broadcast_device.evtdev &&
+ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (tick_broadcast_device.evtdev &&
tick_broadcast_device.evtdev->rating >= dev->rating) ||
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae5..6893d5a2bf0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
continue;
}
- hlist_del(&entry->node);
- call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+ hlist_del_rcu(&entry->node);
+ call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
}
}
__disable_ftrace_function_probe();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1f835a83cb2..4f1dade5698 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct ring_buffer *buf = tr->buffer;
+ struct ring_buffer *buf;
if (trace_stop_count)
return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&ftrace_max_lock);
+ buf = tr->buffer;
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
@@ -2880,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return -EINVAL;
}
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+ if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+ return -1;
+
+ return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
- return;
+ return 0;
+
+ /* Give the tracer a chance to approve the change */
+ if (current_trace->flag_changed)
+ if (current_trace->flag_changed(current_trace, mask, !!enabled))
+ return -EINVAL;
if (enabled)
trace_flags |= mask;
@@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
- if (mask == TRACE_ITER_OVERWRITE)
+ if (mask == TRACE_ITER_OVERWRITE) {
ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+ }
if (mask == TRACE_ITER_PRINTK)
trace_printk_start_stop_comm(enabled);
+
+ return 0;
}
static int trace_set_options(char *option)
{
char *cmp;
int neg = 0;
- int ret = 0;
+ int ret = -ENODEV;
int i;
cmp = strstrip(option);
@@ -2915,19 +2936,20 @@ static int trace_set_options(char *option)
cmp += 2;
}
+ mutex_lock(&trace_types_lock);
+
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
- set_tracer_flags(1 << i, !neg);
+ ret = set_tracer_flag(1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
- if (!trace_options[i]) {
- mutex_lock(&trace_types_lock);
+ if (!trace_options[i])
ret = set_tracer_option(current_trace, cmp, neg);
- mutex_unlock(&trace_types_lock);
- }
+
+ mutex_unlock(&trace_types_lock);
return ret;
}
@@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
+ int ret;
if (cnt >= sizeof(buf))
return -EINVAL;
@@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
- trace_set_options(buf);
+ ret = trace_set_options(buf);
+ if (ret < 0)
+ return ret;
*ppos += cnt;
@@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
goto out;
trace_branch_disable();
+
+ current_trace->enabled = false;
+
if (current_trace->reset)
current_trace->reset(tr);
@@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
}
current_trace = t;
+ current_trace->enabled = true;
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
@@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
return -EINVAL;
- set_tracer_flags(1 << index, val);
+
+ mutex_lock(&trace_types_lock);
+ ret = set_tracer_flag(1 << index, val);
+ mutex_unlock(&trace_types_lock);
+
+ if (ret < 0)
+ return ret;
*ppos += cnt;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d5..2081971367e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -283,11 +283,15 @@ struct tracer {
enum print_line_t (*print_line)(struct trace_iterator *iter);
/* If you handled the flag setting, return 0 */
int (*set_flag)(u32 old_flags, u32 bit, int set);
+ /* Return 0 if OK with change, else return non-zero */
+ int (*flag_changed)(struct tracer *tracer,
+ u32 mask, int set);
struct tracer *next;
struct tracer_flags *flags;
bool print_max;
bool use_max_tr;
bool allocated_snapshot;
+ bool enabled;
};
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+int set_tracer_flag(unsigned int mask, int enabled);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac488..443b25b43b4 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,7 +32,7 @@ enum {
static int trace_type __read_mostly;
-static int save_lat_flag;
+static int save_flags;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
static void __irqsoff_tracer_init(struct trace_array *tr)
{
- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
- trace_flags |= TRACE_ITER_LATENCY_FMT;
+ save_flags = trace_flags;
+
+ /* non overwrite screws up the latency tracers */
+ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0;
irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
static void irqsoff_tracer_reset(struct trace_array *tr)
{
+ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
stop_irqsoff_tracer(tr, is_graph());
- if (!save_lat_flag)
- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
#endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
#endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
+ .flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
#endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a..fde652c9a51 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);
-static int save_lat_flag;
+static int save_flags;
#define TRACE_DISPLAY_GRAPH 1
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
static int __wakeup_tracer_init(struct trace_array *tr)
{
- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
- trace_flags |= TRACE_ITER_LATENCY_FMT;
+ save_flags = trace_flags;
+
+ /* non overwrite screws up the latency tracers */
+ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0;
wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
static void wakeup_tracer_reset(struct trace_array *tr)
{
+ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
- if (!save_lat_flag)
- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
.print_line = wakeup_print_line,
.flags = &tracer_flags,
.set_flag = wakeup_set_flag,
+ .flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.print_line = wakeup_print_line,
.flags = &tracer_flags,
.set_flag = wakeup_set_flag,
+ .flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d..f8e0e536739 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
wake_up_klogd();
}
}
-
-
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d..d87a17a819d 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
entry = bucket_find_exact(bucket, ref);
if (!entry) {
+ /* must drop lock before calling dma_mapping_error */
+ put_hash_bucket(bucket, &flags);
+
if (dma_mapping_error(ref->dev, ref->dev_addr)) {
err_printk(ref->dev, NULL,
- "DMA-API: device driver tries "
- "to free an invalid DMA memory address\n");
- return;
+ "DMA-API: device driver tries to free an "
+ "invalid DMA memory address\n");
+ } else {
+ err_printk(ref->dev, NULL,
+ "DMA-API: device driver tries to free DMA "
+ "memory it has not allocated [device "
+ "address=0x%016llx] [size=%llu bytes]\n",
+ ref->dev_addr, ref->size);
}
- err_printk(ref->dev, NULL, "DMA-API: device driver tries "
- "to free DMA memory it has not allocated "
- "[device address=0x%016llx] [size=%llu bytes]\n",
- ref->dev_addr, ref->size);
- goto out;
+ return;
}
if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
hash_bucket_del(entry);
dma_entry_free(entry);
-out:
put_hash_bucket(bucket, &flags);
}
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
ref.dev = dev;
ref.dev_addr = dma_addr;
bucket = get_hash_bucket(&ref, &flags);
- entry = bucket_find_exact(bucket, &ref);
- if (!entry)
- goto out;
+ list_for_each_entry(entry, &bucket->list, list) {
+ if (!exact_match(&ref, entry))
+ continue;
+
+ /*
+ * The same physical address can be mapped multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which updates the first entry
+ * from the hash which fits the reference value and is
+ * not currently listed as being checked.
+ */
+ if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+ entry->map_err_type = MAP_ERR_CHECKED;
+ break;
+ }
+ }
- entry->map_err_type = MAP_ERR_CHECKED;
-out:
put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb19..ca9a7c6d7e9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
- struct hstate *h = &default_hstate;
- return h->nr_huge_pages * pages_per_huge_page(h);
+ struct hstate *h;
+ unsigned long nr_total_pages = 0;
+
+ for_each_hstate(h)
+ nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+ return nr_total_pages;
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
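
hugetlb_total_pages() now sums over every huge-page size class via for_each_hstate() instead of counting only the default hstate. A small userspace sketch of the same sum, with invented example sizes; it is not the kernel's data structure.

/* Illustrative only: total pages = sum over all size classes of
 * (huge pages of that size) * (base pages per huge page). */
#include <stdio.h>

struct size_class {
	unsigned long nr_pages;         /* huge pages of this size */
	unsigned long pages_per_huge;   /* base pages per huge page */
};

static struct size_class classes[] = {
	{ .nr_pages = 128, .pages_per_huge = 512 },    /* e.g. 2 MB pages */
	{ .nr_pages = 2,   .pages_per_huge = 262144 }, /* e.g. 1 GB pages */
};

int main(void)
{
	unsigned long total = 0;

	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		total += classes[i].nr_pages * classes[i].pages_per_huge;

	printf("total base pages: %lu\n", total);
	return 0;
}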
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9597eec8239..ee376576081 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
for (i = 0; i < MAX_NR_ZONES; i++) {
struct zone *zone = pgdat->node_zones + i;
- if (zone->wait_table)
+ /*
+ * wait_table may be allocated from boot memory,
+ * here only free if it's allocated by vmalloc.
+ */
+ if (is_vmalloc_addr(zone->wait_table))
vfree(zone->wait_table);
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a18714469bf..85addcd9372 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
grp = &vlan_info->grp;
- /* Take it out of our own structures, but be sure to interlock with
- * HW accelerating devices or SW vlan input packet processing if
- * VLAN is not 0 (leave it there for 802.1p).
- */
- if (vlan_id)
- vlan_vid_del(real_dev, vlan_id);
-
grp->nr_vlan_devs--;
if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
vlan_gvrp_uninit_applicant(real_dev);
}
+ /* Take it out of our own structures, but be sure to interlock with
+ * HW accelerating devices or SW vlan input packet processing if
+ * VLAN is not 0 (leave it there for 802.1p).
+ */
+ if (vlan_id)
+ vlan_vid_del(real_dev, vlan_id);
+
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 10b47d4cdfe..c581f1200ef 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -421,7 +421,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
return 0;
br_warn(br, "adding interface %s with same address "
"as a received packet\n",
- source->dev->name);
+ source ? source->dev->name : br->dev->name);
fdb_delete(br, fdb);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index f5ad23bb24f..2db88dfa99d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
return;
}
#endif
- WARN_ON(in_interrupt());
static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b2b36196b34..6d9ca35f0c3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1858,11 +1858,8 @@ void tcp_enter_loss(struct sock *sk, int how)
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
- if (!how) {
- /* Push undo marker, if it was plain RTO and nothing
- * was retransmitted. */
- tp->undo_marker = tp->snd_una;
- } else {
+ tp->undo_marker = tp->snd_una;
+ if (how) {
tp->sacked_out = 0;
tp->fackets_out = 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 78d8414d2cf..d6279cb3d54 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4876,26 +4876,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
static int __net_init addrconf_init_net(struct net *net)
{
- int err;
+ int err = -ENOMEM;
struct ipv6_devconf *all, *dflt;
- err = -ENOMEM;
- all = &ipv6_devconf;
- dflt = &ipv6_devconf_dflt;
+ all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
+ if (all == NULL)
+ goto err_alloc_all;
- if (!net_eq(net, &init_net)) {
- all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
- if (all == NULL)
- goto err_alloc_all;
+ dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+ if (dflt == NULL)
+ goto err_alloc_dflt;
- dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
- if (dflt == NULL)
- goto err_alloc_dflt;
- } else {
- /* these will be inherited by all namespaces */
- dflt->autoconf = ipv6_defaults.autoconf;
- dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
- }
+ /* these will be inherited by all namespaces */
+ dflt->autoconf = ipv6_defaults.autoconf;
+ dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
net->ipv6.devconf_all = all;
net->ipv6.devconf_dflt = dflt;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec..f8529fc8e54 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
+ /* barrier matches the read in rpc_wake_up_task_queue_locked() */
+ smp_wmb();
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
- if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
- __rpc_do_wake_up_task(queue, task);
+ if (RPC_IS_QUEUED(task)) {
+ smp_rmb();
+ if (task->tk_waitqueue == queue)
+ __rpc_do_wake_up_task(queue, task);
+ }
}
/*
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51be64f163e..971282b6f6a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
#endif
}
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
if (unix_tot_inflight)
unix_gc(); /* Garbage collect fds */
-
- return 0;
}
static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
if (!sk)
return 0;
+ unix_release_sock(sk, 0);
sock->sk = NULL;
- return unix_release_sock(sk, 0);
+ return 0;
}
static int unix_autobind(struct socket *sock)
@@ -1413,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
if (UNIXCB(skb).cred)
return;
if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- !other->sk_socket ||
- test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ (other->sk_socket &&
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
UNIXCB(skb).cred = get_current_cred();
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a9ebcf9e371..ecdf30eb587 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
if (val & AC_DIG1_PROFESSIONAL)
sbits |= IEC958_AES0_PROFESSIONAL;
if (sbits & IEC958_AES0_PROFESSIONAL) {
- if (sbits & AC_DIG1_EMPHASIS)
+ if (val & AC_DIG1_EMPHASIS)
sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
} else {
if (val & AC_DIG1_EMPHASIS)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 78897d05d80..43c2ea53956 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -995,6 +995,8 @@ enum {
BAD_NO_EXTRA_SURR_DAC = 0x101,
/* Primary DAC shared with main surrounds */
BAD_SHARED_SURROUND = 0x100,
+ /* No independent HP possible */
+ BAD_NO_INDEP_HP = 0x40,
/* Primary DAC shared with main CLFE */
BAD_SHARED_CLFE = 0x10,
/* Primary DAC shared with extra surrounds */
@@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
return snd_hda_get_path_idx(codec, path);
}
+/* check whether the independent HP is available with the current config */
+static bool indep_hp_possible(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct nid_path *path;
+ int i, idx;
+
+ if (cfg->line_out_type == AUTO_PIN_HP_OUT)
+ idx = spec->out_paths[0];
+ else
+ idx = spec->hp_paths[0];
+ path = snd_hda_get_path_from_idx(codec, idx);
+ if (!path)
+ return false;
+
+ /* assume no path conflicts unless aamix is involved */
+ if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid))
+ return true;
+
+ /* check whether output paths contain aamix */
+ for (i = 0; i < cfg->line_outs; i++) {
+ if (spec->out_paths[i] == idx)
+ break;
+ path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]);
+ if (path && is_nid_contained(path, spec->mixer_nid))
+ return false;
+ }
+ for (i = 0; i < cfg->speaker_outs; i++) {
+ path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]);
+ if (path && is_nid_contained(path, spec->mixer_nid))
+ return false;
+ }
+
+ return true;
+}
+
/* fill the empty entries in the dac array for speaker/hp with the
* shared dac pointed by the paths
*/
@@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
badness += BAD_MULTI_IO;
}
+ if (spec->indep_hp && !indep_hp_possible(codec))
+ badness += BAD_NO_INDEP_HP;
+
/* re-fill the shared DAC for speaker / headphone */
if (cfg->line_out_type != AUTO_PIN_HP_OUT)
refill_shared_dacs(codec, cfg->hp_outs,
@@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec)
cfg->speaker_pins, val);
}
+ /* clear indep_hp flag if not available */
+ if (spec->indep_hp && !indep_hp_possible(codec))
+ spec->indep_hp = 0;
+
kfree(best_cfg);
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4cea6bb6fad..418bfc0eb0a 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -415,6 +415,8 @@ struct azx_dev {
unsigned int opened :1;
unsigned int running :1;
unsigned int irq_pending :1;
+ unsigned int prepared:1;
+ unsigned int locked:1;
/*
* For VIA:
* A flag to ensure DMA position is 0
@@ -426,8 +428,25 @@ struct azx_dev {
struct timecounter azx_tc;
struct cyclecounter azx_cc;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+ struct mutex dsp_mutex;
+#endif
};
+/* DSP lock helpers */
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+#define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex)
+#define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
+#define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
+#define dsp_is_locked(dev) ((dev)->locked)
+#else
+#define dsp_lock_init(dev) do {} while (0)
+#define dsp_lock(dev) do {} while (0)
+#define dsp_unlock(dev) do {} while (0)
+#define dsp_is_locked(dev) 0
+#endif
+
/* CORB/RIRB */
struct azx_rb {
u32 *buf; /* CORB/RIRB buffer
@@ -527,6 +546,10 @@ struct azx {
/* card list (for power_save trigger) */
struct list_head list;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+ struct azx_dev saved_azx_dev;
+#endif
};
#define CREATE_TRACE_POINTS
@@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
dev = chip->capture_index_offset;
nums = chip->capture_streams;
}
- for (i = 0; i < nums; i++, dev++)
- if (!chip->azx_dev[dev].opened) {
- res = &chip->azx_dev[dev];
- if (res->assigned_key == key)
- break;
+ for (i = 0; i < nums; i++, dev++) {
+ struct azx_dev *azx_dev = &chip->azx_dev[dev];
+ dsp_lock(azx_dev);
+ if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
+ res = azx_dev;
+ if (res->assigned_key == key) {
+ res->opened = 1;
+ res->assigned_key = key;
+ dsp_unlock(azx_dev);
+ return azx_dev;
+ }
}
+ dsp_unlock(azx_dev);
+ }
if (res) {
+ dsp_lock(res);
res->opened = 1;
res->assigned_key = key;
+ dsp_unlock(res);
}
return res;
}
@@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
struct azx_dev *azx_dev = get_azx_dev(substream);
int ret;
+ dsp_lock(azx_dev);
+ if (dsp_is_locked(azx_dev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
mark_runtime_wc(chip, azx_dev, substream, false);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
@@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
- return ret;
+ goto unlock;
mark_runtime_wc(chip, azx_dev, substream, true);
+ unlock:
+ dsp_unlock(azx_dev);
return ret;
}
@@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
/* reset BDL address */
- azx_sd_writel(azx_dev, SD_BDLPL, 0);
- azx_sd_writel(azx_dev, SD_BDLPU, 0);
- azx_sd_writel(azx_dev, SD_CTL, 0);
- azx_dev->bufsize = 0;
- azx_dev->period_bytes = 0;
- azx_dev->format_val = 0;
+ dsp_lock(azx_dev);
+ if (!dsp_is_locked(azx_dev)) {
+ azx_sd_writel(azx_dev, SD_BDLPL, 0);
+ azx_sd_writel(azx_dev, SD_BDLPU, 0);
+ azx_sd_writel(azx_dev, SD_CTL, 0);
+ azx_dev->bufsize = 0;
+ azx_dev->period_bytes = 0;
+ azx_dev->format_val = 0;
+ }
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
mark_runtime_wc(chip, azx_dev, substream, false);
+ azx_dev->prepared = 0;
+ dsp_unlock(azx_dev);
return snd_pcm_lib_free_pages(substream);
}
@@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
unsigned short ctls = spdif ? spdif->ctls : 0;
+ dsp_lock(azx_dev);
+ if (dsp_is_locked(azx_dev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
azx_stream_reset(chip, azx_dev);
format_val = snd_hda_calc_stream_format(runtime->rate,
runtime->channels,
@@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
snd_printk(KERN_ERR SFX
"%s: invalid format_val, rate=%d, ch=%d, format=%d\n",
pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
}
bufsize = snd_pcm_lib_buffer_bytes(substream);
@@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
azx_dev->no_period_wakeup = runtime->no_period_wakeup;
err = azx_setup_periods(chip, substream, azx_dev);
if (err < 0)
- return err;
+ goto unlock;
}
/* wallclk has 24Mhz clock source */
@@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
stream_tag > chip->capture_streams)
stream_tag -= chip->capture_streams;
- return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
+ err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
azx_dev->format_val, substream);
+
+ unlock:
+ if (!err)
+ azx_dev->prepared = 1;
+ dsp_unlock(azx_dev);
+ return err;
}
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
azx_dev = get_azx_dev(substream);
trace_azx_pcm_trigger(chip, azx_dev, cmd);
+ if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
+ return -EPIPE;
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
rstart = 1;
@@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
struct azx_dev *azx_dev;
int err;
- if (snd_hda_lock_devices(bus))
- return -EBUSY;
+ azx_dev = azx_get_dsp_loader_dev(chip);
+
+ dsp_lock(azx_dev);
+ spin_lock_irq(&chip->reg_lock);
+ if (azx_dev->running || azx_dev->locked) {
+ spin_unlock_irq(&chip->reg_lock);
+ err = -EBUSY;
+ goto unlock;
+ }
+ azx_dev->prepared = 0;
+ chip->saved_azx_dev = *azx_dev;
+ azx_dev->locked = 1;
+ spin_unlock_irq(&chip->reg_lock);
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
byte_size, bufp);
if (err < 0)
- goto unlock;
+ goto err_alloc;
mark_pages_wc(chip, bufp, true);
- azx_dev = azx_get_dsp_loader_dev(chip);
azx_dev->bufsize = byte_size;
azx_dev->period_bytes = byte_size;
azx_dev->format_val = format;
@@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
goto error;
azx_setup_controller(chip, azx_dev);
+ dsp_unlock(azx_dev);
return azx_dev->stream_tag;
error:
mark_pages_wc(chip, bufp, false);
snd_dma_free_pages(bufp);
-unlock:
- snd_hda_unlock_devices(bus);
+ err_alloc:
+ spin_lock_irq(&chip->reg_lock);
+ if (azx_dev->opened)
+ *azx_dev = chip->saved_azx_dev;
+ azx_dev->locked = 0;
+ spin_unlock_irq(&chip->reg_lock);
+ unlock:
+ dsp_unlock(azx_dev);
return err;
}
@@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
- if (!dmab->area)
+ if (!dmab->area || !azx_dev->locked)
return;
+ dsp_lock(azx_dev);
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
@@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
snd_dma_free_pages(dmab);
dmab->area = NULL;
- snd_hda_unlock_devices(bus);
+ spin_lock_irq(&chip->reg_lock);
+ if (azx_dev->opened)
+ *azx_dev = chip->saved_azx_dev;
+ azx_dev->locked = 0;
+ spin_unlock_irq(&chip->reg_lock);
+ dsp_unlock(azx_dev);
}
#endif /* CONFIG_SND_HDA_DSP_LOADER */
@@ -3481,6 +3566,7 @@ static int azx_first_init(struct azx *chip)
}
for (i = 0; i < chip->num_streams; i++) {
+ dsp_lock_init(&chip->azx_dev[i]);
/* allocate memory for the BDL for each stream */
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(chip->pci),
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 60d08f669f0..0d9c58f1356 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec)
snd_hda_gen_update_outputs(codec);
if (spec->gpio_eapd_hp) {
- unsigned int gpio = spec->gen.hp_jack_present ?
+ spec->gpio_data = spec->gen.hp_jack_present ?
spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
- AC_VERB_SET_GPIO_DATA, gpio);
+ AC_VERB_SET_GPIO_DATA, spec->gpio_data);
}
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 941bf6c766e..2a89d1eefeb 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec)
}
if (spec->beep_amp)
- snd_hda_attach_beep_device(codec, spec->beep_amp);
+ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
return 0;
}
@@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec)
}
if (spec->beep_amp)
- snd_hda_attach_beep_device(codec, spec->beep_amp);
+ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
return 0;
}
@@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec)
}
if (spec->beep_amp)
- snd_hda_attach_beep_device(codec, spec->beep_amp);
+ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
return 0;
}
@@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec)
return 0;
}
+static void cx_auto_free(struct hda_codec *codec)
+{
+ snd_hda_detach_beep_device(codec);
+ snd_hda_gen_free(codec);
+}
+
static const struct hda_codec_ops cx_auto_patch_ops = {
.build_controls = cx_auto_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = snd_hda_gen_init,
- .free = snd_hda_gen_free,
+ .free = cx_auto_free,
.unsol_event = snd_hda_jack_unsol_event,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
@@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
codec->patch_ops = cx_auto_patch_ops;
if (spec->beep_amp)
- snd_hda_attach_beep_device(codec, spec->beep_amp);
+ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
/* Some laptops with Conexant chips show stalls in S3 resume,
* which falls into the single-cmd mode.
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 638e7f73801..ca4739c3f65 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- if (check_input_term(state, d->baSourceID[0], term) < 0)
- return -ENODEV;
+ err = check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
term->name = uac_selector_unit_iSelector(d);
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
case UAC1_PROCESSING_UNIT:
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
- /* UAC2_EFFECT_UNIT */ {
+ /* UAC2_EFFECT_UNIT */
+ case UAC2_EXTENSION_UNIT_V2: {
struct uac_processing_unit_descriptor *d = p1;
if (state->mixer->protocol == UAC_VERSION_2 &&
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
return err;
/* determine the input source type and name */
- if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
- return -EINVAL;
+ err = check_input_term(state, hdr->bSourceID, &iterm);
+ if (err < 0)
+ return err;
master_bits = snd_usb_combine_bytes(bmaControls, csize);
/* master configuration quirks */
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
return parse_audio_extension_unit(state, unitid, p1);
else /* UAC_VERSION_2 */
return parse_audio_processing_unit(state, unitid, p1);
+ case UAC2_EXTENSION_UNIT_V2:
+ return parse_audio_extension_unit(state, unitid, p1);
default:
snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
return -EINVAL;
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
state.oterm.type = le16_to_cpu(desc->wTerminalType);
state.oterm.name = desc->iTerminal;
err = parse_audio_unit(&state, desc->bSourceID);
- if (err < 0)
+ if (err < 0 && err != -EINVAL)
return err;
} else { /* UAC_VERSION_2 */
struct uac2_output_terminal_descriptor *desc = p;
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
state.oterm.type = le16_to_cpu(desc->wTerminalType);
state.oterm.name = desc->iTerminal;
err = parse_audio_unit(&state, desc->bSourceID);
- if (err < 0)
+ if (err < 0 && err != -EINVAL)
return err;
/* for UAC2, use the same approach to also add the clock selectors */
err = parse_audio_unit(&state, desc->bCSourceID);
- if (err < 0)
+ if (err < 0 && err != -EINVAL)
return err;
}
}
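
The mixer.c hunks stop overwriting the callee's error code with -EINVAL or -ENODEV and return it unchanged, so the top level can decide which errors are fatal. A short sketch of that propagation pattern with hypothetical helpers; it only demonstrates the idiom, not the USB-audio parser.

/* Illustrative only: preserve the callee's error code so callers can tell
 * "no such device" apart from "invalid descriptor". */
#include <stdio.h>
#include <errno.h>

static int probe_unit(int id)
{
	if (id < 0)
		return -ENODEV;     /* specific reason the probe failed */
	return 0;
}

static int parse_unit_buggy(int id)
{
	if (probe_unit(id) < 0)
		return -EINVAL;     /* original cause is lost */
	return 0;
}

static int parse_unit_fixed(int id)
{
	int err = probe_unit(id);

	if (err < 0)
		return err;         /* caller sees ENODEV, not a generic EINVAL */
	return 0;
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n", parse_unit_buggy(-1), parse_unit_fixed(-1));
	return 0;
}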
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index a20e3203343..0b0a90787db 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -122,7 +122,7 @@ export Q VERBOSE
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
-INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
+INCLUDES = -I. $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -Wall
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index a2108ca1cc1..bb74c79cd16 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line")
PERF_DEBUG = $(DEBUG)
endif
ifndef PERF_DEBUG
- CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
+ CFLAGS_OPTIMIZE = -O6
endif
ifdef PARSER_DEBUG
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W
CFLAGS := $(CFLAGS) -Wvolatile-register-var
endif
+ifndef PERF_DEBUG
+ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+ CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2
+ endif
+endif
+
### --- END CONFIGURATION SECTION ---
ifeq ($(srctree),)
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a5223e6a7b4..0fdc85269c4 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -1,6 +1,30 @@
#ifndef BENCH_H
#define BENCH_H
+/*
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+ * tokens if they are not already defined.
+ *
+ * PA-RISC uses different madvise values from other architectures and
+ * needs to be special-cased.
+ */
+#ifdef __hppa__
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 67
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 68
+# endif
+#else
+# ifndef MADV_HUGEPAGE
+# define MADV_HUGEPAGE 14
+# endif
+# ifndef MADV_NOHUGEPAGE
+# define MADV_NOHUGEPAGE 15
+# endif
+#endif
+
extern int bench_numa(int argc, const char **argv, const char *prefix);
extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
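
bench.h now defines MADV_HUGEPAGE/MADV_NOHUGEPAGE itself when an older glibc does not, with PA-RISC getting its own values. A hedged usage sketch of the same fallback idiom follows; the value 14 matches the non-PA-RISC branch above, and on kernels without transparent hugepages the madvise() call may simply fail, which the sketch treats as non-fatal.

/* Illustrative only: compatibility define plus a madvise() call that uses it. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14            /* fallback for old glibc headers */
#endif

int main(void)
{
	size_t len = 4 * 1024 * 1024;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (madvise(buf, len, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");   /* not fatal for the sketch */

	munmap(buf, len);
	return 0;
}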
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 774c90713a5..f1a939ebc19 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_event__synthesize_guest_os, tool);
}
- if (!opts->target.system_wide)
+ if (perf_target__has_task(&opts->target))
err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event,
machine);
- else
+ else if (perf_target__has_cpu(&opts->target))
err = perf_event__synthesize_threads(tool, process_synthesized_event,
machine);
+ else /* command specified */
+ err = 0;
if (err != 0)
goto out_delete_session;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 38624686ee9..226a4ae2f93 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused)
return 0;
}
-#define K_LEFT -1
-#define K_RIGHT -2
+#define K_LEFT -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
#endif
#ifdef GTK2_SUPPORT
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 55433aa42c8..eabdce0a2da 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list)
slist->rblist.node_delete = strlist__node_delete;
slist->dupstr = dupstr;
- if (slist && strlist__parse_list(slist, list) != 0)
+ if (list && strlist__parse_list(slist, list) != 0)
goto out_error;
}
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index ce82b940195..5ba005c00e2 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
u64 redir_content;
- ASSERT(redir_index < IOAPIC_NUM_PINS);
+ if (redir_index < IOAPIC_NUM_PINS)
+ redir_content =
+ ioapic->redirtbl[redir_index].bits;
+ else
+ redir_content = ~0ULL;
- redir_content = ioapic->redirtbl[redir_index].bits;
result = (ioapic->ioregsel & 0x1) ?
(redir_content >> 32) & 0xffffffff :
redir_content & 0xffffffff;