author    | Dave Airlie <airlied@redhat.com> | 2011-03-14 14:15:13 +1000
committer | Dave Airlie <airlied@redhat.com> | 2011-03-14 14:15:13 +1000
commit    | 34db18abd376b2075c760c38f0b861aed379415d (patch)
tree      | c4174e39a2f445f17c25ab206d45c66217bbbf85
parent    | e73f88af66fcc50083fae4b7e1c39b469179a97a (diff)
parent    | 47ae63e0c2e5fdb582d471dc906eb29be94c732f (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (755 commits)
drm/i915: Only wait on a pending flip if we intend to write to the buffer
drm/i915/dp: Sanity check eDP existence
drm/i915: Rebind the buffer if its alignment constraints changes with tiling
drm/i915: Disable GPU semaphores by default
drm/i915: Do not overflow the MMADDR write FIFO
Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing"
drm/i915: Don't save/restore hardware status page address register
drm/i915: don't store the reg value for HWS_PGA
drm/i915: fix memory corruption with GM965 and >4GB RAM
Linux 2.6.38-rc7
Revert "TPM: Long default timeout fix"
drm/i915: Re-enable GPU semaphores for SandyBridge mobile
drm/i915: Replace vblank PM QoS with "Interrupt-Based AGPBUSY#"
Revert "drm/i915: Use PM QoS to prevent C-State starvation of gen3 GPU"
drm/i915: Allow relocation deltas outside of target bo
drm/i915: Silence an innocuous compiler warning for an unused variable
fs/block_dev.c: fix new kernel-doc warning
ACPI: Fix build for CONFIG_NET unset
mm: <asm-generic/pgtable.h> must include <linux/mm_types.h>
x86: Use u32 instead of long to set reset vector back to 0
...
Conflicts:
drivers/gpu/drm/i915/i915_gem.c
741 files changed, 10181 insertions, 7255 deletions
diff --git a/.gitignore b/.gitignore index 8faa6c02b39..5d56a3fd0de 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ modules.builtin *.gz *.bz2 *.lzma +*.xz *.lzo *.patch *.gcno diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 2861055afd7..c2791589397 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -73,8 +73,8 @@ services. </para> <para> - The core of every DRM driver is struct drm_device. Drivers - will typically statically initialize a drm_device structure, + The core of every DRM driver is struct drm_driver. Drivers + will typically statically initialize a drm_driver structure, then pass it to drm_init() at load time. </para> @@ -84,7 +84,7 @@ <title>Driver initialization</title> <para> Before calling the DRM initialization routines, the driver must - first create and fill out a struct drm_device structure. + first create and fill out a struct drm_driver structure. </para> <programlisting> static struct drm_driver driver = { diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl index 5e87ad58c0b..f51f28531b8 100644 --- a/Documentation/DocBook/filesystems.tmpl +++ b/Documentation/DocBook/filesystems.tmpl @@ -82,6 +82,11 @@ </sect1> </chapter> + <chapter id="fs_events"> + <title>Events based on file descriptors</title> +!Efs/eventfd.c + </chapter> + <chapter id="sysfs"> <title>The Filesystem for Exporting Kernel Objects</title> !Efs/sysfs/file.c diff --git a/Documentation/powerpc/dts-bindings/fsl/sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt index b46bcf46c3d..b46bcf46c3d 100644 --- a/Documentation/powerpc/dts-bindings/fsl/sata.txt +++ b/Documentation/devicetree/bindings/ata/fsl-sata.txt diff --git a/Documentation/powerpc/dts-bindings/eeprom.txt b/Documentation/devicetree/bindings/eeprom.txt index 4342c10de1b..4342c10de1b 100644 --- a/Documentation/powerpc/dts-bindings/eeprom.txt +++ b/Documentation/devicetree/bindings/eeprom.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt index b0019eb5330..b0019eb5330 100644 --- a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt +++ b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt index edaa84d288a..edaa84d288a 100644 --- a/Documentation/powerpc/dts-bindings/gpio/gpio.txt +++ b/Documentation/devicetree/bindings/gpio/gpio.txt diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt index 064db928c3c..064db928c3c 100644 --- a/Documentation/powerpc/dts-bindings/gpio/led.txt +++ b/Documentation/devicetree/bindings/gpio/led.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt index 1eacd6b20ed..1eacd6b20ed 100644 --- a/Documentation/powerpc/dts-bindings/fsl/i2c.txt +++ b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/devicetree/bindings/marvell.txt index f1533d91953..f1533d91953 100644 --- a/Documentation/powerpc/dts-bindings/marvell.txt +++ b/Documentation/devicetree/bindings/marvell.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt index 64bcb8be973..64bcb8be973 100644 --- a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt +++ 
b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt diff --git a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt index c39ac289195..c39ac289195 100644 --- a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt +++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt index a48b2cadc7f..a48b2cadc7f 100644 --- a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt +++ b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt diff --git a/Documentation/powerpc/dts-bindings/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt index 80152cb567d..80152cb567d 100644 --- a/Documentation/powerpc/dts-bindings/mtd-physmap.txt +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt index 2fa4fcd38fd..2fa4fcd38fd 100644 --- a/Documentation/powerpc/dts-bindings/fsl/can.txt +++ b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt diff --git a/Documentation/powerpc/dts-bindings/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt index d6d209ded93..d6d209ded93 100644 --- a/Documentation/powerpc/dts-bindings/can/sja1000.txt +++ b/Documentation/devicetree/bindings/net/can/sja1000.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/tsec.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt index edb7ae19e86..edb7ae19e86 100644 --- a/Documentation/powerpc/dts-bindings/fsl/tsec.txt +++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt index bc954952901..bc954952901 100644 --- a/Documentation/powerpc/dts-bindings/gpio/mdio.txt +++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index bb8c742eb8c..bb8c742eb8c 100644 --- a/Documentation/powerpc/dts-bindings/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt index 35a46536240..35a46536240 100644 --- a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt +++ b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt index ee459806d35..ee459806d35 100644 --- a/Documentation/powerpc/dts-bindings/4xx/cpm.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt index 2161334a7ca..2161334a7ca 100644 --- a/Documentation/powerpc/dts-bindings/4xx/emac.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt index 869f0b5f16e..869f0b5f16e 100644 --- a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt index 515ebcf1b97..515ebcf1b97 100644 --- 
a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt index d7217260589..d7217260589 100644 --- a/Documentation/powerpc/dts-bindings/4xx/reboot.txt +++ b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt index 39e941515a3..39e941515a3 100644 --- a/Documentation/powerpc/dts-bindings/fsl/board.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt index 160c752484b..160c752484b 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt index 4c7d45eaf02..4c7d45eaf02 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt index 87bc6048667..87bc6048667 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt index 8e3ee168161..8e3ee168161 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt index 74bfda4bb82..74bfda4bb82 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt index 349f79fd707..349f79fd707 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt index 0e426944658..0e426944658 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt index 4f8930263dd..4f8930263dd 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt index 249db3a15d1..249db3a15d1 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt 
b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt index 60984260207..60984260207 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt index c5b43061db3..c5b43061db3 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt index e47734bee3f..e47734bee3f 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt index 9ccd5f30405..9ccd5f30405 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt index 2ea76d9d137..2ea76d9d137 100644 --- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt index b66cb6d31d6..b66cb6d31d6 100644 --- a/Documentation/powerpc/dts-bindings/fsl/diu.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt index 2a4b4bce611..2a4b4bce611 100644 --- a/Documentation/powerpc/dts-bindings/fsl/dma.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt diff --git a/Documentation/powerpc/dts-bindings/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt index f514f29c67d..f514f29c67d 100644 --- a/Documentation/powerpc/dts-bindings/ecm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/gtm.txt b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt index 9a33efded4b..9a33efded4b 100644 --- a/Documentation/powerpc/dts-bindings/fsl/gtm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/guts.txt b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt index 9e7a2417dac..9e7a2417dac 100644 --- a/Documentation/powerpc/dts-bindings/fsl/guts.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/lbc.txt b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt index 3300fec501c..3300fec501c 100644 --- a/Documentation/powerpc/dts-bindings/fsl/lbc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt index 4ceda9b3b41..4ceda9b3b41 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mcm.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt index 0f766333b6e..0f766333b6e 100644 --- 
a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt index 8832e879891..8832e879891 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt index 4ccb2cd5df9..4ccb2cd5df9 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt index 71e39cf3215..71e39cf3215 100644 --- a/Documentation/powerpc/dts-bindings/fsl/mpic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt index bcc30bac683..bcc30bac683 100644 --- a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/pmc.txt b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt index 07256b7ffca..07256b7ffca 100644 --- a/Documentation/powerpc/dts-bindings/fsl/pmc.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/sec.txt b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt index 2b6f2d45c45..2b6f2d45c45 100644 --- a/Documentation/powerpc/dts-bindings/fsl/sec.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/ssi.txt b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt index 5ff76c9c57d..5ff76c9c57d 100644 --- a/Documentation/powerpc/dts-bindings/fsl/ssi.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt diff --git a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt index b558585b1aa..b558585b1aa 100644 --- a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt +++ b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt diff --git a/Documentation/powerpc/dts-bindings/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt index a7e155a023b..a7e155a023b 100644 --- a/Documentation/powerpc/dts-bindings/nintendo/wii.txt +++ b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt index 777abd7399d..777abd7399d 100644 --- a/Documentation/powerpc/dts-bindings/fsl/spi.txt +++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt index e782add2e45..e782add2e45 100644 --- a/Documentation/powerpc/dts-bindings/spi-bus.txt +++ b/Documentation/devicetree/bindings/spi/spi-bus.txt diff --git a/Documentation/powerpc/dts-bindings/fsl/usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt index bd5723f0b67..bd5723f0b67 100644 --- a/Documentation/powerpc/dts-bindings/fsl/usb.txt +++ b/Documentation/devicetree/bindings/usb/fsl-usb.txt diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt 
b/Documentation/devicetree/bindings/usb/usb-ehci.txt index fa18612f757..fa18612f757 100644 --- a/Documentation/powerpc/dts-bindings/usb-ehci.txt +++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt index 299d0923537..299d0923537 100644 --- a/Documentation/powerpc/dts-bindings/xilinx.txt +++ b/Documentation/devicetree/bindings/xilinx.txt diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt index 7400d7555dc..28b1c9d3d35 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/devicetree/booting-without-of.txt @@ -13,7 +13,6 @@ Table of Contents I - Introduction 1) Entry point for arch/powerpc - 2) Board support II - The DT block format 1) Header @@ -41,13 +40,6 @@ Table of Contents VI - System-on-a-chip devices and nodes 1) Defining child nodes of an SOC 2) Representing devices without a current OF specification - a) PHY nodes - b) Interrupt controllers - c) 4xx/Axon EMAC ethernet nodes - d) Xilinx IP cores - e) USB EHCI controllers - f) MDIO on GPIOs - g) SPI busses VII - Specifying interrupt information for devices 1) interrupts property @@ -123,7 +115,7 @@ Revision Information I - Introduction ================ -During the recent development of the Linux/ppc64 kernel, and more +During the development of the Linux/ppc64 kernel, and more specifically, the addition of new platform types outside of the old IBM pSeries/iSeries pair, it was decided to enforce some strict rules regarding the kernel entry and bootloader <-> kernel interfaces, in @@ -146,7 +138,7 @@ section III, but, for example, the kernel does not require you to create a node for every PCI device in the system. It is a requirement to have a node for PCI host bridges in order to provide interrupt routing informations and memory/IO ranges, among others. It is also -recommended to define nodes for on chip devices and other busses that +recommended to define nodes for on chip devices and other buses that don't specifically fit in an existing OF specification. This creates a great flexibility in the way the kernel can then probe those and match drivers to device, without having to hard code all sorts of tables. It @@ -158,7 +150,7 @@ it with special cases. 1) Entry point for arch/powerpc ------------------------------- - There is one and one single entry point to the kernel, at the start + There is one single entry point to the kernel, at the start of the kernel image. That entry point supports two calling conventions: @@ -210,12 +202,6 @@ it with special cases. with all CPUs. The way to do that with method b) will be described in a later revision of this document. - -2) Board support ----------------- - -64-bit kernels: - Board supports (platforms) are not exclusive config options. An arbitrary set of board supports can be built in a single kernel image. The kernel will "know" what set of functions to use for a @@ -234,48 +220,11 @@ it with special cases. containing the various callbacks that the generic code will use to get to your platform specific code - c) Add a reference to your "ppc_md" structure in the - "machines" table in arch/powerpc/kernel/setup_64.c if you are - a 64-bit platform. - - d) request and get assigned a platform number (see PLATFORM_* - constants in arch/powerpc/include/asm/processor.h - -32-bit embedded kernels: - - Currently, board support is essentially an exclusive config option. 
- The kernel is configured for a single platform. Part of the reason - for this is to keep kernels on embedded systems small and efficient; - part of this is due to the fact the code is already that way. In the - future, a kernel may support multiple platforms, but only if the + A kernel image may support multiple platforms, but only if the platforms feature the same core architecture. A single kernel build cannot support both configurations with Book E and configurations with classic Powerpc architectures. - 32-bit embedded platforms that are moved into arch/powerpc using a - flattened device tree should adopt the merged tree practice of - setting ppc_md up dynamically, even though the kernel is currently - built with support for only a single platform at a time. This allows - unification of the setup code, and will make it easier to go to a - multiple-platform-support model in the future. - -NOTE: I believe the above will be true once Ben's done with the merge -of the boot sequences.... someone speak up if this is wrong! - - To add a 32-bit embedded platform support, follow the instructions - for 64-bit platforms above, with the exception that the Kconfig - option should be set up such that the kernel builds exclusively for - the platform selected. The processor type for the platform should - enable another config option to select the specific board - supported. - -NOTE: If Ben doesn't merge the setup files, may need to change this to -point to setup_32.c - - - I will describe later the boot process and various callbacks that - your platform should implement. - II - The DT block format ======================== @@ -300,8 +249,8 @@ the block to RAM before passing it to the kernel. 1) Header --------- - The kernel is entered with r3 pointing to an area of memory that is - roughly described in arch/powerpc/include/asm/prom.h by the structure + The kernel is passed the physical address pointing to an area of memory + that is roughly described in include/linux/of_fdt.h by the structure boot_param_header: struct boot_param_header { @@ -339,7 +288,7 @@ struct boot_param_header { All values in this header are in big endian format, the various fields in this header are defined more precisely below. All "offset" values are in bytes from the start of the header; that is - from the value of r3. + from the physical base address of the device tree block. - magic @@ -437,7 +386,7 @@ struct boot_param_header { ------------------------------ - r3 -> | struct boot_param_header | + base -> | struct boot_param_header | ------------------------------ | (alignment gap) (*) | ------------------------------ @@ -457,7 +406,7 @@ struct boot_param_header { -----> ------------------------------ | | - --- (r3 + totalsize) + --- (base + totalsize) (*) The alignment gaps are not necessarily present; their presence and size are dependent on the various alignment requirements of @@ -500,7 +449,7 @@ the device-tree structure. It is typically used to represent "path" in the device-tree. More details about the actual format of these will be below. -The kernel powerpc generic code does not make any formal use of the +The kernel generic code does not make any formal use of the unit address (though some board support code may do) so the only real requirement here for the unit address is to ensure uniqueness of the node unit name at a given level of the tree. Nodes with no notion @@ -518,20 +467,21 @@ path to the root node is "/". 
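(Aside on the boot_param_header layout quoted earlier in this hunk: the sketch below shows, in plain user-space C rather than kernel code, what validating such a flattened-device-tree header could look like. The struct name fdt_header, the be32() helper and the sample main() are inventions for the example; only the ordering of the first few fields and the 0xd00dfeed magic value follow the header description above, and all header values are stored big-endian as the text says.)

#include <stdint.h>
#include <stdio.h>

/* First members of the header, in the order documented above. */
struct fdt_header {
	uint32_t magic;          /* expected to be 0xd00dfeed */
	uint32_t totalsize;      /* total size of the DT block */
	uint32_t off_dt_struct;  /* offset of the structure block */
	uint32_t off_dt_strings; /* offset of the strings block */
	uint32_t off_mem_rsvmap; /* offset of the memory reserve map */
	uint32_t version;
	uint32_t last_comp_version;
};

/* Header fields are big-endian; decode one 32-bit value byte by byte. */
static uint32_t be32(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

/* 'base' plays the role of the device-tree block base address. */
static int fdt_sanity_check(const void *base)
{
	const struct fdt_header *h = base;

	if (be32(&h->magic) != 0xd00dfeedU)
		return -1;
	printf("FDT: %u bytes total, structure block at offset %u\n",
	       be32(&h->totalsize), be32(&h->off_dt_struct));
	return 0;
}

int main(void)
{
	/* Build a fake header containing only the magic, for demonstration. */
	struct fdt_header h = { 0 };
	uint8_t *m = (uint8_t *)&h.magic;

	m[0] = 0xd0; m[1] = 0x0d; m[2] = 0xfe; m[3] = 0xed;
	return fdt_sanity_check(&h) ? 1 : 0;
}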
Every node which actually represents an actual device (that is, a node which isn't only a virtual "container" for more nodes, like "/cpus" -is) is also required to have a "device_type" property indicating the -type of node . +is) is also required to have a "compatible" property indicating the +specific hardware and an optional list of devices it is fully +backwards compatible with. Finally, every node that can be referenced from a property in another -node is required to have a "linux,phandle" property. Real open -firmware implementations provide a unique "phandle" value for every -node that the "prom_init()" trampoline code turns into -"linux,phandle" properties. However, this is made optional if the -flattened device tree is used directly. An example of a node +node is required to have either a "phandle" or a "linux,phandle" +property. Real Open Firmware implementations provide a unique +"phandle" value for every node that the "prom_init()" trampoline code +turns into "linux,phandle" properties. However, this is made optional +if the flattened device tree is used directly. An example of a node referencing another node via "phandle" is when laying out the interrupt tree which will be described in a further version of this document. -This "linux, phandle" property is a 32-bit value that uniquely +The "phandle" property is a 32-bit value that uniquely identifies a node. You are free to use whatever values or system of values, internal pointers, or whatever to generate these, the only requirement is that every node for which you provide that property has @@ -694,7 +644,7 @@ made of 3 cells, the bottom two containing the actual address itself while the top cell contains address space indication, flags, and pci bus & device numbers. -For busses that support dynamic allocation, it's the accepted practice +For buses that support dynamic allocation, it's the accepted practice to then not provide the address in "reg" (keep it 0) though while providing a flag indicating the address is dynamically allocated, and then, to provide a separate "assigned-addresses" property that @@ -711,7 +661,7 @@ prom_parse.c file of the recent kernels for your bus type. The "reg" property only defines addresses and sizes (if #size-cells is non-0) within a given bus. In order to translate addresses upward (that is into parent bus addresses, and possibly into CPU physical -addresses), all busses must contain a "ranges" property. If the +addresses), all buses must contain a "ranges" property. If the "ranges" property is missing at a given level, it's assumed that translation isn't possible, i.e., the registers are not visible on the parent bus. The format of the "ranges" property for a bus is a list @@ -727,9 +677,9 @@ example, for a PCI host controller, that would be a CPU address. For a PCI<->ISA bridge, that would be a PCI address. It defines the base address in the parent bus where the beginning of that range is mapped. -For a new 64-bit powerpc board, I recommend either the 2/2 format or +For new 64-bit board support, I recommend either the 2/2 format or Apple's 2/1 format which is slightly more compact since sizes usually -fit in a single 32-bit word. New 32-bit powerpc boards should use a +fit in a single 32-bit word. New 32-bit board support should use a 1/1 format, unless the processor supports physical addresses greater than 32-bits, in which case a 2/1 format is recommended. @@ -754,7 +704,7 @@ of their actual names. 
While earlier users of Open Firmware like OldWorld macintoshes tended to use the actual device name for the "name" property, it's nowadays considered a good practice to use a name that is closer to the device -class (often equal to device_type). For example, nowadays, ethernet +class (often equal to device_type). For example, nowadays, Ethernet controllers are named "ethernet", an additional "model" property defining precisely the chip type/model, and "compatible" property defining the family in case a single driver can driver more than one @@ -772,7 +722,7 @@ is present). 4) Note about node and property names and character set ------------------------------------------------------- -While open firmware provides more flexible usage of 8859-1, this +While Open Firmware provides more flexible usage of 8859-1, this specification enforces more strict rules. Nodes and properties should be comprised only of ASCII characters 'a' to 'z', '0' to '9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally @@ -792,7 +742,7 @@ address which can extend beyond that limit. -------------------------------- These are all that are currently required. However, it is strongly recommended that you expose PCI host bridges as documented in the - PCI binding to open firmware, and your interrupt tree as documented + PCI binding to Open Firmware, and your interrupt tree as documented in OF interrupt tree specification. a) The root node @@ -802,20 +752,12 @@ address which can extend beyond that limit. - model : this is your board name/model - #address-cells : address representation for "root" devices - #size-cells: the size representation for "root" devices - - device_type : This property shouldn't be necessary. However, if - you decide to create a device_type for your root node, make sure it - is _not_ "chrp" unless your platform is a pSeries or PAPR compliant - one for 64-bit, or a CHRP-type machine for 32-bit as this will - matched by the kernel this way. - - Additionally, some recommended properties are: - - compatible : the board "family" generally finds its way here, for example, if you have 2 board models with a similar layout, that typically get driven by the same platform code in the - kernel, you would use a different "model" property but put a - value in "compatible". The kernel doesn't directly use that - value but it is generally useful. + kernel, you would specify the exact board model in the + compatible property followed by an entry that represents the SoC + model. The root node is also generally where you add additional properties specific to your board like the serial number if any, that sort of @@ -841,8 +783,11 @@ address which can extend beyond that limit. So under /cpus, you are supposed to create a node for every CPU on the machine. There is no specific restriction on the name of the - CPU, though It's common practice to call it PowerPC,<name>. For + CPU, though it's common to call it <architecture>,<core>. For example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX. + However, the Generic Names convention suggests that it would be + better to simply use 'cpu' for each cpu node and use the compatible + property to identify the specific cpu core. Required properties: @@ -923,7 +868,7 @@ compatibility. e) The /chosen node - This node is a bit "special". Normally, that's where open firmware + This node is a bit "special". Normally, that's where Open Firmware puts some variable environment information, like the arguments, or the default input/output devices. 
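To connect the "compatible" property discussed above to running code: those strings are what platform and driver code actually match against. The fragment below is an illustrative sketch only; the board and SoC strings are made up, and of_machine_is_compatible() is the helper the kernel offers for checking the root node's compatible list (see include/linux/of.h in your tree for the exact prototype).

#include <linux/of.h>
#include <linux/init.h>

/*
 * Hypothetical board check: match the exact board string first, then
 * fall back to the SoC family, mirroring the "<board>", "<soc>"
 * ordering recommended for the root node's compatible property.
 */
static int __init coyote_board_probe(void)
{
	if (of_machine_is_compatible("acme,coyote-board"))
		return 1;

	return of_machine_is_compatible("acme,anvil-soc");
}

A platform's probe() callback typically follows this pattern, returning non-zero when the device tree identifies one of the boards that platform code supports.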
@@ -940,11 +885,7 @@ compatibility. console device if any. Typically, if you have serial devices on your board, you may want to put the full path to the one set as the default console in the firmware here, for the kernel to pick - it up as its own default console. If you look at the function - set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see - that the kernel tries to find out the default console and has - knowledge of various types like 8250 serial ports. You may want - to extend this function to add your own. + it up as its own default console. Note that u-boot creates and fills in the chosen node for platforms that use it. @@ -955,23 +896,23 @@ compatibility. f) the /soc<SOCname> node - This node is used to represent a system-on-a-chip (SOC) and must be - present if the processor is a SOC. The top-level soc node contains - information that is global to all devices on the SOC. The node name - should contain a unit address for the SOC, which is the base address - of the memory-mapped register set for the SOC. The name of an soc + This node is used to represent a system-on-a-chip (SoC) and must be + present if the processor is a SoC. The top-level soc node contains + information that is global to all devices on the SoC. The node name + should contain a unit address for the SoC, which is the base address + of the memory-mapped register set for the SoC. The name of an SoC node should start with "soc", and the remainder of the name should represent the part number for the soc. For example, the MPC8540's soc node would be called "soc8540". Required properties: - - device_type : Should be "soc" - ranges : Should be defined as specified in 1) to describe the - translation of SOC addresses for memory mapped SOC registers. - - bus-frequency: Contains the bus frequency for the SOC node. + translation of SoC addresses for memory mapped SoC registers. + - bus-frequency: Contains the bus frequency for the SoC node. Typically, the value of this field is filled in by the boot loader. + - compatible : Exact model of the SoC Recommended properties: @@ -1155,12 +1096,13 @@ while all this has been defined and implemented. - An example of code for iterating nodes & retrieving properties directly from the flattened tree format can be found in the kernel - file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function, + file drivers/of/fdt.c. Look at the of_scan_flat_dt() function, its usage in early_init_devtree(), and the corresponding various early_init_dt_scan_*() callbacks. That code can be re-used in a GPL bootloader, and as the author of that code, I would be happy to discuss possible free licensing to any vendor who wishes to integrate all or part of this code into a non-GPL bootloader. + (reference needed; who is 'I' here? ---gcl Jan 31, 2011) @@ -1203,18 +1145,19 @@ MPC8540. 2) Representing devices without a current OF specification ---------------------------------------------------------- -Currently, there are many devices on SOCs that do not have a standard -representation pre-defined as part of the open firmware -specifications, mainly because the boards that contain these SOCs are -not currently booted using open firmware. This section contains -descriptions for the SOC devices for which new nodes have been -defined; this list will expand as more and more SOC-containing -platforms are moved over to use the flattened-device-tree model. 
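For the of_scan_flat_dt() route mentioned a little earlier (iterating the tree while it is still flattened), a rough sketch of an early_init_dt_scan_*-style callback follows. The callback name and the "my,board" string are invented; of_scan_flat_dt() and of_flat_dt_is_compatible() are the helpers in drivers/of/fdt.c, where the convention is that a non-zero return value stops the walk. Check your kernel version for the exact prototypes.

#include <linux/of_fdt.h>
#include <linux/init.h>

/* Set *data to 1 if the root node claims compatibility with "my,board". */
static int __init early_scan_for_my_board(unsigned long node,
					  const char *uname,
					  int depth, void *data)
{
	if (depth != 0)
		return 0;	/* keep scanning; only the root node is interesting */

	if (of_flat_dt_is_compatible(node, "my,board"))
		*(int *)data = 1;

	return 1;		/* non-zero stops the scan */
}

/*
 * Typical early use, e.g. from platform probe code:
 *
 *	int found = 0;
 *	of_scan_flat_dt(early_scan_for_my_board, &found);
 */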
+Currently, there are many devices on SoCs that do not have a standard +representation defined as part of the Open Firmware specifications, +mainly because the boards that contain these SoCs are not currently +booted using Open Firmware. Binding documentation for new devices +should be added to the Documentation/devicetree/bindings directory. +That directory will expand as device tree support is added to more and +more SoCs. + VII - Specifying interrupt information for devices =================================================== -The device tree represents the busses and devices of a hardware +The device tree represents the buses and devices of a hardware system in a form similar to the physical bus topology of the hardware. diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42 index 0e76ef12e4c..a22ecf48f25 100644 --- a/Documentation/hwmon/jc42 +++ b/Documentation/hwmon/jc42 @@ -51,7 +51,8 @@ Supported chips: * JEDEC JC 42.4 compliant temperature sensor chips Prefix: 'jc42' Addresses scanned: I2C 0x18 - 0x1f - Datasheet: - + Datasheet: + http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf Author: Guenter Roeck <guenter.roeck@ericsson.com> @@ -60,7 +61,11 @@ Author: Description ----------- -This driver implements support for JEDEC JC 42.4 compliant temperature sensors. +This driver implements support for JEDEC JC 42.4 compliant temperature sensors, +which are used on many DDR3 memory modules for mobile devices and servers. Some +systems use the sensor to prevent memory overheating by automatically throttling +the memory controller. + The driver auto-detects the chips listed above, but can be manually instantiated to support other JC 42.4 compliant chips. @@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis, which applies to all limits. This register can be written by writing into temp1_crit_hyst. Other hysteresis attributes are read-only. +If the BIOS has configured the sensor for automatic temperature management, it +is likely that it has locked the registers, i.e., that the temperature limits +cannot be changed. 
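The attributes listed under "Sysfs entries" just below are ordinary hwmon sysfs files, so they can be read from user space without driver-specific tooling. A minimal sketch follows; the hwmon0 instance number is hypothetical (on some kernels the attributes sit under a device/ subdirectory of the hwmon class device), and hwmon temp*_input values are reported in millidegrees Celsius.

#include <stdio.h>

int main(void)
{
	/* Path is illustrative; the instance number varies per system. */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long millideg = 0;

	if (!f) {
		perror("temp1_input");
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("DIMM temperature: %ld.%03ld degrees C\n",
	       millideg / 1000, millideg % 1000);
	return 0;
}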
+ Sysfs entries ------------- temp1_input Temperature (RO) -temp1_min Minimum temperature (RW) -temp1_max Maximum temperature (RW) -temp1_crit Critical high temperature (RW) +temp1_min Minimum temperature (RO or RW) +temp1_max Maximum temperature (RO or RW) +temp1_crit Critical high temperature (RO or RW) -temp1_crit_hyst Critical hysteresis temperature (RW) +temp1_crit_hyst Critical hysteresis temperature (RO or RW) temp1_max_hyst Maximum hysteresis temperature (RO) temp1_min_alarm Temperature low alarm diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp index 6526eee525a..d2b56a4fd1f 100644 --- a/Documentation/hwmon/k10temp +++ b/Documentation/hwmon/k10temp @@ -9,6 +9,8 @@ Supported chips: Socket S1G3: Athlon II, Sempron, Turion II * AMD Family 11h processors: Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) +* AMD Family 12h processors: "Llano" +* AMD Family 14h processors: "Brazos" (C/E/G-Series) Prefix: 'k10temp' Addresses scanned: PCI space @@ -17,10 +19,14 @@ Supported chips: http://support.amd.com/us/Processor_TechDocs/31116.pdf BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors: http://support.amd.com/us/Processor_TechDocs/41256.pdf + BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors: + http://support.amd.com/us/Processor_TechDocs/43170.pdf Revision Guide for AMD Family 10h Processors: http://support.amd.com/us/Processor_TechDocs/41322.pdf Revision Guide for AMD Family 11h Processors: http://support.amd.com/us/Processor_TechDocs/41788.pdf + Revision Guide for AMD Family 14h Models 00h-0Fh Processors: + http://support.amd.com/us/Processor_TechDocs/47534.pdf AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks: http://support.amd.com/us/Processor_TechDocs/43373.pdf AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet: @@ -34,7 +40,7 @@ Description ----------- This driver permits reading of the internal temperature sensor of AMD -Family 10h and 11h processors. +Family 10h/11h/12h/14h processors. All these processors have a sensor, but on those for Socket F or AM2+, the sensor may return inconsistent values (erratum 319). The driver diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 89835a4766a..f4a04c0c7ed 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture and is between 256 and 4096 characters. It is defined in the file ./include/asm/setup.h as COMMAND_LINE_SIZE. +Finally, the [KMG] suffix is commonly described after a number of kernel +parameter values. These 'K', 'M', and 'G' letters represent the _binary_ +multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30 +bytes respectively. Such letter suffixes can also be entirely omitted. + acpi= [HW,ACPI,X86] Advanced Configuration and Power Interface @@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file Format: <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>] - crashkernel=nn[KMG]@ss[KMG] - [KNL] Reserve a chunk of physical memory to - hold a kernel to switch to with kexec on panic. + crashkernel=size[KMG][@offset[KMG]] + [KNL] Using kexec, Linux can switch to a 'crash kernel' + upon panic. This parameter reserves the physical + memory region [offset, offset + size] for that kernel + image. If '@offset' is omitted, then a suitable offset + is selected automatically. 
Check + Documentation/kdump/kdump.txt for further details. crashkernel=range1:size1[,range2:size2,...][@offset] [KNL] Same as above, but depends on the memory in the running system. The syntax of range is start-[end] where start and end are both a memory unit (amount[KMG]). See also - Documentation/kdump/kdump.txt for a example. + Documentation/kdump/kdump.txt for an example. cs89x0_dma= [HW,NET] Format: <dma> @@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file 6 (KERN_INFO) informational 7 (KERN_DEBUG) debug-level messages - log_buf_len=n Sets the size of the printk ring buffer, in bytes. - Format: { n | nk | nM } - n must be a power of two. The default size - is set in the kernel config file. + log_buf_len=n[KMG] Sets the size of the printk ring buffer, + in bytes. n must be a power of two. The default + size is set in the kernel config file. logo.nologo [FB] Disables display of the built-in Linux logo. This may be used to provide more screen space for diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile index 5aba7a33aee..24c308dd3fd 100644 --- a/Documentation/networking/Makefile +++ b/Documentation/networking/Makefile @@ -4,6 +4,8 @@ obj- := dummy.o # List of programs to build hostprogs-y := ifenslave +HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include + # Tell kbuild to always build the programs always := $(hostprogs-y) diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index 996a27d9b8d..01c513fac40 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt @@ -190,9 +190,9 @@ resources, scheduled and executed. * Long running CPU intensive workloads which can be better managed by the system scheduler. - WQ_FREEZEABLE + WQ_FREEZABLE - A freezeable wq participates in the freeze phase of the system + A freezable wq participates in the freeze phase of the system suspend operations. Work items on the wq are drained and no new work item starts execution until thawed. 
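As a usage note for the freezable-workqueue behaviour described above, here is a small sketch of creating such a queue. It assumes the corrected WQ_FREEZABLE spelling (trees from around this change may still use the older WQ_FREEZEABLE name), and the queue name, work function and init hook are placeholders; alloc_workqueue(), DECLARE_WORK() and queue_work() are the standard workqueue API.

#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context; does not start execution while frozen. */
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_setup(void)
{
	/* Freezable: drained during suspend, thawed on resume. */
	my_wq = alloc_workqueue("my_wq", WQ_FREEZABLE, 0);
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}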
diff --git a/MAINTAINERS b/MAINTAINERS index 531c5cf150b..8afba6321e2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -885,7 +885,7 @@ S: Supported ARM/QUALCOMM MSM MACHINE SUPPORT M: David Brown <davidb@codeaurora.org> -M: Daniel Walker <dwalker@codeaurora.org> +M: Daniel Walker <dwalker@fifo99.com> M: Bryan Huntsman <bryanh@codeaurora.org> L: linux-arm-msm@vger.kernel.org F: arch/arm/mach-msm/ @@ -1692,6 +1692,13 @@ M: Andy Whitcroft <apw@canonical.com> S: Supported F: scripts/checkpatch.pl +CHINESE DOCUMENTATION +M: Harry Wei <harryxiyou@gmail.com> +L: xiyoulinuxkernelgroup@googlegroups.com +L: linux-kernel@zh-kernel.org (moderated for non-subscribers) +S: Maintained +F: Documentation/zh_CN/ + CISCO VIC ETHERNET NIC DRIVER M: Vasanthy Kolluri <vkolluri@cisco.com> M: Roopa Prabhu <roprabhu@cisco.com> @@ -2126,6 +2133,7 @@ S: Supported F: fs/dlm/ DMA GENERIC OFFLOAD ENGINE SUBSYSTEM +M: Vinod Koul <vinod.koul@intel.com> M: Dan Williams <dan.j.williams@intel.com> S: Supported F: drivers/dma/ @@ -2774,6 +2782,15 @@ F: Documentation/isdn/README.gigaset F: drivers/isdn/gigaset/ F: include/linux/gigaset_dev.h +GPIO SUBSYSTEM +M: Grant Likely <grant.likely@secretlab.ca> +L: linux-kernel@vger.kernel.org +S: Maintained +T: git git://git.secretlab.ca/git/linux-2.6.git +F: Documentation/gpio/gpio.txt +F: drivers/gpio/ +F: include/linux/gpio* + GRETH 10/100/1G Ethernet MAC device driver M: Kristoffer Glembo <kristoffer@gaisler.com> L: netdev@vger.kernel.org @@ -2863,7 +2880,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com> L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ -T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git S: Maintained F: Documentation/hwmon/ @@ -4591,7 +4607,7 @@ F: drivers/i2c/busses/i2c-ocores.c OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Grant Likely <grant.likely@secretlab.ca> -L: devicetree-discuss@lists.ozlabs.org +L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers) W: http://fdt.secretlab.ca T: git git://git.secretlab.ca/git/linux-2.6.git S: Maintained @@ -5257,7 +5273,7 @@ S: Maintained F: drivers/net/wireless/rtl818x/rtl8180/ RTL8187 WIRELESS DRIVER -M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> +M: Herton Ronaldo Krzesinski <herton@canonical.com> M: Hin-Tak Leung <htl10@users.sourceforge.net> M: Larry Finger <Larry.Finger@lwfinger.net> L: linux-wireless@vger.kernel.org @@ -6095,7 +6111,7 @@ S: Maintained F: security/tomoyo/ TOPSTAR LAPTOP EXTRAS DRIVER -M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> +M: Herton Ronaldo Krzesinski <herton@canonical.com> L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/topstar-laptop.c @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 38 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc7 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb..166efa2a19c 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622 visible impact on the overall performance or power consumption of the processor. +config ARM_ERRATA_751472 + bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 751472 Cortex-A9 (prior + to r3p0) erratum. 
An interrupted ICIALLUIS operation may prevent the + completion of a following broadcasted operation if the second + operation is received by a CPU before the ICIALLUIS has completed, + potentially leading to corrupted entries in the cache or TLB. + +config ARM_ERRATA_753970 + bool "ARM errata: cache sync operation may be faulty" + depends on CACHE_PL310 + help + This option enables the workaround for the 753970 PL310 (r3p0) erratum. + + Under some condition the effect of cache sync operation on + the store buffer still remains when the operation completes. + This means that the store buffer is always asked to drain and + this prevents it from merging any further writes. The workaround + is to replace the normal offset of cache sync operation (0x730) + by another offset targeting an unmapped PL310 register 0x740. + This has the same effect as the cache sync operation: store buffer + drain and waiting for all buffers empty. + endmenu source "arch/arm/common/Kconfig" @@ -1391,7 +1416,7 @@ config AEABI config OABI_COMPAT bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" - depends on AEABI && EXPERIMENTAL + depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL default y help This option preserves the old syscall interface along with the diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1adfedd..6f7b29294c8 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 endif -OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS :=-O binary -R .comment -S GZFLAGS :=-9 #KBUILD_CFLAGS +=-pipe # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index ab204db594d..c6028967d33 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,3 +1,7 @@ font.c -piggy.gz +lib1funcs.S +piggy.gzip +piggy.lzo +piggy.lzma +vmlinux vmlinux.lds diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5aeec1e1735..16bd4803158 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -36,6 +36,7 @@ #define L2X0_RAW_INTR_STAT 0x21C #define L2X0_INTR_CLEAR 0x220 #define L2X0_CACHE_SYNC 0x730 +#define L2X0_DUMMY_REG 0x740 #define L2X0_INV_LINE_PA 0x770 #define L2X0_INV_WAY 0x77C #define L2X0_CLEAN_LINE_PA 0x7B0 diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 721847dc68a..e0d1c0cfa54 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h @@ -58,6 +58,9 @@ static inline void sysctl_soft_reset(void __iomem *base) { + /* switch to slow mode */ + writel(0x2, base + SCCTRL); + /* writing any value to SCSYSSTAT reg will reset system */ writel(0, base + SCSYSSTAT); } diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd1..82dfe5d0c41 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -18,16 +18,34 @@ #define __ASMARM_TLB_H #include <asm/cacheflush.h> -#include <asm/tlbflush.h> #ifndef CONFIG_MMU #include <linux/pagemap.h> + +#define tlb_flush(tlb) ((void) tlb) + #include <asm-generic/tlb.h> #else /* !CONFIG_MMU */ +#include <linux/swap.h> #include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +/* + * We need to delay page freeing for SMP as other CPUs can access pages + * which have been removed but not yet had 
their TLB entries invalidated. + * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, + * we need to apply this same delaying tactic to ensure correct operation. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) +#define tlb_fast_mode(tlb) 0 +#define FREE_PTE_NR 500 +#else +#define tlb_fast_mode(tlb) 1 +#define FREE_PTE_NR 0 +#endif /* * TLB handling. This allows us to remove pages from the page @@ -36,12 +54,58 @@ struct mmu_gather { struct mm_struct *mm; unsigned int fullmm; + struct vm_area_struct *vma; unsigned long range_start; unsigned long range_end; + unsigned int nr; + struct page *pages[FREE_PTE_NR]; }; DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); +/* + * This is unnecessarily complex. There's three ways the TLB shootdown + * code is used: + * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). + * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. + * 2. Unmapping all vmas. See exit_mmap(). + * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. Additionally, page tables will be freed. + * 3. Unmapping argument pages. See shift_arg_pages(). + * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. + * tlb->vma will be NULL. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || !tlb->vma) + flush_tlb_mm(tlb->mm); + else if (tlb->range_end > 0) { + flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); + tlb->range_start = TASK_SIZE; + tlb->range_end = 0; + } +} + +static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +{ + if (!tlb->fullmm) { + if (addr < tlb->range_start) + tlb->range_start = addr; + if (addr + PAGE_SIZE > tlb->range_end) + tlb->range_end = addr + PAGE_SIZE; + } +} + +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush(tlb); + if (!tlb_fast_mode(tlb)) { + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + } +} + static inline struct mmu_gather * tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) { @@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) tlb->mm = mm; tlb->fullmm = full_mm_flush; + tlb->vma = NULL; + tlb->nr = 0; return tlb; } @@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - if (tlb->fullmm) - flush_tlb_mm(tlb->mm); + tlb_flush_mmu(tlb); /* keep the page table cache within bounds */ check_pgt_cache(); @@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) static inline void tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) { - if (!tlb->fullmm) { - if (addr < tlb->range_start) - tlb->range_start = addr; - if (addr + PAGE_SIZE > tlb->range_end) - tlb->range_end = addr + PAGE_SIZE; - } + tlb_add_flush(tlb, addr); } /* @@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->fullmm) { flush_cache_range(vma, vma->vm_start, vma->vm_end); + tlb->vma = vma; tlb->range_start = TASK_SIZE; tlb->range_end = 0; } @@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { - if (!tlb->fullmm && tlb->range_end > 0) - flush_tlb_range(vma, tlb->range_start, tlb->range_end); + if (!tlb->fullmm) + tlb_flush(tlb); +} + +static inline void 
tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + } else { + tlb->pages[tlb->nr++] = page; + if (tlb->nr >= FREE_PTE_NR) + tlb_flush_mmu(tlb); + } +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) +{ + pgtable_page_dtor(pte); + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, pte); } -#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define tlb_migrate_finish(mm) do { } while (0) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a..d2005de383b 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,12 +10,7 @@ #ifndef _ASMARM_TLBFLUSH_H #define _ASMARM_TLBFLUSH_H - -#ifndef CONFIG_MMU - -#define tlb_flush(tlb) ((void) tlb) - -#else /* CONFIG_MMU */ +#ifdef CONFIG_MMU #include <asm/glue.h> diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index c0225da3fb2..f06ff9feb0d 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on) #ifdef CONFIG_SMP_ON_UP + __INIT __fixup_smp: and r3, r9, #0x000f0000 @ architecture version teq r3, #0x000f0000 @ CPU ID supported? @@ -415,18 +416,7 @@ __fixup_smp_on_up: sub r3, r0, r3 add r4, r4, r3 add r5, r5, r3 -2: cmp r4, r5 - movhs pc, lr - ldmia r4!, {r0, r6} - ARM( str r6, [r0, r3] ) - THUMB( add r0, r0, r3 ) -#ifdef __ARMEB__ - THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. -#endif - THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords - THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. - THUMB( strh r6, [r0] ) - b 2b + b __do_fixup_smp_on_up ENDPROC(__fixup_smp) .align @@ -440,7 +430,31 @@ smp_on_up: ALT_SMP(.long 1) ALT_UP(.long 0) .popsection +#endif + .text +__do_fixup_smp_on_up: + cmp r4, r5 + movhs pc, lr + ldmia r4!, {r0, r6} + ARM( str r6, [r0, r3] ) + THUMB( add r0, r0, r3 ) +#ifdef __ARMEB__ + THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. #endif + THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords + THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. + THUMB( strh r6, [r0] ) + b __do_fixup_smp_on_up +ENDPROC(__do_fixup_smp_on_up) + +ENTRY(fixup_smp) + stmfd sp!, {r4 - r6, lr} + mov r4, r0 + add r5, r0, r1 + mov r3, #0 + bl __do_fixup_smp_on_up + ldmfd sp!, {r4 - r6, pc} +ENDPROC(fixup_smp) #include "head-common.S" diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index c9f3f046757..d600bd35070 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -137,11 +137,10 @@ static u8 get_debug_arch(void) u32 didr; /* Do we implement the extended CPUID interface? */ - if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { - pr_warning("CPUID feature registers not supported. " - "Assuming v6 debug is present.\n"); + if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf), + "CPUID feature registers not supported. 
" + "Assuming v6 debug is present.\n")) return ARM_DEBUG_ARCH_V6; - } ARM_DBG_READ(c0, 0, didr); return (didr >> 16) & 0xf; @@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void) return debug_arch; } +static int debug_arch_supported(void) +{ + u8 arch = get_debug_arch(); + return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14; +} + /* Determine number of BRP register available. */ static int get_num_brp_resources(void) { @@ -268,6 +273,9 @@ out: int hw_breakpoint_slots(int type) { + if (!debug_arch_supported()) + return 0; + /* * We can be called early, so don't rely on * our static variables being initialised. @@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused) /* * v7 debug contains save and restore registers so that debug state - * can be maintained across low-power modes without leaving - * the debug logic powered up. It is IMPLEMENTATION DEFINED whether - * we can write to the debug registers out of reset, so we must - * unlock the OS Lock Access Register to avoid taking undefined - * instruction exceptions later on. + * can be maintained across low-power modes without leaving the debug + * logic powered up. It is IMPLEMENTATION DEFINED whether we can access + * the debug registers out of reset, so we must unlock the OS Lock + * Access Register to avoid taking undefined instruction exceptions + * later on. */ if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { /* @@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void) debug_arch = get_debug_arch(); - if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) { + if (!debug_arch_supported()) { pr_info("debug architecture 0x%x unsupported.\n", debug_arch); return 0; } @@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void) pr_info("%d breakpoint(s) reserved for watchpoint " "single-step.\n", core_num_reserved_brps); + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */ + on_each_cpu(reset_ctrl_regs, NULL, 1); + ARM_DBG_READ(c1, 0, dscr); if (dscr & ARM_DSCR_HDBGEN) { + max_watchpoint_len = 4; pr_warning("halting debug mode enabled. Assuming maximum " - "watchpoint size of 4 bytes."); + "watchpoint size of %u bytes.", max_watchpoint_len); } else { - /* - * Reset the breakpoint resources. We assume that a halting - * debugger will leave the world in a nice state for us. - */ - smp_call_function(reset_ctrl_regs, NULL, 1); - reset_ctrl_regs(NULL); - /* Work out the maximum supported watchpoint length. 
*/ max_watchpoint_len = get_max_wp_len(); pr_info("maximum watchpoint size is %u bytes.\n", diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index 2c1f0050c9c..8f6ed43861f 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) return space_cccc_1100_010x(insn, asi); - } else if ((insn & 0x0e000000) == 0x0c400000) { + } else if ((insn & 0x0e000000) == 0x0c000000) { return space_cccc_110x(insn, asi); diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b47..6d4105e6872 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -22,6 +22,7 @@ #include <asm/pgtable.h> #include <asm/sections.h> +#include <asm/smp_plat.h> #include <asm/unwind.h> #ifdef CONFIG_XIP_KERNEL @@ -268,12 +269,28 @@ struct mod_unwind_map { const Elf_Shdr *txt_sec; }; +static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, + const Elf_Shdr *sechdrs, const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + + return NULL; +} + +extern void fixup_smp(const void *, unsigned long); + int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + const Elf_Shdr * __maybe_unused s = NULL; #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; - const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; + const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; struct mod_unwind_map maps[ARM_SEC_MAX]; int i; @@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, maps[i].txt_sec->sh_addr, maps[i].txt_sec->sh_size); #endif + s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); + if (s && !is_smp()) + fixup_smp((void *)s->sh_addr, s->sh_size); return 0; } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 5efa2647a2f..d150ad1ccb5 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail, * Frame pointers should strictly progress back up the stack * (towards higher addresses). */ - if (tail >= buftail.fp) + if (tail + 1 >= buftail.fp) return NULL; return buftail.fp - 1; diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index b8af96ea62e..2c79eec1926 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -97,28 +97,34 @@ set_irq_affinity(int irq, irq, cpu); return err; #else - return 0; + return -EINVAL; #endif } static int init_cpu_pmu(void) { - int i, err = 0; + int i, irqs, err = 0; struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; - if (!pdev) { - err = -ENODEV; - goto out; - } + if (!pdev) + return -ENODEV; + + irqs = pdev->num_resources; + + /* + * If we have a single PMU interrupt that we can't shift, assume that + * we're running on a uniprocessor machine and continue. 
+ */ + if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0))) + return 0; - for (i = 0; i < pdev->num_resources; ++i) { + for (i = 0; i < irqs; ++i) { err = set_irq_affinity(platform_get_irq(pdev, i), i); if (err) break; } -out: return err; } diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 420b8d6485d..5ea4fb718b9 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -226,8 +226,8 @@ int cpu_architecture(void) * Register 0 and check for VMSAv7 or PMSAv7 */ asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0)); - if ((mmfr0 & 0x0000000f) == 0x00000003 || - (mmfr0 & 0x000000f0) == 0x00000030) + if ((mmfr0 & 0x0000000f) >= 0x00000003 || + (mmfr0 & 0x000000f0) >= 0x00000030) cpu_arch = CPU_ARCH_ARMv7; else if ((mmfr0 & 0x0000000f) == 0x00000002 || (mmfr0 & 0x000000f0) == 0x00000020) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bc..abaf8445ce2 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; - unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; + unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); + + cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f203..61462790757 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -21,6 +21,12 @@ #define ARM_CPU_KEEP(x) #endif +#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) +#define ARM_EXIT_KEEP(x) x +#else +#define ARM_EXIT_KEEP(x) +#endif + OUTPUT_ARCH(arm) ENTRY(stext) @@ -43,6 +49,7 @@ SECTIONS _sinittext = .; HEAD_TEXT INIT_TEXT + ARM_EXIT_KEEP(EXIT_TEXT) _einittext = .; ARM_CPU_DISCARD(PROC_INFO) __arch_info_begin = .; @@ -67,6 +74,7 @@ SECTIONS #ifndef CONFIG_XIP_KERNEL __init_begin = _stext; INIT_DATA + ARM_EXIT_KEEP(EXIT_DATA) #endif } @@ -162,6 +170,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; INIT_DATA + ARM_EXIT_KEEP(EXIT_DATA) . 
= ALIGN(PAGE_SIZE); __init_end = .; #endif @@ -247,6 +256,8 @@ SECTIONS } #endif + NOTES + BSS_SECTION(0, 0, 0) _end = .; diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index ffdf87be295..82079545adc 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release); static struct resource ep93xx_ac97_resources[] = { { .start = EP93XX_AAC_PHYS_BASE, - .end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1, + .end = EP93XX_AAC_PHYS_BASE + 0xac - 1, .flags = IORESOURCE_MEM, }, { diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index aa76cfd9f34..8382e790207 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = { KEY(3, 3, KEY_POWER), }; -static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = { +static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = { .keymap = mx25pdk_keymap, .keymap_size = ARRAY_SIZE(mx25pdk_keymap), }; diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c index b1a362ebfde..ca72a05ed9c 100644 --- a/arch/arm/mach-mxs/clock-mx23.c +++ b/arch/arm/mach-mxs/clock-mx23.c @@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ reg &= ~BM_CLKCTRL_##dr##_DIV; \ reg |= div << BP_CLKCTRL_##dr##_DIV; \ - if (reg | (1 << clk->enable_shift)) { \ + if (reg & (1 << clk->enable_shift)) { \ pr_err("%s: clock is gated\n", __func__); \ return -EINVAL; \ } \ @@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ { \ if (parent != clk->parent) { \ __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ - HW_CLKCTRL_CLKSEQ_TOG); \ + CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ clk->parent = parent; \ } \ \ diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c index 56312c092a9..fd1c4c54b8e 100644 --- a/arch/arm/mach-mxs/clock-mx28.c +++ b/arch/arm/mach-mxs/clock-mx28.c @@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \ } else { \ reg &= ~BM_CLKCTRL_##dr##_DIV; \ reg |= div << BP_CLKCTRL_##dr##_DIV; \ - if (reg | (1 << clk->enable_shift)) { \ + if (reg & (1 << clk->enable_shift)) { \ pr_err("%s: clock is gated\n", __func__); \ return -EINVAL; \ } \ } \ - __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU); \ + __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \ \ for (i = 10000; i; i--) \ if (!(__raw_readl(CLKCTRL_BASE_ADDR + \ @@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \ { \ if (parent != clk->parent) { \ __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \ - HW_CLKCTRL_CLKSEQ_TOG); \ + CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \ clk->parent = parent; \ } \ \ @@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK("duart", NULL, uart_clk) _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk) _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk) - _REGISTER_CLOCK("fec.0", NULL, fec_clk) _REGISTER_CLOCK("rtc", NULL, rtc_clk) _REGISTER_CLOCK("pll2", NULL, pll2_clk) _REGISTER_CLOCK(NULL, "hclk", hbus_clk) diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c index e7d2269cf70..a7093c88e6a 100644 --- a/arch/arm/mach-mxs/clock.c +++ b/arch/arm/mach-mxs/clock.c @@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk) if (clk->disable) clk->disable(clk); __clk_disable(clk->parent); - __clk_disable(clk->secondary); } 
} @@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk) if (clk->usecount++ == 0) { __clk_enable(clk->parent); - __clk_enable(clk->secondary); if (clk->enable) clk->enable(clk); diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c index d7ad7a61366..cb0c0e83a52 100644 --- a/arch/arm/mach-mxs/gpio.c +++ b/arch/arm/mach-mxs/gpio.c @@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq); u32 gpio_irq_no_base = port->virtual_irq_start; + desc->irq_data.chip->irq_ack(&desc->irq_data); + irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & __raw_readl(port->base + PINCTRL_IRQEN(port->id)); diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h index 041e276d8a3..592c9ab5d76 100644 --- a/arch/arm/mach-mxs/include/mach/clock.h +++ b/arch/arm/mach-mxs/include/mach/clock.h @@ -29,8 +29,6 @@ struct clk { int id; /* Source clock this clk depends on */ struct clk *parent; - /* Secondary clock to enable/disable with this clock */ - struct clk *secondary; /* Reference count of clock enable/disable */ __s8 usecount; /* Register bit position for clock's enable/disable control. */ diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c index c9088d85da0..453809359ba 100644 --- a/arch/arm/mach-omap1/lcd_dma.c +++ b/arch/arm/mach-omap1/lcd_dma.c @@ -37,7 +37,7 @@ int omap_lcd_dma_running(void) * On OMAP1510, internal LCD controller will start the transfer * when it gets enabled, so assume DMA running if LCD enabled. */ - if (cpu_is_omap1510()) + if (cpu_is_omap15xx()) if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) return 1; @@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); void omap_set_lcd_dma_b1_rotation(int rotate) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); BUG(); return; @@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); void omap_set_lcd_dma_b1_mirror(int mirror) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); BUG(); } @@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); void omap_set_lcd_dma_b1_vxres(unsigned long vxres) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA virtual resulotion is not supported " "in 1510 mode\n"); BUG(); @@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) { - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); BUG(); } @@ -177,7 +177,7 @@ static void set_b1_regs(void) bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); /* 1510 DMA requires the bottom address to be 2 more * than the actual last memory access location. 
*/ - if (cpu_is_omap1510() && + if (cpu_is_omap15xx() && lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) bottom += 2; ei = PIXSTEP(0, 0, 1, 0); @@ -241,7 +241,7 @@ static void set_b1_regs(void) return; /* Suppress warning about uninitialized vars */ } - if (cpu_is_omap1510()) { + if (cpu_is_omap15xx()) { omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); @@ -343,7 +343,7 @@ void omap_free_lcd_dma(void) BUG(); return; } - if (!cpu_is_omap1510()) + if (!cpu_is_omap15xx()) omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); lcd_dma.reserved = 0; @@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void) * connected. Otherwise the OMAP internal controller will * start the transfer when it gets enabled. */ - if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CTRL); @@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma); void omap_setup_lcd_dma(void) { BUG_ON(lcd_dma.active); - if (!cpu_is_omap1510()) { + if (!cpu_is_omap15xx()) { /* Set some reasonable defaults */ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); } set_b1_regs(); - if (!cpu_is_omap1510()) { + if (!cpu_is_omap15xx()) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CCR); @@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void) u16 w; lcd_dma.active = 0; - if (cpu_is_omap1510() || !lcd_dma.ext_ctrl) + if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index f83fc335c61..6885d2fac18 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -44,7 +44,6 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> -#include <linux/sched.h> #include <asm/system.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index e906e05bb41..9a2a31e011c 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = { static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev) { - twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1); - twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0); - if (gpio_is_valid(dssdev->reset_gpio)) gpio_set_value_cansleep(dssdev->reset_gpio, 1); return 0; @@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[]; static int devkit8000_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { + int ret; + omap_mux_init_gpio(29, OMAP_PIN_INPUT); /* gpio + 0 is "mmc0_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; @@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev, /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; - /* gpio + 1 is "LCD_PWREN" (out, active high) */ - devkit8000_lcd_device.reset_gpio = gpio + 1; - gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN"); - /* Disable until needed */ - gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0); + /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ + devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; + ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); + if (ret < 0) { + devkit8000_lcd_device.reset_gpio = 
-EINVAL; + printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); + } /* gpio + 7 is "DVI_PD" (out, active low) */ devkit8000_dvi_device.reset_gpio = gpio + 7; - gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown"); - /* Disable until needed */ - gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0); + ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, + GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); + if (ret < 0) { + devkit8000_dvi_device.reset_gpio = -EINVAL; + printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); + } return 0; } diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e001a048dc0..e944025d5ef 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -409,8 +409,6 @@ static void __init omap4_panda_init(void) platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); - /* OMAP4 Panda uses internal transceiver so register nop transceiver */ - usb_nop_xceiv_register(); omap4_ehci_init(); usb_musb_init(&musb_board_data); } diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c index cb77be7ac44..39a71bb8a30 100644 --- a/arch/arm/mach-omap2/board-rm680.c +++ b/arch/arm/mach-omap2/board-rm680.c @@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = { static struct regulator_init_data rm680_vemmc = { .constraints = { .name = "rm680_vemmc", - .min_uV = 2900000, - .max_uV = 2900000, - .apply_uV = 1, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_STATUS diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c index 337392c3f54..acb7ae5b0a2 100644 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ b/arch/arm/mach-omap2/clkt_dpll.c @@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n) dd = clk->dpll_data; /* DPLL divider must result in a valid jitter correction val */ - fint = clk->parent->rate / (n + 1); + fint = clk->parent->rate / n; if (fint < DPLL_FINT_BAND1_MIN) { pr_debug("rejecting n=%d due to Fint failure, " diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 394413dc7de..0a585dfa987 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c @@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = { .priv = &omap2_mbox_iva_priv, }; -struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL }; +struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL }; #endif #if defined(CONFIG_ARCH_OMAP4) diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index fae49d12bc7..6c84659cf84 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry( list_for_each_entry(e, &partition->muxmodes, node) { struct omap_mux *m = &e->mux; - (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir, + (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, m, &omap_mux_dbg_signal_fops); } } @@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags, if (!partition->base) { pr_err("%s: Could not ioremap mux partition at 0x%08x\n", __func__, partition->phys); + kfree(partition); return -ENODEV; } diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 125f56591fb..a5a83b358dd 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ 
-637,14 +637,14 @@ static int __init pm_dbg_init(void) } - (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d, &enable_off_mode, &pm_dbg_option_fops); - (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d, &sleep_while_idle, &pm_dbg_option_fops); - (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d, + (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d, &wakeup_timer_seconds, &pm_dbg_option_fops); (void) debugfs_create_file("wakeup_timer_milliseconds", - S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds, + S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds, &pm_dbg_option_fops); pm_dbg_init_done = 1; diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index a4aa1920a75..2f864e4b085 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -168,9 +168,10 @@ static void omap3_core_restore_context(void) * once during boot sequence, but this works as we are not using secure * services. */ -static void omap3_save_secure_ram_context(u32 target_mpu_state) +static void omap3_save_secure_ram_context(void) { u32 ret; + int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { /* @@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state) pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); ret = _omap_save_secure_sram((u32 *) __pa(omap3_secure_ram_storage)); - pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state); + pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { printk(KERN_ERR "save_secure_sram() returns %08x\n", @@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void) local_fiq_disable(); omap_dma_global_context_save(); - omap3_save_secure_ram_context(PWRDM_POWER_ON); + omap3_save_secure_ram_context(); omap_dma_global_context_restore(); local_irq_enable(); diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h index 729a644ce85..3300ff6e3cf 100644 --- a/arch/arm/mach-omap2/prcm_mpu44xx.h +++ b/arch/arm/mach-omap2/prcm_mpu44xx.h @@ -38,8 +38,8 @@ #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 /* PRCM_MPU clockdomain register offsets (from instance start) */ -#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000 -#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000 +#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018 +#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018 /* diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 77ecebf3fae..95ac336fe3f 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct for sr_%s not found\n", - __func__, sr_info->voltdm->name); + pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } @@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val) struct omap_sr *sr_info = (struct omap_sr *) data; if (!sr_info) { - pr_warning("%s: omap_sr struct for sr_%s not found\n", - __func__, sr_info->voltdm->name); + pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } @@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "%s: platform 
data missing\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto err_free_devinfo; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -901,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) return PTR_ERR(dbg_dir); } - (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir, + (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, (void *)sr_info, &pm_sr_fops); (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, &sr_info->err_weight); @@ -940,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) strcpy(name, "volt_"); sprintf(volt_name, "%d", volt_data[i].volt_nominal); strcat(name, volt_name); - (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir, + (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].nvalue)); } @@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev) } sr_info = _sr_lookup(pdata->voltdm); - if (!sr_info) { + if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return -EINVAL; diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 7b7c2683ae7..0fc550e7e48 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -39,6 +39,7 @@ #include <asm/mach/time.h> #include <plat/dmtimer.h> #include <asm/localtimer.h> +#include <asm/sched_clock.h> #include "timer-gp.h" @@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void) /* * clocksource */ +static DEFINE_CLOCK_DATA(cd); static struct omap_dm_timer *gpt_clocksource; static cycle_t clocksource_read_cycles(struct clocksource *cs) { @@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static void notrace dmtimer_update_sched_clock(void) +{ + u32 cyc; + + cyc = omap_dm_timer_read_counter(gpt_clocksource); + + update_sched_clock(&cd, cyc, (u32)~0); +} + /* Setup free-running counter for clocksource */ static void __init omap2_gp_clocksource_init(void) { @@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void) omap_dm_timer_set_load_start(gpt, 1, 0); + init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate); + if (clocksource_register_hz(&clocksource_gpt, tick_rate)) printk(err2, clocksource_gpt.name); } diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index ed6079c94c5..12be525b8df 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c @@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd) strcat(name, vdd->voltdm.name); vdd->debug_dir = debugfs_create_dir(name, voltage_dir); + kfree(name); if (IS_ERR(vdd->debug_dir)) { pr_warning("%s: Unable to create debugfs directory for" " vdd_%s\n", __func__, vdd->voltdm.name); diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c index 6b2c800a113..28f667e52ef 100644 --- a/arch/arm/mach-pxa/colibri-evalboard.c +++ b/arch/arm/mach-pxa/colibri-evalboard.c @@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void) GPIO0_COLIBRI_PXA270_SD_DETECT; if (machine_is_colibri300()) /* PXA300 Colibri */ colibri_mci_platform_data.gpio_card_detect = - GPIO39_COLIBRI_PXA300_SD_DETECT; + GPIO13_COLIBRI_PXA300_SD_DETECT; else /* PXA320 Colibri */ colibri_mci_platform_data.gpio_card_detect = GPIO28_COLIBRI_PXA320_SD_DETECT; diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c index fddb16d07eb..66dd81cbc8a 100644 --- 
a/arch/arm/mach-pxa/colibri-pxa300.c +++ b/arch/arm/mach-pxa/colibri-pxa300.c @@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = { GPIO4_MMC1_DAT1, GPIO5_MMC1_DAT2, GPIO6_MMC1_DAT3, - GPIO39_GPIO, /* SD detect */ + GPIO13_GPIO, /* GPIO13_COLIBRI_PXA300_SD_DETECT */ /* UHC */ GPIO0_2_USBH_PEN, diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h index 388a96f1ef9..cb4236e98a0 100644 --- a/arch/arm/mach-pxa/include/mach/colibri.h +++ b/arch/arm/mach-pxa/include/mach/colibri.h @@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {} #define GPIO113_COLIBRI_PXA270_TS_IRQ 113 /* GPIO definitions for Colibri PXA300/310 */ -#define GPIO39_COLIBRI_PXA300_SD_DETECT 39 +#define GPIO13_COLIBRI_PXA300_SD_DETECT 13 /* GPIO definitions for Colibri PXA320 */ #define GPIO28_COLIBRI_PXA320_SD_DETECT 28 diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 405b92a2979..35572c427fa 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = { .pwm_id = 0, .max_brightness = 0xfe, .dft_brightness = 0x7e, - .pwm_period_ns = 3500, + .pwm_period_ns = 3500 * 1024, .init = palm27x_backlight_init, .notify = palm27x_backlight_notify, .exit = palm27x_backlight_exit, diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 978e1b28954..1807c9abdde 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c @@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state) #endif /* skip registers saving for standby */ - if (state != PM_SUSPEND_STANDBY) { + if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) { pxa_cpu_pm_fns->save(sleep_save); /* before sleeping, calculate and save a checksum */ for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) @@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state) pxa_cpu_pm_fns->enter(state); cpu_init(); - if (state != PM_SUSPEND_STANDBY) { + if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { /* after sleeping, validate the checksum */ for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) checksum += sleep_save[i]; diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h index 203dd5a18bd..058dab4482a 100644 --- a/arch/arm/mach-s5p6442/include/mach/map.h +++ b/arch/arm/mach-s5p6442/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5p6442/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com/ * * S5P6442 - Memory map definitions @@ -16,56 +16,61 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5P6442_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5P6442_PA_CHIPID +#define S5P6442_PA_SDRAM 0x20000000 -#define S5P6442_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5P6442_PA_SYSCON +#define S5P6442_PA_I2S0 0xC0B00000 +#define S5P6442_PA_I2S1 0xF2200000 -#define S5P6442_PA_GPIO (0xE0200000) +#define S5P6442_PA_CHIPID 0xE0000000 -#define S5P6442_PA_VIC0 (0xE4000000) -#define S5P6442_PA_VIC1 (0xE4100000) -#define S5P6442_PA_VIC2 (0xE4200000) +#define S5P6442_PA_SYSCON 0xE0100000 -#define S5P6442_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5P6442_PA_SROMC +#define S5P6442_PA_GPIO 0xE0200000 -#define S5P6442_PA_MDMA 0xE8000000 -#define S5P6442_PA_PDMA 0xE9000000 +#define S5P6442_PA_VIC0 0xE4000000 +#define S5P6442_PA_VIC1 0xE4100000 +#define S5P6442_PA_VIC2 0xE4200000 -#define S5P6442_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5P6442_PA_TIMER +#define S5P6442_PA_SROMC 0xE7000000 -#define S5P6442_PA_SYSTIMER (0xEA100000) +#define S5P6442_PA_MDMA 0xE8000000 +#define S5P6442_PA_PDMA 0xE9000000 -#define S5P6442_PA_WATCHDOG (0xEA200000) +#define S5P6442_PA_TIMER 0xEA000000 -#define S5P6442_PA_UART (0xEC000000) +#define S5P6442_PA_SYSTIMER 0xEA100000 -#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0) -#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400) -#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800) -#define S5P_SZ_UART SZ_256 +#define S5P6442_PA_WATCHDOG 0xEA200000 -#define S5P6442_PA_IIC0 (0xEC100000) +#define S5P6442_PA_UART 0xEC000000 -#define S5P6442_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5P6442_PA_SDRAM +#define S5P6442_PA_IIC0 0xEC100000 #define S5P6442_PA_SPI 0xEC300000 -/* I2S */ -#define S5P6442_PA_I2S0 0xC0B00000 -#define S5P6442_PA_I2S1 0xF2200000 - -/* PCM */ #define S5P6442_PA_PCM0 0xF2400000 #define S5P6442_PA_PCM1 0xF2500000 -/* compatibiltiy defines. */ +/* Compatibiltiy Defines */ + +#define S3C_PA_IIC S5P6442_PA_IIC0 #define S3C_PA_WDT S5P6442_PA_WATCHDOG + +#define S5P_PA_CHIPID S5P6442_PA_CHIPID +#define S5P_PA_SDRAM S5P6442_PA_SDRAM +#define S5P_PA_SROMC S5P6442_PA_SROMC +#define S5P_PA_SYSCON S5P6442_PA_SYSCON +#define S5P_PA_TIMER S5P6442_PA_TIMER + +/* UART */ + #define S3C_PA_UART S5P6442_PA_UART -#define S3C_PA_IIC S5P6442_PA_IIC0 + +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h index a9365e5ba61..95c91257c7c 100644 --- a/arch/arm/mach-s5p64x0/include/mach/map.h +++ b/arch/arm/mach-s5p64x0/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5p64x0/include/mach/map.h * - * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * * S5P64X0 - Memory map definitions @@ -16,64 +16,46 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5P64X0_PA_SDRAM (0x20000000) +#define S5P64X0_PA_SDRAM 0x20000000 -#define S5P64X0_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5P64X0_PA_CHIPID - -#define S5P64X0_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5P64X0_PA_SYSCON - -#define S5P64X0_PA_GPIO (0xE0308000) - -#define S5P64X0_PA_VIC0 (0xE4000000) -#define S5P64X0_PA_VIC1 (0xE4100000) +#define S5P64X0_PA_CHIPID 0xE0000000 -#define S5P64X0_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5P64X0_PA_SROMC - -#define S5P64X0_PA_PDMA (0xE9000000) - -#define S5P64X0_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5P64X0_PA_TIMER +#define S5P64X0_PA_SYSCON 0xE0100000 -#define S5P64X0_PA_RTC (0xEA100000) +#define S5P64X0_PA_GPIO 0xE0308000 -#define S5P64X0_PA_WDT (0xEA200000) +#define S5P64X0_PA_VIC0 0xE4000000 +#define S5P64X0_PA_VIC1 0xE4100000 -#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) -#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) +#define S5P64X0_PA_SROMC 0xE7000000 -#define S5P_PA_UART0 S5P6450_PA_UART(0) -#define S5P_PA_UART1 S5P6450_PA_UART(1) -#define S5P_PA_UART2 S5P6450_PA_UART(2) -#define S5P_PA_UART3 S5P6450_PA_UART(3) -#define S5P_PA_UART4 S5P6450_PA_UART(4) -#define S5P_PA_UART5 S5P6450_PA_UART(5) +#define S5P64X0_PA_PDMA 0xE9000000 -#define S5P_SZ_UART SZ_256 +#define S5P64X0_PA_TIMER 0xEA000000 +#define S5P64X0_PA_RTC 0xEA100000 +#define S5P64X0_PA_WDT 0xEA200000 -#define S5P6440_PA_IIC0 (0xEC104000) -#define S5P6440_PA_IIC1 (0xEC20F000) -#define S5P6450_PA_IIC0 (0xEC100000) -#define S5P6450_PA_IIC1 (0xEC200000) +#define S5P6440_PA_IIC0 0xEC104000 +#define S5P6440_PA_IIC1 0xEC20F000 +#define S5P6450_PA_IIC0 0xEC100000 +#define S5P6450_PA_IIC1 0xEC200000 -#define S5P64X0_PA_SPI0 (0xEC400000) -#define S5P64X0_PA_SPI1 (0xEC500000) +#define S5P64X0_PA_SPI0 0xEC400000 +#define S5P64X0_PA_SPI1 0xEC500000 -#define S5P64X0_PA_HSOTG (0xED100000) +#define S5P64X0_PA_HSOTG 0xED100000 #define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) -#define S5P64X0_PA_I2S (0xF2000000) +#define S5P64X0_PA_I2S 0xF2000000 #define S5P6450_PA_I2S1 0xF2800000 #define S5P6450_PA_I2S2 0xF2900000 -#define S5P64X0_PA_PCM (0xF2100000) +#define S5P64X0_PA_PCM 0xF2100000 -#define S5P64X0_PA_ADC (0xF3000000) +#define S5P64X0_PA_ADC 0xF3000000 -/* compatibiltiy defines. */ +/* Compatibiltiy Defines */ #define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) #define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) @@ -83,6 +65,25 @@ #define S3C_PA_RTC S5P64X0_PA_RTC #define S3C_PA_WDT S5P64X0_PA_WDT +#define S5P_PA_CHIPID S5P64X0_PA_CHIPID +#define S5P_PA_SROMC S5P64X0_PA_SROMC +#define S5P_PA_SYSCON S5P64X0_PA_SYSCON +#define S5P_PA_TIMER S5P64X0_PA_TIMER + #define SAMSUNG_PA_ADC S5P64X0_PA_ADC +/* UART */ + +#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) +#define S5P6450_PA_UART(x) ((x < 5) ? 
(0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000)) + +#define S5P_PA_UART0 S5P6450_PA_UART(0) +#define S5P_PA_UART1 S5P6450_PA_UART(1) +#define S5P_PA_UART2 S5P6450_PA_UART(2) +#define S5P_PA_UART3 S5P6450_PA_UART(3) +#define S5P_PA_UART4 S5P6450_PA_UART(4) +#define S5P_PA_UART5 S5P6450_PA_UART(5) + +#define S5P_SZ_UART SZ_256 + #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h index 328467b346a..ccbe6b767f7 100644 --- a/arch/arm/mach-s5pc100/include/mach/map.h +++ b/arch/arm/mach-s5pc100/include/mach/map.h @@ -1,5 +1,8 @@ /* linux/arch/arm/mach-s5pc100/include/mach/map.h * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * * Copyright 2009 Samsung Electronics Co. * Byungho Min <bhmin@samsung.com> * @@ -16,145 +19,115 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -/* - * map-base.h has already defined virtual memory address - * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s) - * S3C_VA_SYS S3C_ADDR(0x00100000) system control - * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used) - * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block - * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog - * S3C_VA_UART S3C_ADDR(0x01000000) UART - * - * S5PC100 specific virtual memory address can be defined here - * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO - * - */ +#define S5PC100_PA_SDRAM 0x20000000 + +#define S5PC100_PA_ONENAND 0xE7100000 +#define S5PC100_PA_ONENAND_BUF 0xB0000000 + +#define S5PC100_PA_CHIPID 0xE0000000 -#define S5PC100_PA_ONENAND_BUF (0xB0000000) -#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M) +#define S5PC100_PA_SYSCON 0xE0100000 -/* Chip ID */ +#define S5PC100_PA_OTHERS 0xE0200000 -#define S5PC100_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5PC100_PA_CHIPID +#define S5PC100_PA_GPIO 0xE0300000 -#define S5PC100_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5PC100_PA_SYSCON +#define S5PC100_PA_VIC0 0xE4000000 +#define S5PC100_PA_VIC1 0xE4100000 +#define S5PC100_PA_VIC2 0xE4200000 -#define S5PC100_PA_OTHERS (0xE0200000) -#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) +#define S5PC100_PA_SROMC 0xE7000000 -#define S5PC100_PA_GPIO (0xE0300000) -#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) +#define S5PC100_PA_CFCON 0xE7800000 -/* Interrupt */ -#define S5PC100_PA_VIC0 (0xE4000000) -#define S5PC100_PA_VIC1 (0xE4100000) -#define S5PC100_PA_VIC2 (0xE4200000) -#define S5PC100_VA_VIC S3C_VA_IRQ -#define S5PC100_VA_VIC_OFFSET 0x10000 -#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET)) +#define S5PC100_PA_MDMA 0xE8100000 +#define S5PC100_PA_PDMA0 0xE9000000 +#define S5PC100_PA_PDMA1 0xE9200000 -#define S5PC100_PA_SROMC (0xE7000000) -#define S5P_PA_SROMC S5PC100_PA_SROMC +#define S5PC100_PA_TIMER 0xEA000000 +#define S5PC100_PA_SYSTIMER 0xEA100000 +#define S5PC100_PA_WATCHDOG 0xEA200000 +#define S5PC100_PA_RTC 0xEA300000 -#define S5PC100_PA_ONENAND (0xE7100000) +#define S5PC100_PA_UART 0xEC000000 -#define S5PC100_PA_CFCON (0xE7800000) +#define S5PC100_PA_IIC0 0xEC100000 +#define S5PC100_PA_IIC1 0xEC200000 -/* DMA */ -#define S5PC100_PA_MDMA (0xE8100000) -#define S5PC100_PA_PDMA0 (0xE9000000) -#define S5PC100_PA_PDMA1 (0xE9200000) +#define S5PC100_PA_SPI0 0xEC300000 +#define S5PC100_PA_SPI1 0xEC400000 +#define S5PC100_PA_SPI2 0xEC500000 -/* Timer */ -#define S5PC100_PA_TIMER (0xEA000000) -#define S5P_PA_TIMER S5PC100_PA_TIMER +#define S5PC100_PA_USB_HSOTG 0xED200000 +#define S5PC100_PA_USB_HSPHY 0xED300000 -#define S5PC100_PA_SYSTIMER (0xEA100000) 
+#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) -#define S5PC100_PA_WATCHDOG (0xEA200000) -#define S5PC100_PA_RTC (0xEA300000) +#define S5PC100_PA_FB 0xEE000000 -#define S5PC100_PA_UART (0xEC000000) +#define S5PC100_PA_FIMC0 0xEE200000 +#define S5PC100_PA_FIMC1 0xEE300000 +#define S5PC100_PA_FIMC2 0xEE400000 -#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0) -#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400) -#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800) -#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00) -#define S5P_SZ_UART SZ_256 +#define S5PC100_PA_I2S0 0xF2000000 +#define S5PC100_PA_I2S1 0xF2100000 +#define S5PC100_PA_I2S2 0xF2200000 -#define S5PC100_PA_IIC0 (0xEC100000) -#define S5PC100_PA_IIC1 (0xEC200000) +#define S5PC100_PA_AC97 0xF2300000 -/* SPI */ -#define S5PC100_PA_SPI0 0xEC300000 -#define S5PC100_PA_SPI1 0xEC400000 -#define S5PC100_PA_SPI2 0xEC500000 +#define S5PC100_PA_PCM0 0xF2400000 +#define S5PC100_PA_PCM1 0xF2500000 -/* USB HS OTG */ -#define S5PC100_PA_USB_HSOTG (0xED200000) -#define S5PC100_PA_USB_HSPHY (0xED300000) +#define S5PC100_PA_SPDIF 0xF2600000 -#define S5PC100_PA_FB (0xEE000000) +#define S5PC100_PA_TSADC 0xF3000000 -#define S5PC100_PA_FIMC0 (0xEE200000) -#define S5PC100_PA_FIMC1 (0xEE300000) -#define S5PC100_PA_FIMC2 (0xEE400000) +#define S5PC100_PA_KEYPAD 0xF3100000 -#define S5PC100_PA_I2S0 (0xF2000000) -#define S5PC100_PA_I2S1 (0xF2100000) -#define S5PC100_PA_I2S2 (0xF2200000) +/* Compatibiltiy Defines */ -#define S5PC100_PA_AC97 0xF2300000 +#define S3C_PA_FB S5PC100_PA_FB +#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) +#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) +#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) +#define S3C_PA_IIC S5PC100_PA_IIC0 +#define S3C_PA_IIC1 S5PC100_PA_IIC1 +#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD +#define S3C_PA_ONENAND S5PC100_PA_ONENAND +#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF +#define S3C_PA_RTC S5PC100_PA_RTC +#define S3C_PA_TSADC S5PC100_PA_TSADC +#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG +#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY +#define S3C_PA_WDT S5PC100_PA_WATCHDOG -/* PCM */ -#define S5PC100_PA_PCM0 0xF2400000 -#define S5PC100_PA_PCM1 0xF2500000 +#define S5P_PA_CHIPID S5PC100_PA_CHIPID +#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 +#define S5P_PA_FIMC1 S5PC100_PA_FIMC1 +#define S5P_PA_FIMC2 S5PC100_PA_FIMC2 +#define S5P_PA_SDRAM S5PC100_PA_SDRAM +#define S5P_PA_SROMC S5PC100_PA_SROMC +#define S5P_PA_SYSCON S5PC100_PA_SYSCON +#define S5P_PA_TIMER S5PC100_PA_TIMER -#define S5PC100_PA_SPDIF 0xF2600000 +#define SAMSUNG_PA_ADC S5PC100_PA_TSADC +#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON +#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD -#define S5PC100_PA_TSADC (0xF3000000) +#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000) -/* KEYPAD */ -#define S5PC100_PA_KEYPAD (0xF3100000) +#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M) -#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) +/* UART */ -#define S5PC100_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5PC100_PA_SDRAM +#define S3C_PA_UART S5PC100_PA_UART -/* compatibiltiy defines. 
*/ -#define S3C_PA_UART S5PC100_PA_UART -#define S3C_PA_IIC S5PC100_PA_IIC0 -#define S3C_PA_IIC1 S5PC100_PA_IIC1 -#define S3C_PA_FB S5PC100_PA_FB -#define S3C_PA_G2D S5PC100_PA_G2D -#define S3C_PA_G3D S5PC100_PA_G3D -#define S3C_PA_JPEG S5PC100_PA_JPEG -#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR -#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0) -#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1) -#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2) -#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG -#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY -#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0) -#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1) -#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2) -#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD -#define S3C_PA_WDT S5PC100_PA_WATCHDOG -#define S3C_PA_TSADC S5PC100_PA_TSADC -#define S3C_PA_ONENAND S5PC100_PA_ONENAND -#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF -#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF -#define S3C_PA_RTC S5PC100_PA_RTC - -#define SAMSUNG_PA_ADC S5PC100_PA_TSADC -#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON -#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) -#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 -#define S5P_PA_FIMC1 S5PC100_PA_FIMC1 -#define S5P_PA_FIMC2 S5PC100_PA_FIMC2 +#define S5P_SZ_UART SZ_256 -#endif /* __ASM_ARCH_C100_MAP_H */ +#endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h index 3611492ad68..1dd58836fd4 100644 --- a/arch/arm/mach-s5pv210/include/mach/map.h +++ b/arch/arm/mach-s5pv210/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5pv210/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com/ * * S5PV210 - Memory map definitions @@ -16,122 +16,120 @@ #include <plat/map-base.h> #include <plat/map-s5p.h> -#define S5PV210_PA_SROM_BANK5 (0xA8000000) +#define S5PV210_PA_SDRAM 0x20000000 -#define S5PC110_PA_ONENAND (0xB0000000) -#define S5P_PA_ONENAND S5PC110_PA_ONENAND +#define S5PV210_PA_SROM_BANK5 0xA8000000 -#define S5PC110_PA_ONENAND_DMA (0xB0600000) -#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA +#define S5PC110_PA_ONENAND 0xB0000000 +#define S5PC110_PA_ONENAND_DMA 0xB0600000 -#define S5PV210_PA_CHIPID (0xE0000000) -#define S5P_PA_CHIPID S5PV210_PA_CHIPID +#define S5PV210_PA_CHIPID 0xE0000000 -#define S5PV210_PA_SYSCON (0xE0100000) -#define S5P_PA_SYSCON S5PV210_PA_SYSCON +#define S5PV210_PA_SYSCON 0xE0100000 -#define S5PV210_PA_GPIO (0xE0200000) +#define S5PV210_PA_GPIO 0xE0200000 -/* SPI */ -#define S5PV210_PA_SPI0 0xE1300000 -#define S5PV210_PA_SPI1 0xE1400000 +#define S5PV210_PA_SPDIF 0xE1100000 -#define S5PV210_PA_KEYPAD (0xE1600000) +#define S5PV210_PA_SPI0 0xE1300000 +#define S5PV210_PA_SPI1 0xE1400000 -#define S5PV210_PA_IIC0 (0xE1800000) -#define S5PV210_PA_IIC1 (0xFAB00000) -#define S5PV210_PA_IIC2 (0xE1A00000) +#define S5PV210_PA_KEYPAD 0xE1600000 -#define S5PV210_PA_TIMER (0xE2500000) -#define S5P_PA_TIMER S5PV210_PA_TIMER +#define S5PV210_PA_ADC 0xE1700000 -#define S5PV210_PA_SYSTIMER (0xE2600000) +#define S5PV210_PA_IIC0 0xE1800000 +#define S5PV210_PA_IIC1 0xFAB00000 +#define S5PV210_PA_IIC2 0xE1A00000 -#define S5PV210_PA_WATCHDOG (0xE2700000) +#define S5PV210_PA_AC97 0xE2200000 -#define S5PV210_PA_RTC (0xE2800000) -#define S5PV210_PA_UART (0xE2900000) +#define S5PV210_PA_PCM0 0xE2300000 +#define S5PV210_PA_PCM1 0xE1200000 +#define S5PV210_PA_PCM2 0xE2B00000 -#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) -#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) -#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) -#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) +#define S5PV210_PA_TIMER 0xE2500000 +#define S5PV210_PA_SYSTIMER 0xE2600000 +#define S5PV210_PA_WATCHDOG 0xE2700000 +#define S5PV210_PA_RTC 0xE2800000 -#define S5P_SZ_UART SZ_256 +#define S5PV210_PA_UART 0xE2900000 -#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) +#define S5PV210_PA_SROMC 0xE8000000 -#define S5PV210_PA_SROMC (0xE8000000) -#define S5P_PA_SROMC S5PV210_PA_SROMC +#define S5PV210_PA_CFCON 0xE8200000 -#define S5PV210_PA_CFCON (0xE8200000) +#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) -#define S5PV210_PA_MDMA 0xFA200000 -#define S5PV210_PA_PDMA0 0xE0900000 -#define S5PV210_PA_PDMA1 0xE0A00000 +#define S5PV210_PA_HSOTG 0xEC000000 +#define S5PV210_PA_HSPHY 0xEC100000 -#define S5PV210_PA_FB (0xF8000000) +#define S5PV210_PA_IIS0 0xEEE30000 +#define S5PV210_PA_IIS1 0xE2100000 +#define S5PV210_PA_IIS2 0xE2A00000 -#define S5PV210_PA_FIMC0 (0xFB200000) -#define S5PV210_PA_FIMC1 (0xFB300000) -#define S5PV210_PA_FIMC2 (0xFB400000) +#define S5PV210_PA_DMC0 0xF0000000 +#define S5PV210_PA_DMC1 0xF1400000 -#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) +#define S5PV210_PA_VIC0 0xF2000000 +#define S5PV210_PA_VIC1 0xF2100000 +#define S5PV210_PA_VIC2 0xF2200000 +#define S5PV210_PA_VIC3 0xF2300000 -#define S5PV210_PA_HSOTG (0xEC000000) -#define S5PV210_PA_HSPHY (0xEC100000) +#define S5PV210_PA_FB 0xF8000000 -#define S5PV210_PA_VIC0 (0xF2000000) -#define S5PV210_PA_VIC1 (0xF2100000) -#define S5PV210_PA_VIC2 (0xF2200000) -#define S5PV210_PA_VIC3 (0xF2300000) +#define S5PV210_PA_MDMA 0xFA200000 +#define S5PV210_PA_PDMA0 0xE0900000 +#define S5PV210_PA_PDMA1 
0xE0A00000 -#define S5PV210_PA_SDRAM (0x20000000) -#define S5P_PA_SDRAM S5PV210_PA_SDRAM +#define S5PV210_PA_MIPI_CSIS 0xFA600000 -/* S/PDIF */ -#define S5PV210_PA_SPDIF 0xE1100000 +#define S5PV210_PA_FIMC0 0xFB200000 +#define S5PV210_PA_FIMC1 0xFB300000 +#define S5PV210_PA_FIMC2 0xFB400000 -/* I2S */ -#define S5PV210_PA_IIS0 0xEEE30000 -#define S5PV210_PA_IIS1 0xE2100000 -#define S5PV210_PA_IIS2 0xE2A00000 +/* Compatibiltiy Defines */ -/* PCM */ -#define S5PV210_PA_PCM0 0xE2300000 -#define S5PV210_PA_PCM1 0xE1200000 -#define S5PV210_PA_PCM2 0xE2B00000 +#define S3C_PA_FB S5PV210_PA_FB +#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) +#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) +#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) +#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) +#define S3C_PA_IIC S5PV210_PA_IIC0 +#define S3C_PA_IIC1 S5PV210_PA_IIC1 +#define S3C_PA_IIC2 S5PV210_PA_IIC2 +#define S3C_PA_RTC S5PV210_PA_RTC +#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG +#define S3C_PA_WDT S5PV210_PA_WATCHDOG -/* AC97 */ -#define S5PV210_PA_AC97 0xE2200000 +#define S5P_PA_CHIPID S5PV210_PA_CHIPID +#define S5P_PA_FIMC0 S5PV210_PA_FIMC0 +#define S5P_PA_FIMC1 S5PV210_PA_FIMC1 +#define S5P_PA_FIMC2 S5PV210_PA_FIMC2 +#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS +#define S5P_PA_ONENAND S5PC110_PA_ONENAND +#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA +#define S5P_PA_SDRAM S5PV210_PA_SDRAM +#define S5P_PA_SROMC S5PV210_PA_SROMC +#define S5P_PA_SYSCON S5PV210_PA_SYSCON +#define S5P_PA_TIMER S5PV210_PA_TIMER -#define S5PV210_PA_ADC (0xE1700000) +#define SAMSUNG_PA_ADC S5PV210_PA_ADC +#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON +#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD -#define S5PV210_PA_DMC0 (0xF0000000) -#define S5PV210_PA_DMC1 (0xF1400000) +/* UART */ -#define S5PV210_PA_MIPI_CSIS 0xFA600000 +#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) -/* compatibiltiy defines. 
*/ -#define S3C_PA_UART S5PV210_PA_UART -#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) -#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) -#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) -#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) -#define S3C_PA_IIC S5PV210_PA_IIC0 -#define S3C_PA_IIC1 S5PV210_PA_IIC1 -#define S3C_PA_IIC2 S5PV210_PA_IIC2 -#define S3C_PA_FB S5PV210_PA_FB -#define S3C_PA_RTC S5PV210_PA_RTC -#define S3C_PA_WDT S5PV210_PA_WATCHDOG -#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG -#define S5P_PA_FIMC0 S5PV210_PA_FIMC0 -#define S5P_PA_FIMC1 S5PV210_PA_FIMC1 -#define S5P_PA_FIMC2 S5PV210_PA_FIMC2 -#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS +#define S3C_PA_UART S5PV210_PA_UART -#define SAMSUNG_PA_ADC S5PV210_PA_ADC -#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON -#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index 461aa035afc..557add4fc56 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c @@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = { static struct regulator_init_data aquila_ldo3_data = { .constraints = { - .name = "VUSB/MIPI_1.1V", + .name = "VUSB+MIPI_1.1V", .min_uV = 1100000, .max_uV = 1100000, .apply_uV = 1, @@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = { static struct regulator_init_data aquila_ldo8_data = { .constraints = { - .name = "VUSB/VADC_3.3V", + .name = "VUSB+VADC_3.3V", .min_uV = 3300000, .max_uV = 3300000, .apply_uV = 1, @@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = { static struct regulator_init_data aquila_ldo9_data = { .constraints = { - .name = "VCC/VCAM_2.8V", + .name = "VCC+VCAM_2.8V", .min_uV = 2800000, .max_uV = 2800000, .apply_uV = 1, @@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_max_voltage1 = 1200000, - .buck1_max_voltage2 = 1200000, - .buck2_max_voltage = 1200000, + .buck1_voltage1 = 1200000, + .buck1_voltage2 = 1200000, + .buck1_voltage3 = 1200000, + .buck1_voltage4 = 1200000, + .buck2_voltage1 = 1200000, + .buck2_voltage2 = 1200000, }; #endif diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index e22d5112fd4..056f5c769b0 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = { static struct regulator_init_data goni_ldo3_data = { .constraints = { - .name = "VUSB/MIPI_1.1V", + .name = "VUSB+MIPI_1.1V", .min_uV = 1100000, .max_uV = 1100000, .apply_uV = 1, @@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = { static struct regulator_init_data goni_ldo8_data = { .constraints = { - .name = "VUSB/VADC_3.3V", + .name = "VUSB+VADC_3.3V", .min_uV = 3300000, .max_uV = 3300000, .apply_uV = 1, @@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = { static struct regulator_init_data goni_ldo9_data = { .constraints = { - .name = "VCC/VCAM_2.8V", + .name = "VCC+VCAM_2.8V", .min_uV = 2800000, .max_uV = 2800000, .apply_uV = 1, @@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = { .buck1_set1 = 
S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_max_voltage1 = 1200000, - .buck1_max_voltage2 = 1200000, - .buck2_max_voltage = 1200000, + .buck1_voltage1 = 1200000, + .buck1_voltage2 = 1200000, + .buck1_voltage3 = 1200000, + .buck1_voltage4 = 1200000, + .buck2_voltage1 = 1200000, + .buck2_voltage2 = 1200000, }; #endif diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig index 09c4c21b70c..b2a9acc5185 100644 --- a/arch/arm/mach-s5pv310/Kconfig +++ b/arch/arm/mach-s5pv310/Kconfig @@ -122,6 +122,7 @@ config MACH_SMDKV310 select S3C_DEV_HSMMC2 select S3C_DEV_HSMMC3 select S5PV310_DEV_PD + select S5PV310_DEV_SYSMMU select S5PV310_SETUP_I2C1 select S5PV310_SETUP_SDHCI help diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h index 74d400625a2..901657fa7a1 100644 --- a/arch/arm/mach-s5pv310/include/mach/map.h +++ b/arch/arm/mach-s5pv310/include/mach/map.h @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-s5pv310/include/mach/map.h * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5PV310 - Memory map definitions @@ -23,90 +23,43 @@ #include <plat/map-s5p.h> -#define S5PV310_PA_SYSRAM (0x02025000) +#define S5PV310_PA_SYSRAM 0x02025000 -#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) - -#define S5PC210_PA_ONENAND (0x0C000000) -#define S5P_PA_ONENAND S5PC210_PA_ONENAND - -#define S5PC210_PA_ONENAND_DMA (0x0C600000) -#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA - -#define S5PV310_PA_CHIPID (0x10000000) -#define S5P_PA_CHIPID S5PV310_PA_CHIPID - -#define S5PV310_PA_SYSCON (0x10010000) -#define S5P_PA_SYSCON S5PV310_PA_SYSCON +#define S5PV310_PA_I2S0 0x03830000 +#define S5PV310_PA_I2S1 0xE3100000 +#define S5PV310_PA_I2S2 0xE2A00000 -#define S5PV310_PA_PMU (0x10020000) +#define S5PV310_PA_PCM0 0x03840000 +#define S5PV310_PA_PCM1 0x13980000 +#define S5PV310_PA_PCM2 0x13990000 -#define S5PV310_PA_CMU (0x10030000) - -#define S5PV310_PA_WATCHDOG (0x10060000) -#define S5PV310_PA_RTC (0x10070000) - -#define S5PV310_PA_DMC0 (0x10400000) - -#define S5PV310_PA_COMBINER (0x10448000) - -#define S5PV310_PA_COREPERI (0x10500000) -#define S5PV310_PA_GIC_CPU (0x10500100) -#define S5PV310_PA_TWD (0x10500600) -#define S5PV310_PA_GIC_DIST (0x10501000) -#define S5PV310_PA_L2CC (0x10502000) - -/* DMA */ -#define S5PV310_PA_MDMA 0x10810000 -#define S5PV310_PA_PDMA0 0x12680000 -#define S5PV310_PA_PDMA1 0x12690000 - -#define S5PV310_PA_GPIO1 (0x11400000) -#define S5PV310_PA_GPIO2 (0x11000000) -#define S5PV310_PA_GPIO3 (0x03860000) - -#define S5PV310_PA_MIPI_CSIS0 0x11880000 -#define S5PV310_PA_MIPI_CSIS1 0x11890000 +#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) -#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) +#define S5PC210_PA_ONENAND 0x0C000000 +#define S5PC210_PA_ONENAND_DMA 0x0C600000 -#define S5PV310_PA_SROMC (0x12570000) -#define S5P_PA_SROMC S5PV310_PA_SROMC +#define S5PV310_PA_CHIPID 0x10000000 -/* S/PDIF */ -#define S5PV310_PA_SPDIF 0xE1100000 +#define S5PV310_PA_SYSCON 0x10010000 +#define S5PV310_PA_PMU 0x10020000 +#define S5PV310_PA_CMU 0x10030000 -/* I2S */ -#define S5PV310_PA_I2S0 0x03830000 -#define S5PV310_PA_I2S1 0xE3100000 -#define S5PV310_PA_I2S2 0xE2A00000 +#define S5PV310_PA_WATCHDOG 0x10060000 +#define S5PV310_PA_RTC 0x10070000 -/* PCM */ -#define S5PV310_PA_PCM0 0x03840000 -#define S5PV310_PA_PCM1 0x13980000 -#define S5PV310_PA_PCM2 0x13990000 +#define 
S5PV310_PA_DMC0 0x10400000 -/* AC97 */ -#define S5PV310_PA_AC97 0x139A0000 +#define S5PV310_PA_COMBINER 0x10448000 -#define S5PV310_PA_UART (0x13800000) +#define S5PV310_PA_COREPERI 0x10500000 +#define S5PV310_PA_GIC_CPU 0x10500100 +#define S5PV310_PA_TWD 0x10500600 +#define S5PV310_PA_GIC_DIST 0x10501000 +#define S5PV310_PA_L2CC 0x10502000 -#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) -#define S5P_PA_UART0 S5P_PA_UART(0) -#define S5P_PA_UART1 S5P_PA_UART(1) -#define S5P_PA_UART2 S5P_PA_UART(2) -#define S5P_PA_UART3 S5P_PA_UART(3) -#define S5P_PA_UART4 S5P_PA_UART(4) - -#define S5P_SZ_UART SZ_256 - -#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) - -#define S5PV310_PA_TIMER (0x139D0000) -#define S5P_PA_TIMER S5PV310_PA_TIMER - -#define S5PV310_PA_SDRAM (0x40000000) -#define S5P_PA_SDRAM S5PV310_PA_SDRAM +#define S5PV310_PA_MDMA 0x10810000 +#define S5PV310_PA_PDMA0 0x12680000 +#define S5PV310_PA_PDMA1 0x12690000 #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 #define S5PV310_PA_SYSMMU_SSS 0x10A50000 @@ -124,11 +77,32 @@ #define S5PV310_PA_SYSMMU_TV 0x12E20000 #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 -#define S5PV310_SYSMMU_TOTAL_IPNUM 16 -#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM -/* compatibiltiy defines. */ -#define S3C_PA_UART S5PV310_PA_UART +#define S5PV310_PA_GPIO1 0x11400000 +#define S5PV310_PA_GPIO2 0x11000000 +#define S5PV310_PA_GPIO3 0x03860000 + +#define S5PV310_PA_MIPI_CSIS0 0x11880000 +#define S5PV310_PA_MIPI_CSIS1 0x11890000 + +#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) + +#define S5PV310_PA_SROMC 0x12570000 + +#define S5PV310_PA_UART 0x13800000 + +#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) + +#define S5PV310_PA_AC97 0x139A0000 + +#define S5PV310_PA_TIMER 0x139D0000 + +#define S5PV310_PA_SDRAM 0x40000000 + +#define S5PV310_PA_SPDIF 0xE1100000 + +/* Compatibiltiy Defines */ + #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) @@ -143,7 +117,28 @@ #define S3C_PA_IIC7 S5PV310_PA_IIC(7) #define S3C_PA_RTC S5PV310_PA_RTC #define S3C_PA_WDT S5PV310_PA_WATCHDOG + +#define S5P_PA_CHIPID S5PV310_PA_CHIPID #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 +#define S5P_PA_ONENAND S5PC210_PA_ONENAND +#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA +#define S5P_PA_SDRAM S5PV310_PA_SDRAM +#define S5P_PA_SROMC S5PV310_PA_SROMC +#define S5P_PA_SYSCON S5PV310_PA_SYSCON +#define S5P_PA_TIMER S5PV310_PA_TIMER + +/* UART */ + +#define S3C_PA_UART S5PV310_PA_UART + +#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) +#define S5P_PA_UART0 S5P_PA_UART(0) +#define S5P_PA_UART1 S5P_PA_UART(1) +#define S5P_PA_UART2 S5P_PA_UART(2) +#define S5P_PA_UART3 S5P_PA_UART(3) +#define S5P_PA_UART4 S5P_PA_UART(4) + +#define S5P_SZ_UART SZ_256 #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-s5pv310/include/mach/sysmmu.h index 662fe85ff4d..598fc5c9211 100644 --- a/arch/arm/mach-s5pv310/include/mach/sysmmu.h +++ b/arch/arm/mach-s5pv310/include/mach/sysmmu.h @@ -13,6 +13,9 @@ #ifndef __ASM_ARM_ARCH_SYSMMU_H #define __ASM_ARM_ARCH_SYSMMU_H __FILE__ +#define S5PV310_SYSMMU_TOTAL_IPNUM 16 +#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM + enum s5pv310_sysmmu_ips { SYSMMU_MDMA, SYSMMU_SSS, @@ -32,7 +35,7 @@ enum s5pv310_sysmmu_ips { SYSMMU_MFC_R, }; -static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = { 
+static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = { "SYSMMU_MDMA" , "SYSMMU_SSS" , "SYSMMU_FIMC0" , diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index d43c5ef58eb..bd3e1bfdd6a 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = { struct platform_device collie_locomo_device = { .name = "locomo", .id = 0, + .dev = { + .platform_data = &locomo_info, + }, .num_resources = ARRAY_SIZE(locomo_resources), .resource = locomo_resources, }; diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h index cacf17a958c..53677e464d4 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear320.h +++ b/arch/arm/mach-spear3xx/include/mach/spear320.h @@ -62,7 +62,7 @@ #define SPEAR320_SMII1_BASE 0xAB000000 #define SPEAR320_SMII1_SIZE 0x01000000 -#define SPEAR320_SOC_CONFIG_BASE 0xB4000000 +#define SPEAR320_SOC_CONFIG_BASE 0xB3000000 #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 /* Interrupt registers offsets and masks */ #define INT_STS_MASK_REG 0x04 diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h index 66ad2760c62..04c779832c7 100644 --- a/arch/arm/mach-tegra/include/mach/kbc.h +++ b/arch/arm/mach-tegra/include/mach/kbc.h @@ -57,5 +57,6 @@ struct tegra_kbc_platform_data { const struct matrix_keymap_data *keymap_data; bool wakeup; + bool use_fn_map; }; #endif diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9d30c6f804b..e4509bae8fc 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -405,7 +405,7 @@ config CPU_V6 config CPU_32v6K bool "Support ARM V6K processor extensions" if !SMP depends on CPU_V6 || CPU_V7 - default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) + default y if SMP help Say Y here if your ARMv6 processor supports the 'K' extension. 
This enables the kernel to use some instructions not present @@ -416,7 +416,7 @@ config CPU_32v6K # ARMv7 config CPU_V7 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX - select CPU_32v6K if !ARCH_OMAP2 + select CPU_32v6K select CPU_32v7 select CPU_ABRT_EV7 select CPU_PABRT_V7 @@ -644,7 +644,7 @@ config ARM_THUMBEE config SWP_EMULATE bool "Emulate SWP/SWPB instructions" - depends on CPU_V7 && !CPU_V6 + depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6 select HAVE_PROC_CPU if PROC_FS default y if SMP help diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb9586..f2ce38e085d 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) static inline void cache_sync(void) { void __iomem *base = l2x0_base; + +#ifdef CONFIG_ARM_ERRATA_753970 + /* write to an unmmapped register */ + writel_relaxed(0, base + L2X0_DUMMY_REG); +#else writel_relaxed(0, base + L2X0_CACHE_SYNC); +#endif cache_wait(base + L2X0_CACHE_SYNC, 1); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4..8e335623913 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -264,6 +264,12 @@ __v7_setup: orreq r10, r10, #1 << 6 @ set bit #6 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif +#ifdef CONFIG_ARM_ERRATA_751472 + cmp r6, #0x30 @ present prior to r3p0 + mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register + orrlt r10, r10, #1 << 11 @ set bit #11 + mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register +#endif 3: mov r10, #0 #ifdef HARVARD_CACHE diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 8aa974491df..c074e66ad22 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -10,8 +10,6 @@ */ #include <linux/cpumask.h> -#include <linux/err.h> -#include <linux/errno.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/oprofile.h> @@ -46,6 +44,7 @@ char *op_name_from_perf_id(void) return NULL; } } +#endif static int report_trace(struct stackframe *frame, void *d) { @@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail) /* frame pointers should strictly progress back up the stack * (towards higher addresses) */ - if (tail >= buftail[0].fp) + if (tail + 1 >= buftail[0].fp) return NULL; return buftail[0].fp-1; @@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth) int __init oprofile_arch_init(struct oprofile_operations *ops) { + /* provide backtrace support also in timer mode: */ ops->backtrace = arm_backtrace; return oprofile_perf_init(ops); @@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void) { oprofile_perf_exit(); } -#else -int __init oprofile_arch_init(struct oprofile_operations *ops) -{ - pr_info("oprofile: hardware counters not available\n"); - return -ENODEV; -} -void __exit oprofile_arch_exit(void) {} -#endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h index 3a70ebf0477..ff469c4f1d7 100644 --- a/arch/arm/plat-mxc/include/mach/uncompress.h +++ b/arch/arm/plat-mxc/include/mach/uncompress.h @@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id) case MACH_TYPE_MX35_3DS: case MACH_TYPE_PCM043: case MACH_TYPE_LILLY1131: + case MACH_TYPE_VPR200: uart_base = MX3X_UART1_BASE_ADDR; break; case MACH_TYPE_MAGX_ZN5: @@ -102,6 +103,7 @@ static __inline__ void 
__arch_decomp_setup(unsigned long arch_id) break; case MACH_TYPE_MX51_BABBAGE: case MACH_TYPE_EUKREA_CPUIMX51SD: + case MACH_TYPE_MX51_3DS: uart_base = MX51_UART1_BASE_ADDR; break; case MACH_TYPE_MX50_RDP: diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 459b319a9fa..49d3208793e 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox) struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) { - struct omap_mbox *mbox; - int ret; + struct omap_mbox *_mbox, *mbox = NULL; + int i, ret; if (!mboxes) return ERR_PTR(-EINVAL); - for (mbox = *mboxes; mbox; mbox++) - if (!strcmp(mbox->name, name)) + for (i = 0; (_mbox = mboxes[i]); i++) { + if (!strcmp(_mbox->name, name)) { + mbox = _mbox; break; + } + } if (!mbox) return ERR_PTR(-ENOENT); diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index b77e018d36c..a9aa5ad3f4e 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c @@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = { #define mfp_configured(p) ((p)->config != -1) /* - * perform a read-back of any MFPR register to make sure the + * perform a read-back of any valid MFPR register to make sure the * previous writings are finished */ -#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0) +static unsigned long mfpr_off_readback; +#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback) static inline void __mfp_config_run(struct mfp_pin *p) { @@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map) spin_lock_irqsave(&mfp_spin_lock, flags); + /* mfp offset for readback */ + mfpr_off_readback = map[0].offset; + for (p = map; p->start != MFP_PIN_INVALID; p++) { offset = p->offset; i = p->start; diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig index deb39951a22..557f8c507f6 100644 --- a/arch/arm/plat-s5p/Kconfig +++ b/arch/arm/plat-s5p/Kconfig @@ -37,6 +37,14 @@ config S5P_GPIO_INT help Common code for the GPIO interrupts (other than external interrupts.) +comment "System MMU" + +config S5P_SYSTEM_MMU + bool "S5P SYSTEM MMU" + depends on ARCH_S5PV310 + help + Say Y here if you want to enable System MMU + config S5P_DEV_FIMC0 bool help @@ -66,19 +74,3 @@ config S5P_DEV_CSIS1 bool help Compile in platform device definitions for MIPI-CSIS channel 1 - -menuconfig S5P_SYSMMU - bool "SYSMMU support" - depends on ARCH_S5PV310 - help - This is a System MMU driver for Samsung ARM based Soc. - -if S5P_SYSMMU - -config S5P_SYSMMU_DEBUG - bool "Enables debug messages" - depends on S5P_SYSMMU - help - This enables SYSMMU driver debug massages. 
- -endif diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile index 92efe1adcfd..4bd5cf90897 100644 --- a/arch/arm/plat-s5p/Makefile +++ b/arch/arm/plat-s5p/Makefile @@ -19,6 +19,7 @@ obj-y += clock.o obj-y += irq.o obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o +obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_PM) += irq-pm.o @@ -30,4 +31,3 @@ obj-$(CONFIG_S5P_DEV_FIMC2) += dev-fimc2.o obj-$(CONFIG_S5P_DEV_ONENAND) += dev-onenand.o obj-$(CONFIG_S5P_DEV_CSIS0) += dev-csis0.o obj-$(CONFIG_S5P_DEV_CSIS1) += dev-csis1.o -obj-$(CONFIG_S5P_SYSMMU) += sysmmu.o diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c index 6a734288617..afaf87fdb93 100644 --- a/arch/arm/plat-s5p/dev-uart.c +++ b/arch/arm/plat-s5p/dev-uart.c @@ -28,7 +28,7 @@ static struct resource s5p_uart0_resource[] = { [0] = { .start = S5P_PA_UART0, - .end = S5P_PA_UART0 + S5P_SZ_UART, + .end = S5P_PA_UART0 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = { static struct resource s5p_uart1_resource[] = { [0] = { .start = S5P_PA_UART1, - .end = S5P_PA_UART1 + S5P_SZ_UART, + .end = S5P_PA_UART1 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = { static struct resource s5p_uart2_resource[] = { [0] = { .start = S5P_PA_UART2, - .end = S5P_PA_UART2 + S5P_SZ_UART, + .end = S5P_PA_UART2 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 [0] = { .start = S5P_PA_UART3, - .end = S5P_PA_UART3 + S5P_SZ_UART, + .end = S5P_PA_UART3 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 [0] = { .start = S5P_PA_UART4, - .end = S5P_PA_UART4 + S5P_SZ_UART, + .end = S5P_PA_UART4 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = { #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 [0] = { .start = S5P_PA_UART5, - .end = S5P_PA_UART5 + S5P_SZ_UART, + .end = S5P_PA_UART5 + S5P_SZ_UART - 1, .flags = IORESOURCE_MEM, }, [1] = { diff --git a/arch/arm/plat-s5p/include/plat/sysmmu.h b/arch/arm/plat-s5p/include/plat/sysmmu.h deleted file mode 100644 index db298fc5438..00000000000 --- a/arch/arm/plat-s5p/include/plat/sysmmu.h +++ /dev/null @@ -1,23 +0,0 @@ -/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h - * - * Copyright (c) 2010 Samsung Electronics Co., Ltd. - * http://www.samsung.com/ - * - * Samsung sysmmu driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef __ASM_PLAT_S5P_SYSMMU_H -#define __ASM_PLAT_S5P_SYSMMU_H __FILE__ - -/* debug macro */ -#ifdef CONFIG_S5P_SYSMMU_DEBUG -#define sysmmu_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg) -#else -#define sysmmu_debug(fmt, arg...) 
do { } while (0) -#endif - -#endif /* __ASM_PLAT_S5P_SYSMMU_H */ diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c index d804914dc2e..ffe8a48bc3c 100644 --- a/arch/arm/plat-s5p/sysmmu.c +++ b/arch/arm/plat-s5p/sysmmu.c @@ -16,8 +16,6 @@ #include <mach/regs-sysmmu.h> #include <mach/sysmmu.h> -#include <plat/sysmmu.h> - struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM]; void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp) @@ -123,7 +121,7 @@ static int s5p_sysmmu_set_tablebase(sysmmu_ips ips) : "=r" (pg) : : "cc"); \ pg &= ~0x3fff; - sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg); + printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg); /* Set sysmmu page table base address */ __raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR); diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c index 236ef8427d7..3e4bd8147bf 100644 --- a/arch/arm/plat-samsung/dev-ts.c +++ b/arch/arm/plat-samsung/dev-ts.c @@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) s3c_device_ts.dev.platform_data = npd; } -EXPORT_SYMBOL(s3c24xx_ts_set_platdata); diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index d9025e37767..30518cc9a67 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h @@ -17,6 +17,8 @@ #include <linux/irq.h> +struct sys_device; + #ifdef CONFIG_PM extern __init int s3c_pm_init(void); diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 99ba6789cc9..6dd455bafdf 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h @@ -24,10 +24,10 @@ static inline void putc(int c) { void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; - while (readl(base + UART01x_FR) & UART01x_FR_TXFF) + while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF) barrier(); - writel(c, base + UART01x_DR); + writel_relaxed(c, base + UART01x_DR); } static inline void flush(void) diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h index 09e9372aea2..8c8b24d0704 100644 --- a/arch/arm/plat-spear/include/plat/vmalloc.h +++ b/arch/arm/plat-spear/include/plat/vmalloc.h @@ -14,6 +14,6 @@ #ifndef __PLAT_VMALLOC_H #define __PLAT_VMALLOC_H -#define VMALLOC_END 0xF0000000 +#define VMALLOC_END 0xF0000000UL #endif /* __PLAT_VMALLOC_H */ diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 2fea897ebeb..9d6feaabbe7 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Sun Dec 12 23:24:27 2010 +# Last update: Mon Feb 7 08:59:27 2011 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250 vs_v210 MACH_VS_V210 VS_V210 2252 vs_v212 MACH_VS_V212 VS_V212 2253 hmt MACH_HMT HMT 2254 -suen3 MACH_SUEN3 SUEN3 2255 +km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255 vesper MACH_VESPER VESPER 2256 str9 MACH_STR9 STR9 2257 omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258 @@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001 ea20 MACH_EA20 EA20 3002 awm2 MACH_AWM2 AWM2 3003 ti8148evm MACH_TI8148EVM TI8148EVM 3004 -tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005 +seaboard MACH_SEABOARD SEABOARD 3005 linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006 tera_pro2_rack 
MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007 rubys MACH_RUBYS RUBYS 3008 @@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 punica MACH_PUNICA PUNICA 3208 -sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209 +trimslice MACH_TRIMSLICE TRIMSLICE 3209 mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210 mackerel MACH_MACKEREL MACKEREL 3211 fa9x27 MACH_FA9X27 FA9X27 3213 @@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235 pcm048 MACH_PCM048 PCM048 3236 dds MACH_DDS DDS 3237 chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238 +ts48xx MACH_TS48XX TS48XX 3239 +tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240 +whistler MACH_WHISTLER WHISTLER 3241 +asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242 +at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243 +ddplug MACH_DDPLUG DDPLUG 3244 +d2plug MACH_D2PLUG D2PLUG 3245 +kzm9d MACH_KZM9D KZM9D 3246 +verdi_lte MACH_VERDI_LTE VERDI_LTE 3247 +nanozoom MACH_NANOZOOM NANOZOOM 3248 +dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249 +dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250 +anchovy MACH_ANCHOVY ANCHOVY 3251 +re2rev20 MACH_RE2REV20 RE2REV20 3253 +re2rev21 MACH_RE2REV21 RE2REV21 3254 +cns21xx MACH_CNS21XX CNS21XX 3255 +rider MACH_RIDER RIDER 3257 +nsk330 MACH_NSK330 NSK330 3258 +cns2133evb MACH_CNS2133EVB CNS2133EVB 3259 +z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260 +z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261 +beect MACH_BEECT BEECT 3262 +dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263 +omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264 +mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265 +mione MACH_MIONE MIONE 3266 +top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267 +top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268 +kingdom MACH_KINGDOM KINGDOM 3269 +armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270 +lq2 MACH_LQ2 LQ2 3271 +sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272 +mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 +acer_a8 MACH_ACER_A8 ACER_A8 3275 +acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276 +guppy MACH_GUPPY GUPPY 3277 +mx61_ard MACH_MX61_ARD MX61_ARD 3278 +tx53 MACH_TX53 TX53 3279 +omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 +uemd MACH_UEMD UEMD 3281 +ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 +rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 +nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 +hkdkc100 MACH_HKDKC100 HKDKC100 3285 +ts42xx MACH_TS42XX TS42XX 3286 +aebl MACH_AEBL AEBL 3287 +wario MACH_WARIO WARIO 3288 +gfs_spm MACH_GFS_SPM GFS_SPM 3289 +cm_t3730 MACH_CM_T3730 CM_T3730 3290 +isc3 MACH_ISC3 ISC3 3291 +rascal MACH_RASCAL RASCAL 3292 +hrefv60 MACH_HREFV60 HREFV60 3293 +tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294 +pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295 +splendor MACH_SPLENDOR SPLENDOR 3296 +guf_planet MACH_GUF_PLANET GUF_PLANET 3297 +msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298 +htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299 +athene MACH_ATHENE ATHENE 3300 +deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301 +vivow_ct MACH_VIVOW_CT VIVOW_CT 3302 +nery_1000 MACH_NERY_1000 NERY_1000 3303 +rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304 +nmh MACH_NMH NMH 3305 +wn802t MACH_WN802T WN802T 3306 +dragonet MACH_DRAGONET DRAGONET 3307 +geneva_b MACH_GENEVA_B GENEVA_B 3308 +at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309 +bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310 +bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311 +koi MACH_KOI KOI 3312 +ts4800 MACH_TS4800 TS4800 3313 +tqma9263 MACH_TQMA9263 TQMA9263 3314 +holiday MACH_HOLIDAY 
HOLIDAY 3315 +dma_6410 MACH_DMA6410 DMA6410 3316 +pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317 +hwgw6410 MACH_HWGW6410 HWGW6410 3318 +shenzhou MACH_SHENZHOU SHENZHOU 3319 +cwme9210 MACH_CWME9210 CWME9210 3320 +cwme9210js MACH_CWME9210JS CWME9210JS 3321 +pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322 +colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323 +w21 MACH_W21 W21 3324 +polysat1 MACH_POLYSAT1 POLYSAT1 3325 +dataway MACH_DATAWAY DATAWAY 3326 +cobral138 MACH_COBRAL138 COBRAL138 3327 +roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328 +marvelc MACH_MARVELC MARVELC 3329 +navefihid MACH_NAVEFIHID NAVEFIHID 3330 +dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331 +able MACH_ABLE ABLE 3332 +legacy MACH_LEGACY LEGACY 3333 +icong MACH_ICONG ICONG 3334 +rover_g8 MACH_ROVER_G8 ROVER_G8 3335 +t5388p MACH_T5388P T5388P 3336 +dingo MACH_DINGO DINGO 3337 +goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338 diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h index 1ff9f1468c0..7dbc664eab1 100644 --- a/arch/blackfin/include/asm/bfin_serial.h +++ b/arch/blackfin/include/asm/bfin_serial.h @@ -10,6 +10,7 @@ #define __BFIN_ASM_SERIAL_H__ #include <linux/serial_core.h> +#include <linux/spinlock.h> #include <mach/anomaly.h> #include <mach/bfin_serial.h> @@ -41,6 +42,7 @@ struct bfin_serial_port { struct circ_buf rx_dma_buf; struct timer_list rx_dma_timer; int rx_dma_nrows; + spinlock_t rx_lock; unsigned int tx_dma_channel; unsigned int rx_dma_channel; struct work_struct tx_dma_workqueue; diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 442218980db..c49be845f96 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -72,11 +72,6 @@ SECTIONS INIT_TEXT_SECTION(PAGE_SIZE) .init.data : { INIT_DATA } .init.setup : { INIT_SETUP(16) } -#ifdef CONFIG_ETRAX_ARCH_V32 - __start___param = .; - __param : { *(__param) } - __stop___param = .; -#endif .initcall.init : { INIT_CALLS } diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h index 65b13128283..32198454da7 100644 --- a/arch/m68k/include/asm/string.h +++ b/arch/m68k/include/asm/string.h @@ -99,14 +99,12 @@ static inline int strcmp(const char *cs, const char *ct) : "+a" (cs), "+a" (ct), "=d" (res)); return res; } +#endif /* CONFIG_COLDFIRE */ #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); -#define __HAVE_ARCH_MEMCMP -extern int memcmp(const void *, const void *, __kernel_size_t); #define memcmp(d, s, n) __builtin_memcmp(d, s, n) -#endif /* CONFIG_COLDFIRE */ #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c index 4253f870e54..d399c5f2563 100644 --- a/arch/m68k/lib/string.c +++ b/arch/m68k/lib/string.c @@ -243,14 +243,3 @@ void *memmove(void *dest, const void *src, size_t n) return xdest; } EXPORT_SYMBOL(memmove); - -int memcmp(const void *cs, const void *ct, size_t count) -{ - const unsigned char *su1, *su2; - - for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--) - if (*su1 != *su2) - return *su1 < *su2 ? -1 : +1; - return 0; -} -EXPORT_SYMBOL(memcmp); diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index ef332136f96..47e15ebfd89 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -141,6 +141,12 @@ SECTIONS { *(__param) __stop___param = .; + /* Built-in module versions */ + . 
= ALIGN(4) ; + __start___modver = .; + *(__modver) + __stop___modver = .; + . = ALIGN(4) ; _etext = . ; } > TEXT diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile index d94d709665a..32d852e586d 100644 --- a/arch/m68knommu/lib/Makefile +++ b/arch/m68knommu/lib/Makefile @@ -4,4 +4,4 @@ lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ - checksum.o memcpy.o memset.o delay.o + checksum.o memcpy.o memmove.o memset.o delay.o diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c new file mode 100644 index 00000000000..b3dcfe9dab7 --- /dev/null +++ b/arch/m68knommu/lib/memmove.c @@ -0,0 +1,105 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#define __IN_STRING_C + +#include <linux/module.h> +#include <linux/string.h> + +void *memmove(void *dest, const void *src, size_t n) +{ + void *xdest = dest; + size_t temp; + + if (!n) + return xdest; + + if (dest < src) { + if ((long)dest & 1) { + char *cdest = dest; + const char *csrc = src; + *cdest++ = *csrc++; + dest = cdest; + src = csrc; + n--; + } + if (n > 2 && (long)dest & 2) { + short *sdest = dest; + const short *ssrc = src; + *sdest++ = *ssrc++; + dest = sdest; + src = ssrc; + n -= 2; + } + temp = n >> 2; + if (temp) { + long *ldest = dest; + const long *lsrc = src; + temp--; + do + *ldest++ = *lsrc++; + while (temp--); + dest = ldest; + src = lsrc; + } + if (n & 2) { + short *sdest = dest; + const short *ssrc = src; + *sdest++ = *ssrc++; + dest = sdest; + src = ssrc; + } + if (n & 1) { + char *cdest = dest; + const char *csrc = src; + *cdest = *csrc; + } + } else { + dest = (char *)dest + n; + src = (const char *)src + n; + if ((long)dest & 1) { + char *cdest = dest; + const char *csrc = src; + *--cdest = *--csrc; + dest = cdest; + src = csrc; + n--; + } + if (n > 2 && (long)dest & 2) { + short *sdest = dest; + const short *ssrc = src; + *--sdest = *--ssrc; + dest = sdest; + src = ssrc; + n -= 2; + } + temp = n >> 2; + if (temp) { + long *ldest = dest; + const long *lsrc = src; + temp--; + do + *--ldest = *--lsrc; + while (temp--); + dest = ldest; + src = lsrc; + } + if (n & 2) { + short *sdest = dest; + const short *ssrc = src; + *--sdest = *--ssrc; + dest = sdest; + src = ssrc; + } + if (n & 1) { + char *cdest = dest; + const char *csrc = src; + *--cdest = *--csrc; + } + } + return xdest; +} +EXPORT_SYMBOL(memmove); diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c index d09d9da0453..c5151f84659 100644 --- a/arch/m68knommu/platform/5249/intc2.c +++ b/arch/m68knommu/platform/5249/intc2.c @@ -50,8 +50,10 @@ static int __init mcf_intc2_init(void) int irq; /* GPIO interrupt sources */ - for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) + for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) { irq_desc[irq].chip = &intc2_irq_gpio_chip; + set_irq_handler(irq, handle_edge_irq); + } return 0; } diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S index 240a7a6e25c..676960cf022 100644 --- a/arch/m68knommu/platform/68328/entry.S +++ b/arch/m68knommu/platform/68328/entry.S @@ -108,7 +108,6 @@ Luser_return: movel %d1,%a2 1: move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ - andl #_TIF_WORK_MASK,%d1 jne Lwork_to_do RESTORE_ALL diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c index 
f27e688c404..8e4e10cc008 100644 --- a/arch/m68knommu/platform/68360/commproc.c +++ b/arch/m68knommu/platform/68360/commproc.c @@ -210,7 +210,7 @@ void cpm_install_handler(int vec, void (*handler)(), void *dev_id) { - request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id); + request_irq(vec, handler, 0, "timer", dev_id); /* if (cpm_vecs[vec].handler != 0) */ /* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */ diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c index ac629fa3009..9dd5bca3874 100644 --- a/arch/m68knommu/platform/68360/config.c +++ b/arch/m68knommu/platform/68360/config.c @@ -75,7 +75,7 @@ void hw_timer_init(void) /* Set compare register 32Khz / 32 / 10 = 100 */ TCMP = 10; - request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL); + request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL); #endif /* General purpose quicc timers: MC68360UM p7-20 */ diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S index 8a28788c0ee..46c1b18c9dc 100644 --- a/arch/m68knommu/platform/68360/entry.S +++ b/arch/m68knommu/platform/68360/entry.S @@ -104,7 +104,6 @@ Luser_return: movel %d1,%a2 1: move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ - andl #_TIF_WORK_MASK,%d1 jne Lwork_to_do RESTORE_ALL diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c index ad96ab1051f..a29041c1a8a 100644 --- a/arch/m68knommu/platform/68360/ints.c +++ b/arch/m68knommu/platform/68360/ints.c @@ -132,8 +132,8 @@ void init_IRQ(void) pquicc->intr_cimr = 0x00000000; for (i = 0; (i < NR_IRQS); i++) { - set_irq_chip(irq, &intc_irq_chip); - set_irq_handler(irq, handle_level_irq); + set_irq_chip(i, &intc_irq_chip); + set_irq_handler(i, handle_level_irq); } } diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S index 4ddfc3da70d..5837cf080b6 100644 --- a/arch/m68knommu/platform/coldfire/entry.S +++ b/arch/m68knommu/platform/coldfire/entry.S @@ -138,7 +138,6 @@ Luser_return: andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ movel %d1,%a0 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ - andl #0xefff,%d1 jne Lwork_to_do /* still work to do */ Lreturn: diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h index 5fd31905775..c4532f032b3 100644 --- a/arch/microblaze/include/asm/irqflags.h +++ b/arch/microblaze/include/asm/irqflags.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <asm/registers.h> -#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR +#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR static inline unsigned long arch_local_irq_save(void) { diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index b23f6807587..885574a73f0 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -411,20 +411,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline unsigned long pte_update(pte_t *p, unsigned long clr, unsigned long set) { - unsigned long old, tmp, msr; - - __asm__ __volatile__("\ - msrclr %2, 0x2\n\ - nop\n\ - lw %0, %4, r0\n\ - andn %1, %0, %5\n\ - or %1, %1, %6\n\ - sw %1, %4, r0\n\ - mts rmsr, %2\n\ - nop" - : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) - : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p) - : "cc"); + unsigned long flags, old, tmp; + + raw_local_irq_save(flags); + + __asm__ __volatile__( "lw %0, %2, r0 \n" + "andn %1, %0, %3 \n" + "or %1, %1, %4 \n" + 
"sw %1, %2, r0 \n" + : "=&r" (old), "=&r" (tmp) + : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set) + : "cc"); + + raw_local_irq_restore(flags); return old; } diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c index e01afa68273..488c1ed24e3 100644 --- a/arch/microblaze/kernel/cpu/pvr.c +++ b/arch/microblaze/kernel/cpu/pvr.c @@ -27,7 +27,7 @@ register unsigned tmp __asm__("r3"); \ tmp = 0x0; /* Prevent warning about unused */ \ __asm__ __volatile__ ( \ - "mfs %0, rpvr" #pvrid ";" \ + "mfs %0, rpvr" #pvrid ";" \ : "=r" (tmp) : : "memory"); \ val = tmp; \ } @@ -54,7 +54,7 @@ int cpu_has_pvr(void) if (!(flags & PVR_MSR_BIT)) return 0; - get_single_pvr(0x00, pvr0); + get_single_pvr(0, pvr0); pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0); if (pvr0 & PVR0_PVR_FULL_MASK) diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 0db20b5abb5..778a5ce2e4f 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -62,15 +62,14 @@ real_start: andi r1, r1, ~2 mts rmsr, r1 /* - * Here is checking mechanism which check if Microblaze has msr instructions - * We load msr and compare it with previous r1 value - if is the same, - * msr instructions works if not - cpu don't have them. + * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc' + * if the msrclr instruction is not enabled. We use this to detect + * if the opcode is available, by issuing msrclr and then testing the result. + * r8 == 0 - msr instructions are implemented + * r8 != 0 - msr instructions are not implemented */ - /* r8=0 - I have msr instr, 1 - I don't have them */ - rsubi r0, r0, 1 /* set the carry bit */ - msrclr r0, 0x4 /* try to clear it */ - /* read the carry bit, r8 will be '0' if msrclr exists */ - addik r8, r0, 0 + msrclr r8, 0 /* clear nothing - just read msr for test */ + cmpu r8, r8, r1 /* r1 must contain msr reg content */ /* r7 may point to an FDT, or there may be one linked in. if it's in r7, we've got to save it away ASAP. 
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index bb1558e4b28..9312fbb37ef 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -161,11 +161,11 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR if (msr) eprintk("!!!Your kernel has setup MSR instruction but " - "CPU don't have it %d\n", msr); + "CPU don't have it %x\n", msr); #else if (!msr) eprintk("!!!Your kernel not setup MSR instruction but " - "CPU have it %d\n", msr); + "CPU have it %x\n", msr); #endif for (src = __ivt_start; src < __ivt_end; src++, dst++) diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 8eaed81ea64..17194fcd404 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -40,8 +40,8 @@ /* MAS registers bit definitions */ -#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) -#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) +#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) +#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) #define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS0_HES 0x00004000 #define MAS0_WQ_ALLWAYS 0x00000000 @@ -50,12 +50,12 @@ #define MAS1_VALID 0x80000000 #define MAS1_IPROT 0x40000000 -#define MAS1_TID(x) ((x << 16) & 0x3FFF0000) +#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) #define MAS1_IND 0x00002000 #define MAS1_TS 0x00001000 #define MAS1_TSIZE_MASK 0x00000f80 #define MAS1_TSIZE_SHIFT 7 -#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) #define MAS2_EPN 0xFFFFF000 #define MAS2_X0 0x00000040 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 53b64be40eb..da4b2000854 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr; #ifdef CONFIG_FLATMEM #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) +#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) #endif #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a8a95..f8cd9fba4d3 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -18,7 +18,7 @@ #include <asm/mmu.h> _GLOBAL(__setup_cpu_603) - mflr r4 + mflr r5 BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION bl __init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) bl setup_common_caches - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_604) - mflr r4 + mflr r5 bl setup_common_caches bl setup_604_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750cx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750fx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7400) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7410) - mflr 
r4 + mflr r5 bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 li r3,0 mtspr SPRN_L2CR2,r3 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_745x) - mflr r4 + mflr r5 bl setup_common_caches bl setup_745x_specifics - mtlr r4 + mtlr r5 blr /* Enable caches for 603's, 604, 750 & 7400 */ @@ -194,10 +194,10 @@ setup_750cx: cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) blr /* 750fx specific @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) andi. r0,r6,CPU_FTR_L3_DISABLE_NAP beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) 1: mfspr r11,SPRN_HID0 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 8d74a24c550..e8e915ce3d8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) * pointer on ppc64 and booke as we are running at 0 in real mode * on ppc64 and reloc_offset is always 0 on booke. */ - if (s->cpu_setup) { - s->cpu_setup(offset, s); + if (t->cpu_setup) { + t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index bf5cb91f07d..fd481232957 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu) dbg("removing cpu %lu from node %d\n", cpu, node); if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); + cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); } else { printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", cpu, node); @@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void) } #endif /* CONFIG_MEMORY_HOTPLUG */ -/* Vrtual Processor Home Node (VPHN) support */ +/* Virtual Processor Home Node (VPHN) support */ #ifdef CONFIG_PPC_SPLPAR -#define VPHN_NR_CHANGE_CTRS (8) -static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS]; +static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; static cpumask_t cpu_associativity_changes_mask; static int vphn_enabled; static void set_topology_timer(void); @@ -1303,16 +1302,18 @@ static void set_topology_timer(void); */ static void setup_cpu_associativity_change_counters(void) { - int cpu = 0; + int cpu; + + /* The VPHN feature supports a maximum of 8 reference points */ + BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); for_each_possible_cpu(cpu) { - int i = 0; + int i; u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { + for (i = 0; i < distance_ref_points_depth; i++) counts[i] = hypervisor_counts[i]; - } } } @@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void) */ static int update_cpu_associativity_changes_mask(void) { - int cpu = 0, nr_cpus = 0; + int cpu, nr_cpus = 0; cpumask_t *changes = &cpu_associativity_changes_mask; cpumask_clear(changes); @@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void) u8 *counts = vphn_cpu_change_counts[cpu]; volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; - for (i = 0; i < 
VPHN_NR_CHANGE_CTRS; i++) { - if (hypervisor_counts[i] > counts[i]) { + for (i = 0; i < distance_ref_points_depth; i++) { + if (hypervisor_counts[i] != counts[i]) { counts[i] = hypervisor_counts[i]; changed = 1; } @@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void) return nr_cpus; } -/* 6 64-bit registers unpacked into 12 32-bit associativity values */ -#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32)) +/* + * 6 64-bit registers unpacked into 12 32-bit associativity values. To form + * the complete property we have to add the length in the first cell. + */ +#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) /* * Convert the associativity domain numbers returned from the hypervisor @@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void) */ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) { - int i = 0; - int nr_assoc_doms = 0; + int i, nr_assoc_doms = 0; const u16 *field = (const u16*) packed; #define VPHN_FIELD_UNUSED (0xffff) #define VPHN_FIELD_MSB (0x8000) #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) - for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) { + for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { if (*field == VPHN_FIELD_UNUSED) { /* All significant fields processed, and remaining * fields contain the reserved value of all 1's. @@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ unpacked[i] = *((u32*)field); field += 2; - } - else if (*field & VPHN_FIELD_MSB) { + } else if (*field & VPHN_FIELD_MSB) { /* Data is in the lower 15 bits of this field */ unpacked[i] = *field & VPHN_FIELD_MASK; field++; nr_assoc_doms++; - } - else { + } else { /* Data is in the lower 15 bits of this field * concatenated with the next 16 bit field */ @@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) } } + /* The first cell contains the length of the property */ + unpacked[0] = nr_assoc_doms; + return nr_assoc_doms; } @@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) */ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) { - long rc = 0; + long rc; long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; u64 flags = 1; int hwcpu = get_hard_smp_processor_id(cpu); @@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity) static long vphn_get_associativity(unsigned long cpu, unsigned int *associativity) { - long rc = 0; + long rc; rc = hcall_vphn(cpu, associativity); @@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu, */ int arch_update_cpu_topology(void) { - int cpu = 0, nid = 0, old_nid = 0; + int cpu, nid, old_nid; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; - struct sys_device *sysdev = NULL; + struct sys_device *sysdev; for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { vphn_get_associativity(cpu, associativity); @@ -1512,7 +1516,8 @@ int start_topology_update(void) { int rc = 0; - if (firmware_has_feature(FW_FEATURE_VPHN)) { + if (firmware_has_feature(FW_FEATURE_VPHN) && + get_lppaca()->shared_proc) { vphn_enabled = 1; setup_cpu_associativity_change_counters(); init_timer_deferrable(&topology_timer); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 5d3ea9f60dd..ca5d5898d32 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page); /* NB: reg/unreg are called 
while guarded with the tracepoints_mutex */ extern long hcall_tracepoint_refcount; +/* + * Since the tracing code might execute hcalls we need to guard against + * recursion. One example of this are spinlocks calling H_YIELD on + * shared processor partitions. + */ +static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); + void hcall_tracepoint_regfunc(void) { hcall_tracepoint_refcount++; @@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void) void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { + unsigned long flags; + unsigned int *depth; + + local_irq_save(flags); + + depth = &__get_cpu_var(hcall_trace_depth); + + if (*depth) + goto out; + + (*depth)++; trace_hcall_entry(opcode, args); + (*depth)--; + +out: + local_irq_restore(flags); } void __trace_hcall_exit(long opcode, unsigned long retval, unsigned long *retbuf) { + unsigned long flags; + unsigned int *depth; + + local_irq_save(flags); + + depth = &__get_cpu_var(hcall_trace_depth); + + if (*depth) + goto out; + + (*depth)++; trace_hcall_exit(opcode, retval, retbuf); + (*depth)--; + +out: + local_irq_restore(flags); } #endif diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 0851eb1e919..2751b3a8a66 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -133,11 +133,12 @@ unsigned long decompress_kernel(void) unsigned long output_addr; unsigned char *output; - check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); + output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; + check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); memset(&_bss, 0, &_ebss - &_bss); free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); + output = (unsigned char *) output_addr; #ifdef CONFIG_BLK_DEV_INITRD /* diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index f42dbabc0d3..48884f89ab9 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c @@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) BUG_ON(ret != bsize); data += bsize - index; len -= bsize - index; + index = 0; } /* process as many blocks as possible */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 76daea11718..5c5ba10384c 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -36,14 +36,19 @@ static inline int atomic_read(const atomic_t *v) { - barrier(); - return v->counter; + int c; + + asm volatile( + " l %0,%1\n" + : "=d" (c) : "Q" (v->counter)); + return c; } static inline void atomic_set(atomic_t *v, int i) { - v->counter = i; - barrier(); + asm volatile( + " st %1,%0\n" + : "=Q" (v->counter) : "d" (i)); } static inline int atomic_add_return(int i, atomic_t *v) @@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) static inline long long atomic64_read(const atomic64_t *v) { - barrier(); - return v->counter; + long long c; + + asm volatile( + " lg %0,%1\n" + : "=d" (c) : "Q" (v->counter)); + return c; } static inline void atomic64_set(atomic64_t *v, long long i) { - v->counter = i; - barrier(); + asm volatile( + " stg %1,%0\n" + : "=Q" (v->counter) : "d" (i)); } static inline long long atomic64_add_return(long long i, atomic64_t *v) diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 24aafa68b64..2a30d5ac066 100644 --- a/arch/s390/include/asm/cache.h 
+++ b/arch/s390/include/asm/cache.h @@ -13,6 +13,7 @@ #define L1_CACHE_BYTES 256 #define L1_CACHE_SHIFT 8 +#define NET_SKB_PAD 32 #define __read_mostly __attribute__((__section__(".data..read_mostly"))) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index bf3de04170a..2c79b641627 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); */ extern unsigned long thread_saved_pc(struct task_struct *t); -/* - * Print register of task into buffer. Used in fs/proc/array.c. - */ -extern void task_show_regs(struct seq_file *m, struct task_struct *task); - extern void show_code(struct pt_regs *regs); unsigned long get_wchan(struct task_struct *p); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5eb78dd584c..b5a4a739b47 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs) show_last_breaking_event(regs); } -/* This is called from fs/proc/array.c */ -void task_show_regs(struct seq_file *m, struct task_struct *task) -{ - struct pt_regs *regs; - - regs = task_pt_regs(task); - seq_printf(m, "task: %p, ksp: %p\n", - task, (void *)task->thread.ksp); - seq_printf(m, "User PSW : %p %p\n", - (void *) regs->psw.mask, (void *)regs->psw.addr); - - seq_printf(m, "User GPRS: " FOURLONG, - regs->gprs[0], regs->gprs[1], - regs->gprs[2], regs->gprs[3]); - seq_printf(m, " " FOURLONG, - regs->gprs[4], regs->gprs[5], - regs->gprs[6], regs->gprs[7]); - seq_printf(m, " " FOURLONG, - regs->gprs[8], regs->gprs[9], - regs->gprs[10], regs->gprs[11]); - seq_printf(m, " " FOURLONG, - regs->gprs[12], regs->gprs[13], - regs->gprs[14], regs->gprs[15]); - seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", - task->thread.acrs[0], task->thread.acrs[1], - task->thread.acrs[2], task->thread.acrs[3]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[4], task->thread.acrs[5], - task->thread.acrs[6], task->thread.acrs[7]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[8], task->thread.acrs[9], - task->thread.acrs[10], task->thread.acrs[11]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[12], task->thread.acrs[13], - task->thread.acrs[14], task->thread.acrs[15]); -} - static DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index a2f5c61f924..843e4faf6a5 100644 --- a/arch/sparc/include/asm/pcr.h +++ b/arch/sparc/include/asm/pcr.h @@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz) extern u64 pcr_enable; +extern int pcr_arch_init(void); + #endif /* __PCR_H */ diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c..72509d0e34b 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, static int iommu_alloc_ctx(struct iommu *iommu) { int lowest = iommu->ctx_lowest_free; - int sz = IOMMU_NUM_CTXS - lowest; - int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); + int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); - if (unlikely(n == sz)) { + if (unlikely(n == IOMMU_NUM_CTXS)) { n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); if (unlikely(n == lowest)) { printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); diff --git a/arch/sparc/kernel/pcr.c 
b/arch/sparc/kernel/pcr.c index ae96cf52a95..7c2ced612b8 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -167,5 +167,3 @@ out_unregister: unregister_perf_hsvc(); return err; } - -early_initcall(pcr_arch_init); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f4704..555a76d1f4a 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -49,6 +49,7 @@ #include <asm/mdesc.h> #include <asm/ldc.h> #include <asm/hypervisor.h> +#include <asm/pcr.h> #include "cpumap.h" @@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu) void __init smp_cpus_done(unsigned int max_cpus) { + pcr_arch_init(); } void smp_send_reschedule(int cpu) diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7..8f096e84a93 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S @@ -24,9 +24,9 @@ retl_efault: .globl __do_int_store __do_int_store: ld [%o2], %g1 - cmp %1, 2 + cmp %o1, 2 be 2f - cmp %1, 4 + cmp %o1, 4 be 1f srl %g1, 24, %g2 srl %g1, 16, %g7 diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 764b3eb7b60..48d00e72ce1 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c @@ -10,7 +10,7 @@ */ #include <linux/string.h> -#include <linux/bitops.h> +#include <linux/bitmap.h> #include <asm/bitext.h> @@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align) while (test_bit(offset + i, t->map) == 0) { i++; if (i == len) { - for (i = 0; i < len; i++) - __set_bit(offset + i, t->map); + bitmap_set(t->map, offset, len); if (offset == t->first_free) t->first_free = find_next_zero_bit (t->map, t->size, diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 211ca3f7fd1..4ea15ca89b2 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -88,6 +88,7 @@ extern int acpi_disabled; extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; extern u8 acpi_sci_flags; extern int acpi_sci_override_gsi; diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5e3969c36d7..3c896946f4c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); extern void setup_local_APIC(void); extern void end_local_APIC_setup(void); +extern void bsp_end_local_APIC_setup(void); extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 6e6e7558e70..4564c8e28a3 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int); DECLARE_PER_CPU(int, cpu_state); -int __cpuinit mwait_usable(const struct cpuinfo_x86 *); +int mwait_usable(const struct cpuinfo_x86 *); #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index e2f6a99f14a..cc29086e30c 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -22,6 +22,7 @@ #define ARCH_P4_CNTRVAL_BITS (40) #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) +#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) #define P4_ESCR_EVENT_MASK 0x7e000000U #define P4_ESCR_EVENT_SHIFT 25 diff --git a/arch/x86/include/asm/smpboot_hooks.h 
b/arch/x86/include/asm/smpboot_hooks.h index 6c22bf353f2..725b7783199 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; + *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b3a71137983..3e6e2d68f76 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; +int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; @@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, return 0; } - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { + if (acpi_skip_timer_override) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); + } } mp_override_legacy_irq(intsrc->source_irq, diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 4d9ebbab223..68d1537b8c8 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -12,10 +12,8 @@ #include <linux/cpumask.h> #include <asm/segment.h> #include <asm/desc.h> - -#ifdef CONFIG_X86_32 #include <asm/pgtable.h> -#endif +#include <asm/cacheflush.h> #include "realmode/wakeup.h" #include "sleep.h" @@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void) memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); } +int __init acpi_configure_wakeup_memory(void) +{ + if (acpi_realmode) + set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT); + + return 0; +} +arch_initcall(acpi_configure_wakeup_memory); + static int __init acpi_sleep_setup(char *str) { diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 123608531c8..7038b95d363 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) atomic_set(&stop_machine_first, 1); wrote_text = 0; - stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); + __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); } #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51ef31a89be..51d4e166306 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void) memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { - apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; + adev->evt.rating = APBT_CLOCKEVENT_RATING - 100; global_clock_event = &adev->evt; printk(KERN_DEBUG "%s clockevent registered as global\n", global_clock_event->name); diff --git a/arch/x86/kernel/apic/apic.c 
b/arch/x86/kernel/apic/apic.c index 06c196d7e59..76b96d74978 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1381,12 +1381,17 @@ void __cpuinit end_local_APIC_setup(void) #endif apic_pm_activate(); +} + +void __init bsp_end_local_APIC_setup(void) +{ + end_local_APIC_setup(); /* * Now that local APIC setup is completed for BP, configure the fault * handling for interrupt remapping. */ - if (!smp_processor_id() && intr_remapping_enabled) + if (intr_remapping_enabled) enable_drhd_fault_handling(); } @@ -1756,7 +1761,7 @@ int __init APIC_init_uniprocessor(void) enable_IO_APIC(); #endif - end_local_APIC_setup(); + bsp_end_local_APIC_setup(); #ifdef CONFIG_X86_IO_APIC if (smp_found_config && !skip_ioapic_setup && nr_ioapics) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 697dc34b7b8..ca9e2a3545a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi) { int i = 0; + if (nr_ioapics == 0) + return -1; + /* Find the IOAPIC that manages this GSI. */ for (i = 0; i < nr_ioapics; i++) { if ((gsi >= mp_gsi_routing[i].gsi_base) diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index f7a0993c1e7..ff751a9f182 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) return 1; } - /* it might be unflagged overflow */ - rdmsrl(hwc->event_base + hwc->idx, v); - if (!(v & ARCH_P4_CNTRVAL_MASK)) + /* + * In some circumstances the overflow might issue an NMI but did + * not set P4_CCCR_OVF bit. Because a counter holds a negative value + * we simply check for high bit being set, if it's cleared it means + * the counter has reached zero value and continued counting before + * real NMI signal was received: + */ + if (!(v & ARCH_P4_UNFLAGGED_BIT)) return 1; return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 76b8cd953de..9efbdcc5642 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func) static u32 __init ati_sbx00_rev(int num, int slot, int func) { - u32 old, d; + u32 d; - d = read_pci_config(num, slot, func, 0x70); - old = d; - d &= ~(1<<8); - write_pci_config(num, slot, func, 0x70, d); d = read_pci_config(num, slot, func, 0x8); d &= 0xff; - write_pci_config(num, slot, func, 0x70, old); return d; } @@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func) { u32 d, rev; - if (acpi_use_timer_override) - return; - rev = ati_sbx00_rev(num, slot, func); + if (rev >= 0x40) + acpi_fix_pin2_polarity = 1; + if (rev > 0x13) return; + if (acpi_use_timer_override) + return; + /* check for IRQ0 interrupt swap */ d = read_pci_config(num, slot, func, 0x64); if (!(d & (1<<14))) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52945da52a9..387b6a0c9e8 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -367,7 +367,8 @@ void fixup_irqs(void) if (irr & (1 << (vector % 32))) { irq = __this_cpu_read(vector_irq[vector]); - data = irq_get_irq_data(irq); + desc = irq_to_desc(irq); + data = &desc->irq_data; raw_spin_lock(&desc->lock); if (data->chip->irq_retrigger) data->chip->irq_retrigger(data); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e764fc05d70..ff455419898 100644 --- a/arch/x86/kernel/process.c +++ 
b/arch/x86/kernel/process.c @@ -92,21 +92,31 @@ void show_regs(struct pt_regs *regs) void show_regs_common(void) { - const char *board, *product; + const char *vendor, *product, *board; - board = dmi_get_system_info(DMI_BOARD_NAME); - if (!board) - board = ""; + vendor = dmi_get_system_info(DMI_SYS_VENDOR); + if (!vendor) + vendor = ""; product = dmi_get_system_info(DMI_PRODUCT_NAME); if (!product) product = ""; + /* Board Name is optional */ + board = dmi_get_system_info(DMI_BOARD_NAME); + printk(KERN_CONT "\n"); - printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", + printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", current->pid, current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), - init_utsname()->version, board, product); + init_utsname()->version); + printk(KERN_CONT " "); + printk(KERN_CONT "%s %s", vendor, product); + if (board) { + printk(KERN_CONT "/"); + printk(KERN_CONT "%s", board); + } + printk(KERN_CONT "\n"); } void flush_thread(void) @@ -506,7 +516,7 @@ static void poll_idle(void) #define MWAIT_ECX_EXTENDED_INFO 0x01 #define MWAIT_EDX_C1 0xf0 -int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) +int mwait_usable(const struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index fc7aae1e2bc..715037caeb4 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, + { /* Handle problems with rebooting on VersaLogic Menlow boards */ + .callback = set_bios_reboot, + .ident = "VersaLogic Menlow based board", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), + }, + }, { } }; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 03273b6c272..08776a95348 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus) connect_bsp_APIC(); setup_local_APIC(); - end_local_APIC_setup(); + bsp_end_local_APIC_setup(); return -1; } @@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) if (!skip_ioapic_setup && nr_ioapics) enable_IO_APIC(); - end_local_APIC_setup(); + bsp_end_local_APIC_setup(); map_cpu_to_logical_apicid(); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 25bd1bc5aad..63fec1531e8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1150,8 +1150,8 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) kvm_load_ldt(svm->host.ldt); #ifdef CONFIG_X86_64 loadsegment(fs, svm->host.fs); - load_gs_index(svm->host.gs); wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); + load_gs_index(svm->host.gs); #else loadsegment(gs, svm->host.gs); #endif @@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm) kvm_register_write(&svm->vcpu, reg, val); } + skip_emulated_instruction(&svm->vcpu); + return 1; } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 381b09bb562..a89043a3caa 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, * tree of blkg (instead of traversing through hash list all * the time. */ - tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); + + /* + * This is the common case when there are no blkio cgroups. 
+ * Avoid lookup in this case + */ + if (blkcg == &blkio_root_cgroup) + tg = &td->root_tg; + else + tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); /* Fill in device details for root group */ if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 501ffdf0399..7be4c795962 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) } static inline unsigned -cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { unsigned slice = cfq_prio_to_slice(cfqd, cfqq); if (cfqd->cfq_latency) { @@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) static inline void cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - unsigned slice = cfq_scaled_group_slice(cfqd, cfqq); + unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq); cfqq->slice_start = jiffies; cfqq->slice_end = jiffies + slice; @@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, */ if (timed_out) { if (cfq_cfqq_slice_new(cfqq)) - cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq); + cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); else cfqq->slice_resid = cfqq->slice_end - jiffies; cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); @@ -3432,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) { struct cfq_io_context *cic = cfqd->active_cic; + /* If the queue already has requests, don't wait */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + return false; + /* If there are other queues in the group, don't wait */ if (cfqq->cfqg->nr_cfqq > 1) return false; diff --git a/block/genhd.c b/block/genhd.c index 6a5b772aa20..cbf1112a885 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno) struct block_device *bdev = bdget_disk(disk, partno); if (bdev) { fsync_bdev(bdev); - res = __invalidate_device(bdev); + res = __invalidate_device(bdev, true); bdput(bdev); } return res; diff --git a/block/ioctl.c b/block/ioctl.c index 9049d460fa8..1124cd29726 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -EINVAL; if (get_user(n, (int __user *) arg)) return -EFAULT; - if (!(mode & FMODE_EXCL) && - blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) - return -EBUSY; + if (!(mode & FMODE_EXCL)) { + bdgrab(bdev); + if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) + return -EBUSY; + } ret = set_blocksize(bdev, n); if (!(mode & FMODE_EXCL)) blkdev_put(bdev, mode | FMODE_EXCL); diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index e9562a7cb2f..3b20a3401b6 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -212,37 +212,40 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Validate wake_device is of type Device */ - - device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); - if (device_node->type != ACPI_TYPE_DEVICE) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Ensure that we have a valid GPE number */ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); - if (gpe_event_info) { - /* - * If there is no method or handler for this GPE, then the - * 
wake_device will be notified whenever this GPE fires (aka - * "implicit notify") Note: The GPE is assumed to be - * level-triggered (for windows compatibility). - */ - if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == - ACPI_GPE_DISPATCH_NONE) { - gpe_event_info->flags = - (ACPI_GPE_DISPATCH_NOTIFY | - ACPI_GPE_LEVEL_TRIGGERED); - gpe_event_info->dispatch.device_node = device_node; - } + if (!gpe_event_info) { + goto unlock_and_exit; + } + + /* + * If there is no method or handler for this GPE, then the + * wake_device will be notified whenever this GPE fires (aka + * "implicit notify") Note: The GPE is assumed to be + * level-triggered (for windows compatibility). + */ + if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == + ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { - gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; - status = AE_OK; + /* Validate wake_device is of type Device */ + + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, + wake_device); + if (device_node->type != ACPI_TYPE_DEVICE) { + goto unlock_and_exit; + } + gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | + ACPI_GPE_LEVEL_TRIGGERED); + gpe_event_info->dispatch.device_node = device_node; } + gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; + status = AE_OK; + + unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b0931818cf9..c90c76aa7f8 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port); acpi_status acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) { - u32 dummy; void __iomem *virt_addr; - int size = width / 8, unmap = 0; + unsigned int size = width / 8; + bool unmap = false; + u32 dummy; rcu_read_lock(); virt_addr = acpi_map_vaddr_lookup(phys_addr, size); - rcu_read_unlock(); if (!virt_addr) { + rcu_read_unlock(); virt_addr = acpi_os_ioremap(phys_addr, size); - unmap = 1; + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; } + if (!value) value = &dummy; @@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) if (unmap) iounmap(virt_addr); + else + rcu_read_unlock(); return AE_OK; } @@ -674,14 +680,17 @@ acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) { void __iomem *virt_addr; - int size = width / 8, unmap = 0; + unsigned int size = width / 8; + bool unmap = false; rcu_read_lock(); virt_addr = acpi_map_vaddr_lookup(phys_addr, size); - rcu_read_unlock(); if (!virt_addr) { + rcu_read_unlock(); virt_addr = acpi_os_ioremap(phys_addr, size); - unmap = 1; + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; } switch (width) { @@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) if (unmap) iounmap(virt_addr); + else + rcu_read_unlock(); return AE_OK; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 42d3d72dae8..5af3479714f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device) if (!device) return 0; + /* Is this device able to support video switching ? */ + if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || + ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) + video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; + /* Is this device able to retrieve a video ROM ? 
*/ if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) video_caps |= ACPI_VIDEO_ROM_AVAILABLE; diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index ed650145250..7bfbe40bc43 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void) struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); - if (device_can_wakeup(&dev->dev)) + if (device_can_wakeup(&dev->dev)) { + /* Button GPEs are supposed to be always enabled. */ + acpi_enable_gpe(dev->wakeup.gpe_device, + dev->wakeup.gpe_number); device_set_wakeup_enable(&dev->dev, true); + } } mutex_unlock(&acpi_device_lock); return 0; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 73fb1c4f4cd..25ef1a4556e 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc) } skb = alloc_skb(sizeof(*header), GFP_ATOMIC); - if (!skb && net_ratelimit()) { - dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); + if (!skb) { + if (net_ratelimit()) + dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); diff --git a/drivers/block/Makefile b/drivers/block/Makefile index d7f463d6312..40528ba56d1 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -39,4 +39,4 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o -swim_mod-objs := swim.o swim_asm.o +swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile index e76d997183c..06ea82cdf27 100644 --- a/drivers/block/aoe/Makefile +++ b/drivers/block/aoe/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_ATA_OVER_ETH) += aoe.o -aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o +aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 516d5bbec2b..9279272b373 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -2833,7 +2833,7 @@ static int cciss_revalidate(struct gendisk *disk) sector_t total_size; InquiryData_struct *inq_buff = NULL; - for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { + for (logvol = 0; logvol <= h->highest_lun; logvol++) { if (!h->drv[logvol]) continue; if (memcmp(h->drv[logvol]->LunID, drv->LunID, diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b9ba04fc2b3..77fc76f8aea 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, struct block_device *bdev = opened_bdev[cnt]; if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) continue; - __invalidate_device(bdev); + __invalidate_device(bdev, true); } mutex_unlock(&open_lock); } else { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 44e18c073c4..49e6a545eb6 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1641,6 +1641,9 @@ out: static void loop_free(struct loop_device *lo) { + if (!lo->lo_queue->queue_lock) + lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock; + blk_cleanup_queue(lo->lo_queue); put_disk(lo->lo_disk); list_del(&lo->lo_list); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a32fb41246f..e6fc716aca4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -53,7 +53,6 @@ #define DBG_BLKDEV 0x0100 #define DBG_RX 0x0200 #define DBG_TX 0x0400 -static 
DEFINE_MUTEX(nbd_mutex); static unsigned int debugflags; #endif /* NDEBUG */ @@ -718,11 +717,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); - mutex_lock(&nbd_mutex); mutex_lock(&lo->tx_lock); error = __nbd_ioctl(bdev, lo, cmd, arg); mutex_unlock(&lo->tx_lock); - mutex_unlock(&nbd_mutex); return error; } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a126e614601..6dcd55a74c0 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03F0, 0x311D) }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE02C) }, { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1da773f899a..700a3840fdd 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work) if (hdev->conn_hash.sco_num > 0) { if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { - err = usb_autopm_get_interface(data->isoc); + err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); @@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work) __set_isoc_interface(hdev, 0); if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) - usb_autopm_put_interface(data->isoc); + usb_autopm_put_interface(data->isoc ? 
data->isoc : data->intf); } } @@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf, usb_set_intfdata(intf, data); - usb_enable_autosuspend(interface_to_usbdev(intf)); - return 0; } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 14033a36bcd..e2c48a7eccf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -409,7 +409,8 @@ int register_cdrom(struct cdrom_device_info *cdi) } ENSURE(drive_status, CDC_DRIVE_STATUS ); - ENSURE(media_changed, CDC_MEDIA_CHANGED); + if (cdo->check_events == NULL && cdo->media_changed == NULL) + *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); ENSURE(lock_door, CDC_LOCK); ENSURE(select_speed, CDC_SELECT_SPEED); diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 5bc765d4c3c..8238f89f73c 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o obj-$(CONFIG_SX) += sx.o generic_serial.o obj-$(CONFIG_RIO) += rio/ generic_serial.o +obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o obj-$(CONFIG_RAW_DRIVER) += raw.o obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o obj-$(CONFIG_MSPEC) += mspec.o diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 9252e85706e..780498d7658 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -773,18 +773,23 @@ int __init agp_amd64_init(void) #else printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); #endif + pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; } /* First check that we have at least one AMD64 NB */ - if (!pci_dev_present(amd_nb_misc_ids)) + if (!pci_dev_present(amd_nb_misc_ids)) { + pci_unregister_driver(&agp_amd64_pci_driver); return -ENODEV; + } /* Look for any AGP bridge */ agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; err = driver_attach(&agp_amd64_pci_driver.driver); - if (err == 0 && agp_bridges_found == 0) + if (err == 0 && agp_bridges_found == 0) { + pci_unregister_driver(&agp_amd64_pci_driver); err = -ENODEV; + } } return err; } diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c195bfeade1..5feebe2800e 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -130,6 +130,7 @@ #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) #define I915_IFPADDR 0x60 +#define I830_HIC 0x70 /* Intel 965G registers */ #define I965_MSAC 0x62 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fab3d3265ad..0d09b537bb9 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/agp_backend.h> +#include <linux/delay.h> #include <asm/smp.h> #include "agp.h" #include "intel-agp.h" @@ -70,12 +71,8 @@ static struct _intel_private { u32 __iomem *gtt; /* I915G */ bool clear_fake_agp; /* on first access via agp, fill with scratch */ int num_dcache_entries; - union { - void __iomem *i9xx_flush_page; - void *i8xx_flush_page; - }; + void __iomem *i9xx_flush_page; char *i81x_gtt_table; - struct page *i8xx_page; struct resource ifp_resource; int resource_valid; struct page *scratch_page; @@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void) static void i830_cleanup(void) { - if (intel_private.i8xx_flush_page) { - kunmap(intel_private.i8xx_flush_page); - intel_private.i8xx_flush_page = NULL; - } - - __free_page(intel_private.i8xx_page); - 
intel_private.i8xx_page = NULL; -} - -static void intel_i830_setup_flush(void) -{ - /* return if we've already set the flush mechanism up */ - if (intel_private.i8xx_page) - return; - - intel_private.i8xx_page = alloc_page(GFP_KERNEL); - if (!intel_private.i8xx_page) - return; - - intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); - if (!intel_private.i8xx_flush_page) - i830_cleanup(); } /* The chipset_flush interface needs to get data that has already been @@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void) */ static void i830_chipset_flush(void) { - unsigned int *pg = intel_private.i8xx_flush_page; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + /* Forcibly evict everything from the CPU write buffers. + * clflush appears to be insufficient. + */ + wbinvd_on_all_cpus(); + + /* Now we've only seen documents for this magic bit on 855GM, + * we hope it exists for the other gen2 chipsets... + * + * Also works as advertised on my 845G. + */ + writel(readl(intel_private.registers+I830_HIC) | (1<<31), + intel_private.registers+I830_HIC); - memset(pg, 0, 1024); + while (readl(intel_private.registers+I830_HIC) & (1<<31)) { + if (time_after(jiffies, timeout)) + break; - if (cpu_has_clflush) - clflush_cache_range(pg, 1024); - else if (wbinvd_on_all_cpus() != 0) - printk(KERN_ERR "Timed out waiting for cache flush.\n"); + udelay(50); + } } static void i830_write_entry(dma_addr_t addr, unsigned int entry, @@ -849,8 +837,6 @@ static int i830_setup(void) intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; - intel_i830_setup_flush(); - return 0; } diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index b6ae6e9a9c5..7855f9f45b8 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -320,6 +320,7 @@ static int unload_when_empty = 1; static int add_smi(struct smi_info *smi); static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *to_clean); +static void cleanup_ipmi_si(void); static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block *nb) @@ -3450,16 +3451,7 @@ static int __devinit init_ipmi_si(void) mutex_lock(&smi_infos_lock); if (unload_when_empty && list_empty(&smi_infos)) { mutex_unlock(&smi_infos_lock); -#ifdef CONFIG_PCI - if (pci_registered) - pci_unregister_driver(&ipmi_pci_driver); -#endif - -#ifdef CONFIG_PPC_OF - if (of_registered) - of_unregister_platform_driver(&ipmi_of_platform_driver); -#endif - driver_unregister(&ipmi_driver.driver); + cleanup_ipmi_si(); printk(KERN_WARNING PFX "Unable to find any System Interface(s)\n"); return -ENODEV; diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 777181a2e60..bcbbc71febb 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -830,8 +830,7 @@ static void monitor_card(unsigned long p) test_bit(IS_ANY_T1, &dev->flags))) { DEBUGP(4, dev, "Perform AUTOPPS\n"); set_bit(IS_AUTOPPS_ACT, &dev->flags); - ptsreq.protocol = ptsreq.protocol = - (0x01 << dev->proto); + ptsreq.protocol = (0x01 << dev->proto); ptsreq.flags = 0x01; ptsreq.pts1 = 0x00; ptsreq.pts2 = 0x00; diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 94b8eb4d691..444155a305a 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c @@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data) static int ipwireless_probe(struct pcmcia_device 
*p_dev, void *priv_data) { struct ipw_dev *ipw = priv_data; - struct resource *io_resource; int ret; p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; @@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) if (ret) return ret; - io_resource = request_region(p_dev->resource[0]->start, - resource_size(p_dev->resource[0]), - IPWIRELESS_PCCARD_NAME); + if (!request_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit; + } p_dev->resource[2]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; @@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); if (ret != 0) - goto exit2; + goto exit1; ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; - ipw->attr_memory = ioremap(p_dev->resource[2]->start, + ipw->common_memory = ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); - request_mem_region(p_dev->resource[2]->start, - resource_size(p_dev->resource[2]), - IPWIRELESS_PCCARD_NAME); + if (!request_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit2; + } p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); if (ret != 0) - goto exit2; + goto exit3; ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); if (ret != 0) @@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) ipw->attr_memory = ioremap(p_dev->resource[3]->start, resource_size(p_dev->resource[3])); - request_mem_region(p_dev->resource[3]->start, - resource_size(p_dev->resource[3]), - IPWIRELESS_PCCARD_NAME); + if (!request_mem_region(p_dev->resource[3]->start, + resource_size(p_dev->resource[3]), + IPWIRELESS_PCCARD_NAME)) { + ret = -EBUSY; + goto exit4; + } return 0; +exit4: + iounmap(ipw->attr_memory); exit3: + release_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); exit2: - if (ipw->common_memory) { - release_mem_region(p_dev->resource[2]->start, - resource_size(p_dev->resource[2])); - iounmap(ipw->common_memory); - } + iounmap(ipw->common_memory); exit1: - release_resource(io_resource); + release_region(p_dev->resource[0]->start, + resource_size(p_dev->resource[0])); +exit: pcmcia_disable_device(p_dev); - return -1; + return ret; } static int config_ipwireless(struct ipw_dev *ipw) @@ -219,6 +229,8 @@ exit: static void release_ipwireless(struct ipw_dev *ipw) { + release_region(ipw->link->resource[0]->start, + resource_size(ipw->link->resource[0])); if (ipw->common_memory) { release_mem_region(ipw->link->resource[2]->start, resource_size(ipw->link->resource[2])); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 36e0fa161c2..1f46f1cd922 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; - if (duration_idx != TPM_UNDEFINED) { + if (duration_idx != TPM_UNDEFINED) duration = chip->vendor.duration[duration_idx]; - /* if duration is 0, it's because chip->vendor.duration wasn't */ - /* filled yet, so we set the lowest timeout just to give enough */ - /* time for tpm_get_timeouts() to succeed */ - return 
(duration <= 0 ? HZ : duration); - } else + if (duration <= 0) return 2 * 60 * HZ; + else + return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); diff --git a/drivers/tty/hvc/virtio_console.c b/drivers/char/virtio_console.c index 896a2ced1d2..49039318633 100644 --- a/drivers/tty/hvc/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1,6 +1,7 @@ /* * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation - * Copyright (C) 2009, 2010 Red Hat, Inc. + * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. + * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,7 +32,7 @@ #include <linux/virtio_console.h> #include <linux/wait.h> #include <linux/workqueue.h> -#include "hvc_console.h" +#include "../tty/hvc/hvc_console.h" /* * This is a global struct for storing common data for all the devices @@ -1462,6 +1463,17 @@ static void control_work_handler(struct work_struct *work) spin_unlock(&portdev->cvq_lock); } +static void out_intr(struct virtqueue *vq) +{ + struct port *port; + + port = find_port_by_vq(vq->vdev->priv, vq); + if (!port) + return; + + wake_up_interruptible(&port->waitqueue); +} + static void in_intr(struct virtqueue *vq) { struct port *port; @@ -1566,7 +1578,7 @@ static int init_vqs(struct ports_device *portdev) */ j = 0; io_callbacks[j] = in_intr; - io_callbacks[j + 1] = NULL; + io_callbacks[j + 1] = out_intr; io_names[j] = "input"; io_names[j + 1] = "output"; j += 2; @@ -1580,7 +1592,7 @@ static int init_vqs(struct ports_device *portdev) for (i = 1; i < nr_ports; i++) { j += 2; io_callbacks[j] = in_intr; - io_callbacks[j + 1] = NULL; + io_callbacks[j + 1] = out_intr; io_names[j] = "input"; io_names[j + 1] = "output"; } diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 297f48b0cba..07bca4970e5 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -79,6 +79,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> +#include <linux/delay.h> #include <linux/dmapool.h> #include <linux/dmaengine.h> #include <linux/amba/bus.h> @@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan, } /* - * Overall DMAC remains enabled always. + * Pause the channel by setting the HALT bit. * - * Disabling individual channels could lose data. + * For M->P transfers, pause the DMAC first and then stop the peripheral - + * the FIFO can only drain if the peripheral is still requesting data. + * (note: this can still timeout if the DMAC FIFO never drains of data.) * - * Disable the peripheral DMA after disabling the DMAC in order to allow - * the DMAC FIFO to drain, and hence allow the channel to show inactive + * For P->M transfers, disable the peripheral first to stop it filling + * the DMAC FIFO, and then pause the DMAC. 
*/ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) { u32 val; + int timeout; /* Set the HALT bit and wait for the FIFO to drain */ val = readl(ch->base + PL080_CH_CONFIG); @@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) writel(val, ch->base + PL080_CH_CONFIG); /* Wait for channel inactive */ - while (pl08x_phy_channel_busy(ch)) - cpu_relax(); + for (timeout = 1000; timeout; timeout--) { + if (!pl08x_phy_channel_busy(ch)) + break; + udelay(1); + } + if (pl08x_phy_channel_busy(ch)) + pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); } static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) @@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) } -/* Stops the channel */ -static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) +/* + * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and + * clears any pending interrupt status. This should not be used for + * an on-going transfer, but as a method of shutting down a channel + * (eg, when it's no longer used) or terminating a transfer. + */ +static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, + struct pl08x_phy_chan *ch) { - u32 val; + u32 val = readl(ch->base + PL080_CH_CONFIG); - pl08x_pause_phy_chan(ch); + val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | + PL080_CONFIG_TC_IRQ_MASK); - /* Disable channel */ - val = readl(ch->base + PL080_CH_CONFIG); - val &= ~PL080_CONFIG_ENABLE; - val &= ~PL080_CONFIG_ERR_IRQ_MASK; - val &= ~PL080_CONFIG_TC_IRQ_MASK; writel(val, ch->base + PL080_CH_CONFIG); + + writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); + writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); } static inline u32 get_bytes_in_cctl(u32 cctl) @@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, { unsigned long flags; + spin_lock_irqsave(&ch->lock, flags); + /* Stop the channel and clear its interrupts */ - pl08x_stop_phy_chan(ch); - writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); - writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); + pl08x_terminate_phy_chan(pl08x, ch); /* Mark it as free */ - spin_lock_irqsave(&ch->lock, flags); ch->serving = NULL; spin_unlock_irqrestore(&ch->lock, flags); } @@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_stop_phy_chan(plchan->phychan); + pl08x_terminate_phy_chan(pl08x, plchan->phychan); /* * Mark physical channel as free and free any slave diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e53d438142b..e18eaabe92b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -49,6 +49,7 @@ struct imxdma_channel { struct imxdma_engine { struct device *dev; + struct device_dma_parameters dma_parms; struct dma_device dma_device; struct imxdma_channel channel[MAX_DMA_CHANNELS]; }; @@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( else dmamode = DMA_MODE_WRITE; + switch (imxdmac->word_size) { + case DMA_SLAVE_BUSWIDTH_4_BYTES: + if (sgl->length & 3 || sgl->dma_address & 3) + return NULL; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + if (sgl->length & 1 || sgl->dma_address & 1) + return NULL; + break; + case DMA_SLAVE_BUSWIDTH_1_BYTE: + break; + default: + return NULL; + } + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, dma_length, imxdmac->per_address, dmamode); if (ret) @@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev) 
INIT_LIST_HEAD(&imxdma->dma_device.channels); + dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); + /* Initialize channel parameters */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct imxdma_channel *imxdmac = &imxdma->channel[i]; @@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdmac->imxdma = imxdma; spin_lock_init(&imxdmac->lock); - dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); - dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); - imxdmac->chan.device = &imxdma->dma_device; - imxdmac->chan.chan_id = i; imxdmac->channel = i; /* Add the channel to the DMAC list */ @@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, imxdma); + imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; + dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); + ret = dma_async_device_register(&imxdma->dma_device); if (ret) { dev_err(&pdev->dev, "unable to register\n"); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d5a5d4d9c19..b6d1455fa93 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -230,7 +230,7 @@ struct sdma_engine; * struct sdma_channel - housekeeping for a SDMA channel * * @sdma pointer to the SDMA engine for this channel - * @channel the channel number, matches dmaengine chan_id + * @channel the channel number, matches dmaengine chan_id + 1 * @direction transfer type. Needed for setting SDMA script * @peripheral_type Peripheral type. Needed for setting SDMA script * @event_id0 aka dma request line @@ -301,6 +301,7 @@ struct sdma_firmware_header { struct sdma_engine { struct device *dev; + struct device_dma_parameters dma_parms; struct sdma_channel channel[MAX_DMA_CHANNELS]; struct sdma_channel_control *channel_control; void __iomem *regs; @@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) if (bd->mode.status & BD_RROR) sdmac->status = DMA_ERROR; else - sdmac->status = DMA_SUCCESS; + sdmac->status = DMA_IN_PROGRESS; bd->mode.status |= BD_DONE; sdmac->buf_tail++; @@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) __raw_writel(1 << channel, sdma->regs + SDMA_H_START); } -static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) +static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) { - dma_cookie_t cookie = sdma->chan.cookie; + dma_cookie_t cookie = sdmac->chan.cookie; if (++cookie < 0) cookie = 1; - sdma->chan.cookie = cookie; - sdma->desc.cookie = cookie; + sdmac->chan.cookie = cookie; + sdmac->desc.cookie = cookie; return cookie; } @@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) cookie = sdma_assign_cookie(sdmac); - sdma_enable_channel(sdma, tx->chan->chan_id); + sdma_enable_channel(sdma, sdmac->channel); spin_unlock_irq(&sdmac->lock); @@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) struct imx_dma_data *data = chan->private; int prio, ret; - /* No need to execute this for internal channel 0 */ - if (chan->chan_id == 0) - return 0; - if (!data) return -EINVAL; @@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; int ret, i, count; - int channel = chan->chan_id; + int channel = sdmac->channel; struct scatterlist *sg; if (sdmac->status == DMA_IN_PROGRESS) @@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor 
*sdma_prep_slave_sg( ret = -EINVAL; goto err_out; } - if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) + + switch (sdmac->word_size) { + case DMA_SLAVE_BUSWIDTH_4_BYTES: bd->mode.command = 0; - else - bd->mode.command = sdmac->word_size; + if (count & 3 || sg->dma_address & 3) + return NULL; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + bd->mode.command = 2; + if (count & 1 || sg->dma_address & 1) + return NULL; + break; + case DMA_SLAVE_BUSWIDTH_1_BYTE: + bd->mode.command = 1; + break; + default: + return NULL; + } param = BD_DONE | BD_EXTD | BD_CONT; - if (sdmac->flags & IMX_DMA_SG_LOOP) { + if (i + 1 == sg_len) { param |= BD_INTR; - if (i + 1 == sg_len) - param |= BD_WRAP; + param |= BD_LAST; + param &= ~BD_CONT; } - if (i + 1 == sg_len) - param |= BD_INTR; - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", i, count, sg->dma_address, param & BD_WRAP ? "wrap" : "", @@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( return &sdmac->desc; err_out: + sdmac->status = DMA_ERROR; return NULL; } @@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; int num_periods = buf_len / period_len; - int channel = chan->chan_id; + int channel = sdmac->channel; int ret, i = 0, buf = 0; dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); @@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, { struct sdma_channel *sdmac = to_sdma_chan(chan); dma_cookie_t last_used; - enum dma_status ret; last_used = chan->cookie; - ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); - return ret; + return sdmac->status; } static void sdma_issue_pending(struct dma_chan *chan) @@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma, /* download the RAM image for SDMA */ sdma_load_script(sdma, ram_code, header->ram_code_size, - sdma->script_addrs->ram_code_start_addr); + addr->ram_code_start_addr); clk_disable(sdma->clk); sdma_add_scripts(sdma, addr); @@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev) struct resource *iores; struct sdma_platform_data *pdata = pdev->dev.platform_data; int i; - dma_cap_mask_t mask; struct sdma_engine *sdma; sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); @@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev) sdma->version = pdata->sdma_version; + dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); + dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); + INIT_LIST_HEAD(&sdma->dma_device.channels); /* Initialize channel parameters */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { @@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev) sdmac->sdma = sdma; spin_lock_init(&sdmac->lock); - dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); - dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); - sdmac->chan.device = &sdma->dma_device; - sdmac->chan.chan_id = i; sdmac->channel = i; - /* Add the channel to the DMAC list */ - list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); + /* + * Add the channel to the DMAC list. Do not add channel 0 though + * because we need it internally in the SDMA driver. This also means + * that channel 0 in dmaengine counting matches sdma channel 1. 
+ */ + if (i) + list_add_tail(&sdmac->chan.device_node, + &sdma->dma_device.channels); } ret = sdma_init(sdma); @@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_control = sdma_control; sdma->dma_device.device_issue_pending = sdma_issue_pending; + sdma->dma_device.dev->dma_parms = &sdma->dma_parms; + dma_set_max_seg_size(sdma->dma_device.dev, 65535); ret = dma_async_device_register(&sdma->dma_device); if (ret) { @@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_init; } - /* request channel 0. This is an internal control channel - * to the SDMA engine and not available to clients. - */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - dma_request_channel(mask, NULL, NULL); - dev_info(sdma->dev, "initialized\n"); return 0; @@ -1348,7 +1354,7 @@ err_clk: err_request_region: err_irq: kfree(sdma); - return 0; + return ret; } static int __exit sdma_remove(struct platform_device *pdev) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb26ee9773d..c1a125e7d1d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); - /* - * Problem (observed with channel DMAIC_7): after enabling the channel - * and initialising buffers, there comes an interrupt with current still - * pointing at buffer 0, whereas it should use buffer 0 first and only - * generate an interrupt when it is done, then current should already - * point to buffer 1. This spurious interrupt also comes on channel - * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the - * first interrupt, there comes the second with current correctly - * pointing to buffer 1 this time. But sometimes this second interrupt - * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling - * the channel seems to prevent the channel from hanging, but it doesn't - * prevent the spurious interrupt. This might also be unsafe. Think - * about the IDMAC controller trying to switch to a buffer, when we - * clear the ready bit, and re-enable it a moment later. - */ - reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); - idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); - idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); - - reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); - idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); - idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); - spin_unlock_irqrestore(&ipu->lock, flags); return 0; @@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) /* Other interrupts do not interfere with this channel */ spin_lock(&ichan->lock); - if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && - ((curbuf >> chan_id) & 1) == ichan->active_buffer && - !list_is_last(ichan->queue.next, &ichan->queue))) { - int i = 100; - - /* This doesn't help. 
See comment in ipu_disable_channel() */ - while (--i) { - curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); - if (((curbuf >> chan_id) & 1) != ichan->active_buffer) - break; - cpu_relax(); - } - - if (!i) { - spin_unlock(&ichan->lock); - dev_dbg(dev, - "IRQ on active buffer on channel %x, active " - "%d, ready %x, %x, current %x!\n", chan_id, - ichan->active_buffer, ready0, ready1, curbuf); - return IRQ_NONE; - } else - dev_dbg(dev, - "Buffer deactivated on channel %x, active " - "%d, ready %x, %x, current %x, rest %d!\n", chan_id, - ichan->active_buffer, ready0, ready1, curbuf, i); - } - if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || (!ichan->active_buffer && (ready0 >> chan_id) & 1) )) { diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4a5ecc58025..23e03554f0d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -826,8 +826,6 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) /* Display and decode various NB registers for debug purposes. */ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) { - int ganged; - debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); debugf1(" NB two channel DRAM capable: %s\n", @@ -851,28 +849,19 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) debugf1(" DramHoleValid: %s\n", (pvt->dhar & DHAR_VALID) ? "yes" : "no"); + amd64_debug_display_dimm_sizes(0, pvt); + /* everything below this point is Fam10h and above */ - if (boot_cpu_data.x86 == 0xf) { - amd64_debug_display_dimm_sizes(0, pvt); + if (boot_cpu_data.x86 == 0xf) return; - } + + amd64_debug_display_dimm_sizes(1, pvt); amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); /* Only if NOT ganged does dclr1 have valid info */ if (!dct_ganging_enabled(pvt)) amd64_dump_dramcfg_low(pvt->dclr1, 1); - - /* - * Determine if ganged and then dump memory sizes for first controller, - * and if NOT ganged dump info for 2nd controller. - */ - ganged = dct_ganging_enabled(pvt); - - amd64_debug_display_dimm_sizes(0, pvt); - - if (!ganged) - amd64_debug_display_dimm_sizes(1, pvt); } /* Read in both of DBAM registers */ @@ -1644,11 +1633,10 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) WARN_ON(ctrl != 0); } - debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", - ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); + dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; + dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0; - dbam = ctrl ? pvt->dbam1 : pvt->dbam0; - dcsb = ctrl ? 
pvt->dcsb1 : pvt->dcsb0; + debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index e28e4166817..bcb1126e3d0 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -378,10 +378,17 @@ static void __init print_filtered(const char *info) static void __init dmi_dump_ids(void) { + const char *board; /* Board Name is optional */ + printk(KERN_DEBUG "DMI: "); - print_filtered(dmi_get_system_info(DMI_BOARD_NAME)); - printk(KERN_CONT "/"); + print_filtered(dmi_get_system_info(DMI_SYS_VENDOR)); + printk(KERN_CONT " "); print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); + board = dmi_get_system_info(DMI_BOARD_NAME); + if (board) { + printk(KERN_CONT "/"); + print_filtered(board); + } printk(KERN_CONT ", BIOS "); print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); printk(KERN_CONT " "); diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index a261972f603..b473429eee7 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -60,6 +60,7 @@ struct pca953x_chip { unsigned gpio_start; uint16_t reg_output; uint16_t reg_direction; + struct mutex i2c_lock; #ifdef CONFIG_GPIO_PCA953X_IRQ struct mutex irq_lock; @@ -119,13 +120,17 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) chip = container_of(gc, struct pca953x_chip, gpio_chip); + mutex_lock(&chip->i2c_lock); reg_val = chip->reg_direction | (1u << off); ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); if (ret) - return ret; + goto exit; chip->reg_direction = reg_val; - return 0; + ret = 0; +exit: + mutex_unlock(&chip->i2c_lock); + return ret; } static int pca953x_gpio_direction_output(struct gpio_chip *gc, @@ -137,6 +142,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, chip = container_of(gc, struct pca953x_chip, gpio_chip); + mutex_lock(&chip->i2c_lock); /* set output level */ if (val) reg_val = chip->reg_output | (1u << off); @@ -145,7 +151,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); if (ret) - return ret; + goto exit; chip->reg_output = reg_val; @@ -153,10 +159,13 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, reg_val = chip->reg_direction & ~(1u << off); ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val); if (ret) - return ret; + goto exit; chip->reg_direction = reg_val; - return 0; + ret = 0; +exit: + mutex_unlock(&chip->i2c_lock); + return ret; } static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) @@ -167,7 +176,9 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) chip = container_of(gc, struct pca953x_chip, gpio_chip); + mutex_lock(&chip->i2c_lock); ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val); + mutex_unlock(&chip->i2c_lock); if (ret < 0) { /* NOTE: diagnostic already emitted; that's all we should * do unless gpio_*_value_cansleep() calls become different @@ -187,6 +198,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) chip = container_of(gc, struct pca953x_chip, gpio_chip); + mutex_lock(&chip->i2c_lock); if (val) reg_val = chip->reg_output | (1u << off); else @@ -194,9 +206,11 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val); if (ret) - return; + goto exit; chip->reg_output = reg_val; +exit: + 
mutex_unlock(&chip->i2c_lock); } static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) @@ -517,6 +531,8 @@ static int __devinit pca953x_probe(struct i2c_client *client, chip->names = pdata->names; + mutex_init(&chip->i2c_lock); + /* initialize cached registers from their original values. * we can't share this chip with another i2c master. */ diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 812aaac4438..ab1162da70f 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -272,17 +272,18 @@ int drm_vma_info(struct seq_file *m, void *data) #endif mutex_lock(&dev->struct_mutex); - seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n", + seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n", atomic_read(&dev->vma_count), - high_memory, (u64)virt_to_phys(high_memory)); + high_memory, (void *)virt_to_phys(high_memory)); list_for_each_entry(pt, &dev->vmalist, head) { vma = pt->vma; if (!vma) continue; seq_printf(m, - "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", - pt->pid, vma->vm_start, vma->vm_end, + "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000", + pt->pid, + (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_flags & VM_READ ? 'r' : '-', vma->vm_flags & VM_WRITE ? 'w' : '-', vma->vm_flags & VM_EXEC ? 'x' : '-', diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index cb49685bde0..a34ef97d3c8 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -154,8 +154,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) * available. In that case we can't account for this and just * hope for the best. */ - if ((vblrc > 0) && (abs(diff_ns) > 1000000)) + if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { atomic_inc(&dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); + } /* Invalidate all timestamps while vblank irq's are off. */ clear_vblank_timestamps(dev, crtc); @@ -481,6 +483,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) /* Dot clock in Hz: */ dotclock = (u64) crtc->hwmode.clock * 1000; + /* Fields of interlaced scanout modes are only halve a frame duration. + * Double the dotclock to get halve the frame-/line-/pixelduration. + */ + if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) + dotclock *= 2; + /* Valid dotclock? */ if (dotclock > 0) { /* Convert scanline length in pixels and video dot clock to @@ -593,14 +601,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, return -EAGAIN; } - /* Don't know yet how to handle interlaced or - * double scan modes. Just no-op for now. - */ - if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); - return -ENOTSUPP; - } - /* Get current scanout position with system timestamp. * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times * if single query takes longer than max_error nanoseconds. 
@@ -848,10 +848,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) if (rc) { tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; vblanktimestamp(dev, crtc, tslot) = t_vblank; - smp_wmb(); } + smp_mb__before_atomic_inc(); atomic_add(diff, &dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); } /** @@ -1001,7 +1002,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - int crtc, ret = 0; + int ret = 0; + unsigned int crtc; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) @@ -1283,15 +1285,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) * e.g., due to spurious vblank interrupts. We need to * ignore those for accounting. */ - if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { + if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { /* Store new timestamp in ringbuffer. */ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; - smp_wmb(); /* Increment cooked vblank count. This also atomically commits * the timestamp computed above. */ + smp_mb__before_atomic_inc(); atomic_inc(&dev->_vblank_count[crtc]); + smp_mb__after_atomic_inc(); } else { DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", crtc, (int) diff_ns); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3601466c550..09e0327fc6c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) struct intel_crtc *crtc; list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { - const char *pipe = crtc->pipe ? "B" : "A"; - const char *plane = crtc->plane ? "B" : "A"; + const char pipe = pipe_name(crtc->pipe); + const char plane = plane_name(crtc->plane); struct intel_unpin_work *work; spin_lock_irqsave(&dev->event_lock, flags); work = crtc->unpin_work; if (work == NULL) { - seq_printf(m, "No flip due on pipe %s (plane %s)\n", + seq_printf(m, "No flip due on pipe %c (plane %c)\n", pipe, plane); } else { if (!work->pending) { - seq_printf(m, "Flip queued on pipe %s (plane %s)\n", + seq_printf(m, "Flip queued on pipe %c (plane %c)\n", pipe, plane); } else { - seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n", + seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", pipe, plane); } if (work->enable_stall_check) @@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret, i; + int ret, i, pipe; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(IIR)); seq_printf(m, "Interrupt mask: %08x\n", I915_READ(IMR)); - seq_printf(m, "Pipe A stat: %08x\n", - I915_READ(PIPEASTAT)); - seq_printf(m, "Pipe B stat: %08x\n", - I915_READ(PIPEBSTAT)); + for_each_pipe(pipe) + seq_printf(m, "Pipe %c stat: %08x\n", + pipe_name(pipe), + I915_READ(PIPESTAT(pipe))); } else { seq_printf(m, "North Display Interrupt enable: %08x\n", I915_READ(DEIER)); @@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - volatile u32 *hws; + const volatile u32 __iomem *hws; int 
i; ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; - hws = (volatile u32 *)ring->status_page.page_addr; + hws = (volatile u32 __iomem *)ring->status_page.page_addr; if (hws == NULL) return 0; @@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { - u8 *virt = ring->virtual_start; + const u8 __iomem *virt = ring->virtual_start; uint32_t off; for (off = 0; off < ring->size; off += 4) { @@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused) } } - if (error->ringbuffer) { - struct drm_i915_error_object *obj = error->ringbuffer; - - seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); - offset += 4; + for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { + if (error->ringbuffer[i]) { + struct drm_i915_error_object *obj = error->ringbuffer[i]; + seq_printf(m, "%s --- ringbuffer = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + seq_printf(m, "%08x : %08x\n", + offset, + obj->pages[page][elt]); + offset += 4; + } } } } @@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + u32 rpstat; + u32 rpupei, rpcurup, rpprevup; + u32 rpdownei, rpcurdown, rpprevdown; int max_freq; /* RPSTAT1 is in the GT power well */ - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); + + rpstat = I915_READ(GEN6_RPSTAT1); + rpupei = I915_READ(GEN6_RP_CUR_UP_EI); + rpcurup = I915_READ(GEN6_RP_CUR_UP); + rpprevup = I915_READ(GEN6_RP_PREV_UP); + rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); + rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); + rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); - seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); + seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); seq_printf(m, "Render p-state ratio: %d\n", (gt_perf_status & 0xff00) >> 8); seq_printf(m, "Render p-state VID: %d\n", gt_perf_status & 0xff); seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); + seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> + GEN6_CAGF_SHIFT) * 100); + seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & + GEN6_CURICONT_MASK); + seq_printf(m, "RP CUR UP: %dus\n", rpcurup & + GEN6_CURBSYTAVG_MASK); + seq_printf(m, "RP PREV UP: %dus\n", rpprevup & + GEN6_CURBSYTAVG_MASK); + seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & + GEN6_CURIAVG_MASK); + seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & + GEN6_CURBSYTAVG_MASK); + seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & + GEN6_CURBSYTAVG_MASK); max_freq = (rp_state_cap & 0xff0000) >> 16; seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", @@ -888,7 +918,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", max_freq * 100); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } else { seq_printf(m, "no P-state info available\n"); } @@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) } static struct drm_info_list 
i915_debugfs_list[] = { - {"i915_capabilities", i915_capabilities, 0, 0}, + {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_gtt", i915_gem_gtt_info, 0}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 17bd766f208..72730377a01 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -43,6 +43,17 @@ #include <linux/slab.h> #include <acpi/video.h> +static void i915_write_hws_pga(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 addr; + + addr = dev_priv->status_page_dmah->busaddr; + if (INTEL_INFO(dev)->gen >= 4) + addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); +} + /** * Sets up the hardware status page for devices that need a physical address * in the register. @@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; + ring->status_page.page_addr = + (void __force __iomem *)dev_priv->status_page_dmah->vaddr; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); + memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); - if (INTEL_INFO(dev)->gen >= 4) - dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & - 0xf0; + i915_write_hws_pga(dev); - I915_WRITE(HWS_PGA, dev_priv->dma_status_page); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } @@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev) if (ring->status_page.gfx_addr != 0) intel_ring_setup_status_page(ring); else - I915_WRITE(HWS_PGA, dev_priv->dma_status_page); + i915_write_hws_pga(dev); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); @@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_EXEC_CONSTANTS: value = INTEL_INFO(dev)->gen >= 4; break; + case I915_PARAM_HAS_RELAXED_DELTA: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data, " G33 hw status page\n"); return -ENOMEM; } - ring->status_page.page_addr = dev_priv->hws_map.handle; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); + ring->status_page.page_addr = + (void __force __iomem *)dev_priv->hws_map.handle; + memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", @@ -1895,6 +1907,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_GEN2(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + mmio_bar = IS_GEN2(dev) ? 
1 : 0; dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); if (!dev_priv->regs) { @@ -2002,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); - dev_priv->trace_irq_seqno = 0; - ret = drm_vblank_init(dev, I915_NUM_PIPE); + if (IS_MOBILE(dev) || !IS_GEN2(dev)) + dev_priv->num_pipe = 2; + else + dev_priv->num_pipe = 1; + + ret = drm_vblank_init(dev, dev_priv->num_pipe); if (ret) goto out_gem_unload; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9ad42d58349..c34a8dd31d0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -43,16 +43,28 @@ module_param_named(modeset, i915_modeset, int, 0400); unsigned int i915_fbpercrtc = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); +int i915_panel_ignore_lid = 0; +module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); + unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); +unsigned int i915_semaphores = 1; +module_param_named(semaphores, i915_semaphores, int, 0600); + +unsigned int i915_enable_rc6 = 0; +module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); + unsigned int i915_lvds_downclock = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); unsigned int i915_panel_use_ssc = 1; module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); -bool i915_try_reset = true; +int i915_vbt_sdvo_panel_type = -1; +module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); + +static bool i915_try_reset = true; module_param_named(reset, i915_try_reset, bool, 0600); static struct drm_driver driver; @@ -251,7 +263,7 @@ void intel_detect_pch (struct drm_device *dev) } } -void __gen6_force_wake_get(struct drm_i915_private *dev_priv) +void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; @@ -267,12 +279,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) udelay(10); } -void __gen6_force_wake_put(struct drm_i915_private *dev_priv) +void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); POSTING_READ(FORCEWAKE); } +void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int loop = 500; + u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + while (fifo < 20 && loop--) { + udelay(10); + fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + } +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -360,7 +382,7 @@ static int i915_drm_thaw(struct drm_device *dev) /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); - if (dev_priv->renderctx && dev_priv->pwrctx) + if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a78197d43ce..449650545bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -49,17 +49,22 @@ enum pipe { PIPE_A = 0, PIPE_B, + PIPE_C, + I915_MAX_PIPES }; +#define pipe_name(p) ((p) + 'A') enum plane { PLANE_A = 0, PLANE_B, + PLANE_C, }; - -#define I915_NUM_PIPE 2 +#define plane_name(p) ((p) + 'A') #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) +#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) + /* Interface history: * * 1.1: Original. 
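The i915_drv.h hunk just above replaces the hard-coded two-pipe assumption with PIPE_C/I915_MAX_PIPES, the pipe_name()/plane_name() helpers and a for_each_pipe() iterator driven by dev_priv->num_pipe (which i915_driver_load now sets to 1 or 2, as shown in the i915_dma.c hunk earlier). A minimal standalone sketch of that pattern follows; it is a user-space mock for illustration only, not driver code — struct mock_i915_private and main() are invented here, and only the macro shapes are taken from the patch.

/* Standalone illustration of the pipe iteration pattern introduced above.
 * Mocked types; compile with any C99 compiler, no kernel headers needed.
 */
#include <stdio.h>

enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, I915_MAX_PIPES };

/* Same convention as the new macros: pipe 0 prints as 'A', 1 as 'B', ... */
#define pipe_name(p) ((p) + 'A')

struct mock_i915_private {
	int num_pipe;                        /* 1 or 2 in this series */
	unsigned int pipestat[I915_MAX_PIPES];
};

/* Mirrors the kernel's for_each_pipe(), which expects a local dev_priv. */
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)

int main(void)
{
	struct mock_i915_private priv = { .num_pipe = 2, .pipestat = { 0x1, 0x2 } };
	struct mock_i915_private *dev_priv = &priv;
	int pipe;

	/* Same shape as the i915_interrupt_info() change in i915_debugfs.c. */
	for_each_pipe(pipe)
		printf("Pipe %c stat: %08x\n", pipe_name(pipe),
		       dev_priv->pipestat[pipe]);

	return 0;
}

In the driver itself dev_priv comes from the drm_device; the mock struct here only stands in so the macro expansion can be shown compiling on its own.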
@@ -75,10 +80,7 @@ enum plane { #define DRIVER_PATCHLEVEL 0 #define WATCH_COHERENCY 0 -#define WATCH_EXEC 0 -#define WATCH_RELOC 0 #define WATCH_LISTS 0 -#define WATCH_PWRITE 0 #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 @@ -111,6 +113,7 @@ struct intel_opregion { struct opregion_swsci *swsci; struct opregion_asle *asle; void *vbt; + u32 __iomem *lid_state; }; #define OPREGION_SIZE (8*1024) @@ -144,8 +147,7 @@ struct intel_display_error_state; struct drm_i915_error_state { u32 eir; u32 pgtbl_er; - u32 pipeastat; - u32 pipebstat; + u32 pipestat[I915_MAX_PIPES]; u32 ipeir; u32 ipehr; u32 instdone; @@ -172,7 +174,7 @@ struct drm_i915_error_state { int page_count; u32 gtt_offset; u32 *pages[0]; - } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; + } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; struct drm_i915_error_buffer { u32 size; u32 name; @@ -200,9 +202,7 @@ struct drm_i915_display_funcs { void (*disable_fbc)(struct drm_device *dev); int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); - void (*update_wm)(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size); + void (*update_wm)(struct drm_device *dev); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -274,7 +274,6 @@ typedef struct drm_i915_private { uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; - dma_addr_t dma_status_page; uint32_t counter; drm_local_map_t hws_map; struct drm_i915_gem_object *pwrctx; @@ -289,7 +288,6 @@ typedef struct drm_i915_private { int page_flipping; atomic_t irq_received; - u32 trace_irq_seqno; /* protects the irq masks */ spinlock_t irq_lock; @@ -324,8 +322,6 @@ typedef struct drm_i915_private { int cfb_plane; int cfb_y; - int irq_enabled; - struct intel_opregion opregion; /* overlay */ @@ -387,7 +383,6 @@ typedef struct drm_i915_private { u32 saveDSPACNTR; u32 saveDSPBCNTR; u32 saveDSPARB; - u32 saveHWS; u32 savePIPEACONF; u32 savePIPEBCONF; u32 savePIPEASRC; @@ -615,6 +610,12 @@ typedef struct drm_i915_private { struct delayed_work retire_work; /** + * Are we in a non-interruptible section of code like + * modesetting? + */ + bool interruptible; + + /** * Flag if the X Server, and thus DRM, is not currently in * control of the device. 
* @@ -652,6 +653,7 @@ typedef struct drm_i915_private { unsigned int lvds_border_bits; /* Panel fitter placement and size for Ironlake+ */ u32 pch_pf_pos, pch_pf_size; + int panel_t3, panel_t12; struct drm_crtc *plane_to_crtc_mapping[2]; struct drm_crtc *pipe_to_crtc_mapping[2]; @@ -698,6 +700,8 @@ typedef struct drm_i915_private { /* list of fbdev register on this device */ struct intel_fbdev *fbdev; + + struct drm_property *broadcast_rgb_property; } drm_i915_private_t; struct drm_i915_gem_object { @@ -955,9 +959,13 @@ enum intel_chip_family { extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; +extern int i915_panel_ignore_lid; extern unsigned int i915_powersave; +extern unsigned int i915_semaphores; extern unsigned int i915_lvds_downclock; extern unsigned int i915_panel_use_ssc; +extern int i915_vbt_sdvo_panel_type; +extern unsigned int i915_enable_rc6; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); @@ -996,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_trace_irq_get(struct drm_device *dev, u32 seqno); -extern void i915_enable_interrupt (struct drm_device *dev); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); extern void i915_driver_irq_preinstall(struct drm_device * dev); @@ -1049,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, struct mem_block *heap); /* i915_gem.c */ -int i915_gem_check_is_wedged(struct drm_device *dev); int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, @@ -1092,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); -int __must_check i915_gem_flush_ring(struct drm_device *dev, - struct intel_ring_buffer *ring, +int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, @@ -1108,8 +1112,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); -int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, - bool interruptible); +int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, u32 seqno); @@ -1131,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) } static inline u32 -i915_gem_next_request_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +i915_gem_next_request_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; return ring->outstanding_lazy_request = dev_priv->next_seqno; } int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined, - bool interruptible); + struct intel_ring_buffer *pipelined); int __must_check i915_gem_object_put_fence(struct 
drm_i915_gem_object *obj); void i915_gem_retire_requests(struct drm_device *dev); @@ -1149,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, uint32_t read_domains, uint32_t write_domain); -int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, - bool interruptible); +int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); void i915_gem_do_init(struct drm_device *dev, @@ -1159,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev, unsigned long end); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); -int __must_check i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_i915_gem_request *request, - struct intel_ring_buffer *ring); -int __must_check i915_do_wait_request(struct drm_device *dev, - uint32_t seqno, - bool interruptible, - struct intel_ring_buffer *ring); +int __must_check i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + struct drm_i915_gem_request *request); +int __must_check i915_wait_request(struct intel_ring_buffer *ring, + uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, @@ -1183,6 +1180,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device *dev, struct drm_file *file); +uint32_t +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); + /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); @@ -1315,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m, #define __i915_read(x, y) \ static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = read##y(dev_priv->regs + reg); \ - trace_i915_reg_rw('R', reg, val, sizeof(val)); \ + trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } __i915_read(8, b) @@ -1326,7 +1326,7 @@ __i915_read(64, q) #define __i915_write(x, y) \ static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ - trace_i915_reg_rw('W', reg, val, sizeof(val)); \ + trace_i915_reg_rw(true, reg, val, sizeof(val)); \ write##y(val, dev_priv->regs + reg); \ } __i915_write(8, b) @@ -1359,62 +1359,29 @@ __i915_write(64, q) * must be set to prevent GT core from power down and stale values being * returned. 
*/ -void __gen6_force_wake_get(struct drm_i915_private *dev_priv); -void __gen6_force_wake_put (struct drm_i915_private *dev_priv); -static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) +void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); +void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); +void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); + +static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val; if (dev_priv->info->gen >= 6) { - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); val = I915_READ(reg); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } else val = I915_READ(reg); return val; } -static inline void -i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) +static inline void i915_gt_write(struct drm_i915_private *dev_priv, + u32 reg, u32 val) { - /* Trace down the write operation before the real write */ - trace_i915_reg_rw('W', reg, val, len); - switch (len) { - case 8: - writeq(val, dev_priv->regs + reg); - break; - case 4: - writel(val, dev_priv->regs + reg); - break; - case 2: - writew(val, dev_priv->regs + reg); - break; - case 1: - writeb(val, dev_priv->regs + reg); - break; - } + if (dev_priv->info->gen >= 6) + __gen6_gt_wait_for_fifo(dev_priv); + I915_WRITE(reg, val); } - -/** - * Reads a dword out of the status page, which is written to from the command - * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or - * MI_STORE_DATA_IMM. - * - * The following dwords have a reserved meaning: - * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. - * 0x04: ring 0 head pointer - * 0x05: ring 1 head pointer (915-class) - * 0x06: ring 2 head pointer (915-class) - * 0x10-0x1b: Context status DWords (GM45) - * 0x1f: Last written status offset. (GM45) - * - * The area from dword 0x20 to 0x3ff is available for driver usage. - */ -#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ - (LP_RING(dev_priv)->status_page.page_addr))[reg]) -#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) -#define I915_GEM_HWS_INDEX 0x20 -#define I915_BREADCRUMB_INDEX 0x21 - #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bc7f06b8fbc..c4c2855d002 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, dev_priv->mm.object_memory -= size; } -int -i915_gem_check_is_wedged(struct drm_device *dev) +static int +i915_gem_wait_for_error(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct completion *x = &dev_priv->error_completion; @@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev) if (ret) return ret; - /* Success, we reset the GPU! */ - if (!atomic_read(&dev_priv->mm.wedged)) - return 0; - - /* GPU is hung, bump the completion count to account for - * the token we just consumed so that we never hit zero and - * end up waiting upon a subsequent completion event that - * will never happen. - */ - spin_lock_irqsave(&x->wait.lock, flags); - x->done++; - spin_unlock_irqrestore(&x->wait.lock, flags); - return -EIO; + if (atomic_read(&dev_priv->mm.wedged)) { + /* GPU is hung, bump the completion count to account for + * the token we just consumed so that we never hit zero and + * end up waiting upon a subsequent completion event that + * will never happen. 
+ */ + spin_lock_irqsave(&x->wait.lock, flags); + x->done++; + spin_unlock_irqrestore(&x->wait.lock, flags); + } + return 0; } int i915_mutex_lock_interruptible(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ret = i915_gem_check_is_wedged(dev); + ret = i915_gem_wait_for_error(dev); if (ret) return ret; @@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) if (ret) return ret; - if (atomic_read(&dev_priv->mm.wedged)) { - mutex_unlock(&dev->struct_mutex); - return -EAGAIN; - } - WARN_ON(i915_verify_lists(dev)); return 0; } @@ -543,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -555,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } + trace_i915_gem_object_pread(obj, args->offset, args->size); + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, args->size); @@ -984,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -996,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } + trace_i915_gem_object_pwrite(obj, args->offset, args->size); + /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get * different detiling behavior between reading and writing. @@ -1078,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -1121,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -1150,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap *args = data; struct drm_gem_object *obj; - loff_t offset; unsigned long addr; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1165,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return -E2BIG; } - offset = args->offset; - down_write(¤t->mm->mmap_sem); addr = do_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -1211,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; - /* Now bind it into the GTT if needed */ - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto out; + trace_i915_gem_object_fault(obj, page_offset, true, write); + + /* Now bind it into the GTT if needed */ if (!obj->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret) @@ -1232,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (obj->tiling_mode == I915_TILING_NONE) ret = i915_gem_object_put_fence(obj); else - ret = i915_gem_object_get_fence(obj, NULL, true); + ret = i915_gem_object_get_fence(obj, NULL); if (ret) goto unlock; @@ -1248,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = 
vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); unlock: mutex_unlock(&dev->struct_mutex); - +out: switch (ret) { + case -EIO: case -EAGAIN: + /* Give the error handler a chance to run and move the + * objects off the GPU active list. Next time we service the + * fault, we should be able to transition the page into the + * GTT without touching the GPU (and so avoid further + * EIO/EGAIN). If the GPU is wedged, then there is no issue + * with coherency, just lost writes. + */ set_need_resched(); case 0: case -ERESTARTSYS: + case -EINTR: return VM_FAULT_NOPAGE; case -ENOMEM: return VM_FAULT_OOM; @@ -1427,7 +1433,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) * Return the required GTT alignment for an object, only taking into account * unfenced tiled surface requirements. */ -static uint32_t +uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; @@ -1472,7 +1478,7 @@ i915_gem_mmap_gtt(struct drm_file *file, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -1712,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) } static void -i915_gem_process_flushing_list(struct drm_device *dev, - uint32_t flush_domains, - struct intel_ring_buffer *ring) +i915_gem_process_flushing_list(struct intel_ring_buffer *ring, + uint32_t flush_domains) { struct drm_i915_gem_object *obj, *next; @@ -1727,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_active(obj, ring, - i915_gem_next_request_seqno(dev, ring)); + i915_gem_next_request_seqno(ring)); trace_i915_gem_object_change_domain(obj, obj->base.read_domains, @@ -1737,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev, } int -i915_add_request(struct drm_device *dev, +i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, - struct drm_i915_gem_request *request, - struct intel_ring_buffer *ring) + struct drm_i915_gem_request *request) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *file_priv = NULL; + drm_i915_private_t *dev_priv = ring->dev->dev_private; uint32_t seqno; int was_empty; int ret; BUG_ON(request == NULL); - if (file != NULL) - file_priv = file->driver_priv; - ret = ring->add_request(ring, &seqno); if (ret) return ret; - ring->outstanding_lazy_request = false; + trace_i915_gem_request_add(ring, seqno); request->seqno = seqno; request->ring = ring; @@ -1765,7 +1765,9 @@ i915_add_request(struct drm_device *dev, was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); - if (file_priv) { + if (file) { + struct drm_i915_file_private *file_priv = file->driver_priv; + spin_lock(&file_priv->mm.lock); request->file_priv = file_priv; list_add_tail(&request->client_list, @@ -1773,6 +1775,8 @@ i915_add_request(struct drm_device *dev, spin_unlock(&file_priv->mm.lock); } + ring->outstanding_lazy_request = false; + if (!dev_priv->mm.suspended) { mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); @@ -1889,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev) * This function clears the request list as sequence numbers are passed. 
*/ static void -i915_gem_retire_requests_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) +i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; int i; - if (!ring->status_page.page_addr || - list_empty(&ring->request_list)) + if (list_empty(&ring->request_list)) return; - WARN_ON(i915_verify_lists(dev)); + WARN_ON(i915_verify_lists(ring->dev)); seqno = ring->get_seqno(ring); @@ -1918,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, if (!i915_seqno_passed(seqno, request->seqno)) break; - trace_i915_gem_request_retire(dev, request->seqno); + trace_i915_gem_request_retire(ring, request->seqno); list_del(&request->list); i915_gem_request_remove_from_client(request); @@ -1944,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev, i915_gem_object_move_to_inactive(obj); } - if (unlikely (dev_priv->trace_irq_seqno && - i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { + if (unlikely(ring->trace_irq_seqno && + i915_seqno_passed(seqno, ring->trace_irq_seqno))) { ring->irq_put(ring); - dev_priv->trace_irq_seqno = 0; + ring->trace_irq_seqno = 0; } - WARN_ON(i915_verify_lists(dev)); + WARN_ON(i915_verify_lists(ring->dev)); } void @@ -1974,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev) } for (i = 0; i < I915_NUM_RINGS; i++) - i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); + i915_gem_retire_requests_ring(&dev_priv->ring[i]); } static void @@ -2008,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work) struct drm_i915_gem_request *request; int ret; - ret = i915_gem_flush_ring(dev, ring, 0, - I915_GEM_GPU_DOMAINS); + ret = i915_gem_flush_ring(ring, + 0, I915_GEM_GPU_DOMAINS); request = kzalloc(sizeof(*request), GFP_KERNEL); if (ret || request == NULL || - i915_add_request(dev, NULL, request, ring)) + i915_add_request(ring, NULL, request)) kfree(request); } @@ -2025,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ int -i915_do_wait_request(struct drm_device *dev, uint32_t seqno, - bool interruptible, struct intel_ring_buffer *ring) +i915_wait_request(struct intel_ring_buffer *ring, + uint32_t seqno) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; u32 ier; int ret = 0; BUG_ON(seqno == 0); - if (atomic_read(&dev_priv->mm.wedged)) - return -EAGAIN; + if (atomic_read(&dev_priv->mm.wedged)) { + struct completion *x = &dev_priv->error_completion; + bool recovery_complete; + unsigned long flags; + + /* Give the error handler a chance to run. */ + spin_lock_irqsave(&x->wait.lock, flags); + recovery_complete = x->done > 0; + spin_unlock_irqrestore(&x->wait.lock, flags); + + return recovery_complete ? 
-EIO : -EAGAIN; + } if (seqno == ring->outstanding_lazy_request) { struct drm_i915_gem_request *request; @@ -2045,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (request == NULL) return -ENOMEM; - ret = i915_add_request(dev, NULL, request, ring); + ret = i915_add_request(ring, NULL, request); if (ret) { kfree(request); return ret; @@ -2055,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, } if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(ring->dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else ier = I915_READ(IER); if (!ier) { DRM_ERROR("something (likely vbetool) disabled " "interrupts, re-enabling\n"); - i915_driver_irq_preinstall(dev); - i915_driver_irq_postinstall(dev); + i915_driver_irq_preinstall(ring->dev); + i915_driver_irq_postinstall(ring->dev); } - trace_i915_gem_request_wait_begin(dev, seqno); + trace_i915_gem_request_wait_begin(ring, seqno); ring->waiting_seqno = seqno; if (ring->irq_get(ring)) { - if (interruptible) + if (dev_priv->mm.interruptible) ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); @@ -2086,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, ret = -EBUSY; ring->waiting_seqno = 0; - trace_i915_gem_request_wait_end(dev, seqno); + trace_i915_gem_request_wait_end(ring, seqno); } if (atomic_read(&dev_priv->mm.wedged)) ret = -EAGAIN; @@ -2102,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, * a separate wait queue to handle that. */ if (ret == 0) - i915_gem_retire_requests_ring(dev, ring); + i915_gem_retire_requests_ring(ring); return ret; } /** - * Waits for a sequence number to be signaled, and cleans up the - * request and object lists appropriately for that event. - */ -static int -i915_wait_request(struct drm_device *dev, uint32_t seqno, - struct intel_ring_buffer *ring) -{ - return i915_do_wait_request(dev, seqno, 1, ring); -} - -/** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. */ int -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, - bool interruptible) +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; int ret; /* This function only exists to support waiting for existing rendering, @@ -2138,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, * it. 
*/ if (obj->active) { - ret = i915_do_wait_request(dev, - obj->last_rendering_seqno, - interruptible, - obj->ring); + ret = i915_wait_request(obj->ring, obj->last_rendering_seqno); if (ret) return ret; } @@ -2191,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (ret == -ERESTARTSYS) return ret; + trace_i915_gem_object_unbind(obj); + i915_gem_gtt_unbind_object(obj); i915_gem_object_put_pages_gtt(obj); @@ -2206,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); - trace_i915_gem_object_unbind(obj); - return ret; } int -i915_gem_flush_ring(struct drm_device *dev, - struct intel_ring_buffer *ring, +i915_gem_flush_ring(struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) { int ret; + trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); + ret = ring->flush(ring, invalidate_domains, flush_domains); if (ret) return ret; - i915_gem_process_flushing_list(dev, flush_domains, ring); + i915_gem_process_flushing_list(ring, flush_domains); return 0; } -static int i915_ring_idle(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int i915_ring_idle(struct intel_ring_buffer *ring) { int ret; @@ -2236,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev, return 0; if (!list_empty(&ring->gpu_write_list)) { - ret = i915_gem_flush_ring(dev, ring, + ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret) return ret; } - return i915_wait_request(dev, - i915_gem_next_request_seqno(dev, ring), - ring); + return i915_wait_request(ring, i915_gem_next_request_seqno(ring)); } int @@ -2261,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. 
*/ for (i = 0; i < I915_NUM_RINGS; i++) { - ret = i915_ring_idle(dev, &dev_priv->ring[i]); + ret = i915_ring_idle(&dev_priv->ring[i]); if (ret) return ret; } @@ -2445,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) static int i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined, - bool interruptible) + struct intel_ring_buffer *pipelined) { int ret; if (obj->fenced_gpu_access) { if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->base.dev, - obj->last_fenced_ring, + ret = i915_gem_flush_ring(obj->last_fenced_ring, 0, obj->base.write_domain); if (ret) return ret; @@ -2465,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { if (!ring_passed_seqno(obj->last_fenced_ring, obj->last_fenced_seqno)) { - ret = i915_do_wait_request(obj->base.dev, - obj->last_fenced_seqno, - interruptible, - obj->last_fenced_ring); + ret = i915_wait_request(obj->last_fenced_ring, + obj->last_fenced_seqno); if (ret) return ret; } @@ -2494,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj) if (obj->tiling_mode) i915_gem_release_mmap(obj); - ret = i915_gem_object_flush_fence(obj, NULL, true); + ret = i915_gem_object_flush_fence(obj, NULL); if (ret) return ret; @@ -2571,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev, */ int i915_gem_object_get_fence(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined, - bool interruptible) + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2594,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, if (reg->setup_seqno) { if (!ring_passed_seqno(obj->last_fenced_ring, reg->setup_seqno)) { - ret = i915_do_wait_request(obj->base.dev, - reg->setup_seqno, - interruptible, - obj->last_fenced_ring); + ret = i915_wait_request(obj->last_fenced_ring, + reg->setup_seqno); if (ret) return ret; } @@ -2606,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, } } else if (obj->last_fenced_ring && obj->last_fenced_ring != pipelined) { - ret = i915_gem_object_flush_fence(obj, - pipelined, - interruptible); + ret = i915_gem_object_flush_fence(obj, pipelined); if (ret) return ret; } else if (obj->tiling_changed) { if (obj->fenced_gpu_access) { if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->base.dev, obj->ring, + ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); if (ret) return ret; @@ -2631,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, if (obj->tiling_changed) { if (pipelined) { reg->setup_seqno = - i915_gem_next_request_seqno(dev, pipelined); + i915_gem_next_request_seqno(pipelined); obj->last_fenced_seqno = reg->setup_seqno; obj->last_fenced_ring = pipelined; } @@ -2645,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, if (reg == NULL) return -ENOSPC; - ret = i915_gem_object_flush_fence(obj, pipelined, interruptible); + ret = i915_gem_object_flush_fence(obj, pipelined); if (ret) return ret; @@ -2657,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, if (old->tiling_mode) i915_gem_release_mmap(old); - ret = i915_gem_object_flush_fence(old, - pipelined, - interruptible); + ret = i915_gem_object_flush_fence(old, pipelined); if (ret) { drm_gem_object_unreference(&old->base); return ret; @@ -2671,7 +2657,7 @@ 
i915_gem_object_get_fence(struct drm_i915_gem_object *obj, old->fence_reg = I915_FENCE_REG_NONE; old->last_fenced_ring = pipelined; old->last_fenced_seqno = - pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + pipelined ? i915_gem_next_request_seqno(pipelined) : 0; drm_gem_object_unreference(&old->base); } else if (obj->last_fenced_seqno == 0) @@ -2683,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, obj->last_fenced_ring = pipelined; reg->setup_seqno = - pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + pipelined ? i915_gem_next_request_seqno(pipelined) : 0; obj->last_fenced_seqno = reg->setup_seqno; update: @@ -2880,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->map_and_fenceable = mappable && fenceable; - trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); + trace_i915_gem_object_bind(obj, map_and_fenceable); return 0; } @@ -2903,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; /* Queue the GPU write cache flushing we need. */ - return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); + return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2976,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (obj->gtt_space == NULL) return -EINVAL; + if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) + return 0; + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; if (obj->pending_gpu_write || write) { - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; } @@ -3031,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, /* Currently, we are always called from an non-interruptible context. 
*/ if (pipelined != obj->ring) { - ret = i915_gem_object_wait_rendering(obj, false); + ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; } @@ -3049,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, } int -i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, - bool interruptible) +i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj) { int ret; @@ -3058,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, return 0; if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->base.dev, obj->ring, - 0, obj->base.write_domain); + ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); if (ret) return ret; } - return i915_gem_object_wait_rendering(obj, interruptible); + return i915_gem_object_wait_rendering(obj); } /** @@ -3079,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) uint32_t old_write_domain, old_read_domains; int ret; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return 0; + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; @@ -3181,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj, true); + ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; @@ -3252,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) u32 seqno = 0; int ret; + if (atomic_read(&dev_priv->mm.wedged)) + return -EIO; + spin_lock(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) @@ -3367,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -3418,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -3455,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -3473,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * flush earlier is beneficial. */ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(dev, obj->ring, + ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); } else if (obj->ring->outstanding_lazy_request == obj->last_rendering_seqno) { @@ -3484,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, */ request = kzalloc(sizeof(*request), GFP_KERNEL); if (request) - ret = i915_add_request(dev, - NULL, request, - obj->ring); + ret = i915_add_request(obj->ring, NULL,request); else ret = -ENOMEM; } @@ -3496,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * are actually unmasked, and our working set ends up being * larger than required. 
*/ - i915_gem_retire_requests_ring(dev, obj->ring); + i915_gem_retire_requests_ring(obj->ring); args->busy = obj->active; } @@ -3535,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return ret; obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); - if (obj == NULL) { + if (&obj->base == NULL) { ret = -ENOENT; goto unlock; } @@ -3626,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) kfree(obj->page_cpu_valid); kfree(obj->bit_17); kfree(obj); + + trace_i915_gem_object_destroy(obj); } void i915_gem_free_object(struct drm_gem_object *gem_obj) @@ -3633,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; - trace_i915_gem_object_destroy(obj); - while (obj->pin_count > 0) i915_gem_object_unpin(obj); @@ -3880,6 +3869,8 @@ i915_gem_load(struct drm_device *dev) i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); + dev_priv->mm.interruptible = true; + dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; register_shrinker(&dev_priv->mm.inactive_shrinker); diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 29d014c48ca..8da1899bd24 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev) } #endif /* WATCH_INACTIVE */ - -#if WATCH_EXEC | WATCH_PWRITE -static void -i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, - uint32_t bias, uint32_t mark) -{ - uint32_t *mem = kmap_atomic(page, KM_USER0); - int i; - for (i = start; i < end; i += 4) - DRM_INFO("%08x: %08x%s\n", - (int) (bias + i), mem[i / 4], - (bias + i == mark) ? " ********" : ""); - kunmap_atomic(mem, KM_USER0); - /* give syslog time to catch up */ - msleep(1); -} - -void -i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark) -{ - int page; - - DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); - for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { - int page_len, chunk, chunk_len; - - page_len = len - page * PAGE_SIZE; - if (page_len > PAGE_SIZE) - page_len = PAGE_SIZE; - - for (chunk = 0; chunk < page_len; chunk += 128) { - chunk_len = page_len - chunk; - if (chunk_len > 128) - chunk_len = 128; - i915_gem_dump_page(obj->pages[page], - chunk, chunk + chunk_len, - obj->gtt_offset + - page * PAGE_SIZE, - mark); - } - } -} -#endif - #if WATCH_COHERENCY void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3d39005540a..da05a2692a7 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -30,6 +30,7 @@ #include "drm.h" #include "i915_drv.h" #include "i915_drm.h" +#include "i915_trace.h" static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) @@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, return 0; } + trace_i915_gem_evict(dev, min_size, alignment, mappable); + /* * The goal is to evict objects and amalgamate space in LRU order. 
* The oldest idle objects reside on the inactive list, which is in @@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) if (lists_empty) return -ENOSPC; + trace_i915_gem_evict_everything(dev, purgeable_only); + /* Flush everything (on to the inactive lists) and evict */ ret = i915_gpu_idle(dev); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d2f445e825f..7ff7f933ddf 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -37,6 +37,7 @@ struct change_domains { uint32_t invalidate_domains; uint32_t flush_domains; uint32_t flush_rings; + uint32_t flips; }; /* @@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) i915_gem_release_mmap(obj); + if (obj->base.pending_write_domain) + cd->flips |= atomic_read(&obj->pending_flip); + /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all * of our domain changes in execbuffers (which clears objects' @@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_offset = to_intel_bo(target_obj)->gtt_offset; -#if WATCH_RELOC - DRM_INFO("%s: obj %p offset %08x target %d " - "read %08x write %08x gtt %08x " - "presumed %08x delta %08x\n", - __func__, - obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, - (int) target_offset, - (int) reloc->presumed_offset, - reloc->delta); -#endif - /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ @@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return ret; } - /* and points to somewhere within the target object. 
*/ - if (unlikely(reloc->delta >= target_obj->size)) { - DRM_ERROR("Relocation beyond target object bounds: " - "obj %p target %d delta %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->delta, - (int) target_obj->size); - return ret; - } - reloc->delta += target_offset; if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { uint32_t page_offset = reloc->offset & ~PAGE_MASK; @@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, if (has_fenced_gpu_access) { if (need_fence) { - ret = i915_gem_object_get_fence(obj, ring, 1); + ret = i915_gem_object_get_fence(obj, ring); if (ret) break; } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && @@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, /* reacquire the objects */ eb_reset(eb); for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj; - obj = to_intel_bo(drm_gem_object_lookup(dev, file, exec[i].handle)); - if (obj == NULL) { + if (&obj->base == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec[i].handle, i); ret = -ENOENT; @@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev, if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { for (i = 0; i < I915_NUM_RINGS; i++) if (flush_rings & (1 << i)) { - ret = i915_gem_flush_ring(dev, - &dev_priv->ring[i], + ret = i915_gem_flush_ring(&dev_priv->ring[i], invalidate_domains, flush_domains); if (ret) @@ -772,9 +748,9 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, if (from == NULL || to == from) return 0; - /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ - if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) - return i915_gem_object_wait_rendering(obj, true); + /* XXX gpu semaphores are implicated in various hard hangs on SNB */ + if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) + return i915_gem_object_wait_rendering(obj); idx = intel_ring_sync_index(from, to); @@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, if (request == NULL) return -ENOMEM; - ret = i915_add_request(obj->base.dev, NULL, request, from); + ret = i915_add_request(from, NULL, request); if (ret) { kfree(request); return ret; @@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, } static int +i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) +{ + u32 plane, flip_mask; + int ret; + + /* Check for any pending flips. As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. 
+ */ + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } + + return 0; +} + + +static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, struct list_head *objects) { @@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, struct change_domains cd; int ret; - cd.invalidate_domains = 0; - cd.flush_domains = 0; - cd.flush_rings = 0; + memset(&cd, 0, sizeof(cd)); list_for_each_entry(obj, objects, exec_list) i915_gem_object_set_to_gpu_domain(obj, ring, &cd); if (cd.invalidate_domains | cd.flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - cd.invalidate_domains, - cd.flush_domains); -#endif ret = i915_gem_execbuffer_flush(ring->dev, cd.invalidate_domains, cd.flush_domains, @@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; } + if (cd.flips) { + ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); + if (ret) + return ret; + } + list_for_each_entry(obj, objects, exec_list) { ret = i915_gem_execbuffer_sync_rings(obj, ring); if (ret) @@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, return 0; } -static int -i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, - struct list_head *objects) -{ - struct drm_i915_gem_object *obj; - int flips; - - /* Check for any pending flips. As we only maintain a flip queue depth - * of 1, we can simply insert a WAIT for the next display flip prior - * to executing the batch and avoid stalling the CPU. 
- */ - flips = 0; - list_for_each_entry(obj, objects, exec_list) { - if (obj->base.write_domain) - flips |= atomic_read(&obj->pending_flip); - } - if (flips) { - int plane, flip_mask, ret; - - for (plane = 0; flips >> plane; plane++) { - if (((flips >> plane) & 1) == 0) - continue; - - if (plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; - - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } - } - - return 0; -} - static void i915_gem_execbuffer_move_to_active(struct list_head *objects, struct intel_ring_buffer *ring, @@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, struct drm_i915_gem_object *obj; list_for_each_entry(obj, objects, exec_list) { + u32 old_read = obj->base.read_domains; + u32 old_write = obj->base.write_domain; + + obj->base.read_domains = obj->base.pending_read_domains; obj->base.write_domain = obj->base.pending_write_domain; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; @@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, intel_mark_busy(ring->dev, obj); } - trace_i915_gem_object_change_domain(obj, - obj->base.read_domains, - obj->base.write_domain); + trace_i915_gem_object_change_domain(obj, old_read, old_write); } } @@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 4) invalidate |= I915_GEM_DOMAIN_SAMPLER; if (ring->flush(ring, invalidate, 0)) { - i915_gem_next_request_seqno(dev, ring); + i915_gem_next_request_seqno(ring); return; } /* Add a breadcrumb for the completion of the batch buffer */ request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL || i915_add_request(dev, file, request, ring)) { - i915_gem_next_request_seqno(dev, ring); + if (request == NULL || i915_add_request(ring, file, request)) { + i915_gem_next_request_seqno(ring); kfree(request); } } @@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: @@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, obj = to_intel_bo(drm_gem_object_lookup(dev, file, exec[i].handle)); - if (obj == NULL) { + if (&obj->base == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec[i].handle, i); /* prevent error path from reading uninitialized data */ @@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto err; - ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); - if (ret) - goto err; - - seqno = i915_gem_next_request_seqno(dev, ring); + seqno = i915_gem_next_request_seqno(ring); for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) { if (seqno < ring->sync_seqno[i]) { /* The GPU can not handle its semaphore value wrapping, @@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } } + trace_i915_gem_ring_dispatch(ring, seqno); + exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { @@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret, i; -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d 
buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - if (args->buffer_count < 1) { DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; @@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - if (args->buffer_count < 1) { DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } - exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (exec2_list == NULL) + exec2_list = drm_malloc_ab(sizeof(*exec2_list), + args->buffer_count); if (exec2_list == NULL) { DRM_ERROR("Failed to allocate exec list for %d buffers\n", args->buffer_count); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 22a32b9932c..281ad3d6115 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_i915_gem_set_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; - int ret; - - ret = i915_gem_check_is_wedged(dev); - if (ret) - return ret; + int ret = 0; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) + if (&obj->base == NULL) return -ENOENT; if (!i915_tiling_ok(dev, @@ -349,14 +345,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); - obj->tiling_changed = true; - obj->tiling_mode = args->tiling_mode; - obj->stride = args->stride; + /* Rebind if we need a change of alignment */ + if (!obj->map_and_fenceable) { + u32 unfenced_alignment = + i915_gem_get_unfenced_gtt_alignment(obj); + if (obj->gtt_offset & (unfenced_alignment - 1)) + ret = i915_gem_object_unbind(obj); + } + + if (ret == 0) { + obj->tiling_changed = true; + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; + } } + /* we have to maintain this existing ABI... 
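/*
 * Sketch of the two pieces of the set_tiling change around this point
 * (illustrative only, using the fields visible in the hunk): the object
 * is unbound only when its current GTT offset can no longer satisfy the
 * unfenced alignment required by the new tiling mode,
 *
 *     u32 align = i915_gem_get_unfenced_gtt_alignment(obj);
 *     if (obj->gtt_offset & (align - 1))      // alignment is a power of two
 *         ret = i915_gem_object_unbind(obj);  // re-bound on demand later
 *
 * and, either way, the ioctl writes the tiling mode and stride actually
 * in effect back into the args so existing userspace keeps seeing the
 * values it has always been returned.
 */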
*/ + args->stride = obj->stride; + args->tiling_mode = obj->tiling_mode; drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } /** @@ -371,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (obj == NULL) + if (&obj->base == NULL) return -ENOENT; mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 97f946dcc1a..188b497e507 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) } } -static inline u32 -i915_pipestat(int pipe) -{ - if (pipe == 0) - return PIPEASTAT; - if (pipe == 1) - return PIPEBSTAT; - BUG(); -} - void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != mask) { - u32 reg = i915_pipestat(pipe); + u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ @@ -112,7 +102,7 @@ void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) { if ((dev_priv->pipestat[pipe] & mask) != 0) { - u32 reg = i915_pipestat(pipe); + u32 reg = PIPESTAT(pipe); dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); @@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %d\n", pipe); + "pipe %c\n", pipe_name(pipe)); return 0; } - high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; - low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + high_frame = PIPEFRAME(pipe); + low_frame = PIPEFRAMEPIXEL(pipe); /* * High & low register fields aren't synchronized, so make sure @@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int reg = pipe ? 
PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; + int reg = PIPE_FRMCOUNT_GM45(pipe); if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %d\n", pipe); + "pipe %c\n", pipe_name(pipe)); return 0; } @@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " - "pipe %d\n", pipe); + "pipe %c\n", pipe_name(pipe)); return 0; } @@ -316,6 +306,8 @@ static void i915_hotplug_work_func(struct work_struct *work) struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; + DRM_DEBUG_KMS("running encoder hotplug functions\n"); + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); @@ -365,7 +357,7 @@ static void notify_ring(struct drm_device *dev, return; seqno = ring->get_seqno(ring); - trace_i915_gem_request_complete(dev, seqno); + trace_i915_gem_request_complete(ring, seqno); ring->irq_seqno = seqno; wake_up_all(&ring->irq_queue); @@ -417,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 pch_iir; + int pipe; pch_iir = I915_READ(SDEIIR); @@ -437,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev) if (pch_iir & SDE_POISON) DRM_ERROR("PCH poison interrupt\n"); - if (pch_iir & SDE_FDI_MASK) { - u32 fdia, fdib; - - fdia = I915_READ(FDI_RXA_IIR); - fdib = I915_READ(FDI_RXB_IIR); - DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib); - } + if (pch_iir & SDE_FDI_MASK) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); @@ -648,9 +639,14 @@ static void i915_error_state_free(struct drm_device *dev, struct drm_i915_error_state *error) { - i915_error_object_free(error->batchbuffer[0]); - i915_error_object_free(error->batchbuffer[1]); - i915_error_object_free(error->ringbuffer); + int i; + + for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) + i915_error_object_free(error->batchbuffer[i]); + + for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) + i915_error_object_free(error->ringbuffer[i]); + kfree(error->active_bo); kfree(error->overlay); kfree(error); @@ -765,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev) struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; unsigned long flags; - int i; + int i, pipe; spin_lock_irqsave(&dev_priv->error_lock, flags); error = dev_priv->first_error; @@ -773,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev) if (error) return; + /* Account for pipe specific data like PIPE*STAT */ error = kmalloc(sizeof(*error), GFP_ATOMIC); if (!error) { DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); return; } - DRM_DEBUG_DRIVER("generating error event\n"); + DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", + dev->primary->index); error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); - error->pipeastat = I915_READ(PIPEASTAT); - error->pipebstat = I915_READ(PIPEBSTAT); + for_each_pipe(pipe) + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); error->instpm = I915_READ(INSTPM); error->error = 0; if 
(INTEL_INFO(dev)->gen >= 6) { @@ -824,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev) } i915_gem_record_fences(dev, error); - /* Record the active batchbuffers */ - for (i = 0; i < I915_NUM_RINGS; i++) + /* Record the active batch and ring buffers */ + for (i = 0; i < I915_NUM_RINGS; i++) { error->batchbuffer[i] = i915_error_first_batchbuffer(dev_priv, &dev_priv->ring[i]); - /* Record the ringbuffer */ - error->ringbuffer = i915_error_object_create(dev_priv, - dev_priv->ring[RCS].obj); + error->ringbuffer[i] = + i915_error_object_create(dev_priv, + dev_priv->ring[i].obj); + } /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; @@ -905,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 eir = I915_READ(EIR); + int pipe; if (!eir) return; @@ -953,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } if (eir & I915_ERROR_MEMORY_REFRESH) { - u32 pipea_stats = I915_READ(PIPEASTAT); - u32 pipeb_stats = I915_READ(PIPEBSTAT); - - printk(KERN_ERR "memory refresh error\n"); - printk(KERN_ERR "PIPEASTAT: 0x%08x\n", - pipea_stats); - printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", - pipeb_stats); + printk(KERN_ERR "memory refresh error:\n"); + for_each_pipe(pipe) + printk(KERN_ERR "pipe %c stat: 0x%08x\n", + pipe_name(pipe), I915_READ(PIPESTAT(pipe))); /* pipestat has already been acked */ } if (eir & I915_ERROR_INSTRUCTION) { @@ -1074,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { - int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; + int dspsurf = DSPSURF(intel_crtc->plane); stall_detected = I915_READ(dspsurf) == obj->gtt_offset; } else { - int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; + int dspaddr = DSPADDR(intel_crtc->plane); stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitch + crtc->x * crtc->fb->bits_per_pixel/8); @@ -1097,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv; u32 iir, new_iir; - u32 pipea_stats, pipeb_stats; + u32 pipe_stats[I915_MAX_PIPES]; u32 vblank_status; int vblank = 0; unsigned long irqflags; int irq_received; - int ret = IRQ_NONE; + int ret = IRQ_NONE, pipe; + bool blc_event = false; atomic_inc(&dev_priv->irq_received); @@ -1125,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) * interrupts (for non-MSI). 
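/*
 * Shape of the per-pipe status handling in the interrupt handler that
 * follows (a condensed sketch of the new code, not an addition to it):
 *
 *     u32 pipe_stats[I915_MAX_PIPES];
 *     for_each_pipe(pipe) {
 *         pipe_stats[pipe] = I915_READ(PIPESTAT(pipe));
 *         if (pipe_stats[pipe] & 0x8000ffff)
 *             I915_WRITE(PIPESTAT(pipe), pipe_stats[pipe]);
 *     }
 *
 * Writing the just-read value back clears the latched status bits
 * (hence "Clear the PIPE*STAT regs before the IIR"), and the saved
 * copies drive the vblank and page-flip handling later in the handler,
 * replacing the duplicated pipe A / pipe B branches.
 */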
*/ spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - pipea_stats = I915_READ(PIPEASTAT); - pipeb_stats = I915_READ(PIPEBSTAT); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) i915_handle_error(dev, false); - /* - * Clear the PIPE(A|B)STAT regs before the IIR - */ - if (pipea_stats & 0x8000ffff) { - if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe a underrun\n"); - I915_WRITE(PIPEASTAT, pipea_stats); - irq_received = 1; - } - - if (pipeb_stats & 0x8000ffff) { - if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) - DRM_DEBUG_DRIVER("pipe b underrun\n"); - I915_WRITE(PIPEBSTAT, pipeb_stats); - irq_received = 1; + for_each_pipe(pipe) { + int reg = PIPESTAT(pipe); + pipe_stats[pipe] = I915_READ(reg); + + /* + * Clear the PIPE*STAT regs before the IIR + */ + if (pipe_stats[pipe] & 0x8000ffff) { + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG_DRIVER("pipe %c underrun\n", + pipe_name(pipe)); + I915_WRITE(reg, pipe_stats[pipe]); + irq_received = 1; + } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1196,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip_plane(dev, 1); } - if (pipea_stats & vblank_status && - drm_handle_vblank(dev, 0)) { - vblank++; - if (!dev_priv->flip_pending_is_done) { - i915_pageflip_stall_check(dev, 0); - intel_finish_page_flip(dev, 0); + for_each_pipe(pipe) { + if (pipe_stats[pipe] & vblank_status && + drm_handle_vblank(dev, pipe)) { + vblank++; + if (!dev_priv->flip_pending_is_done) { + i915_pageflip_stall_check(dev, pipe); + intel_finish_page_flip(dev, pipe); + } } - } - if (pipeb_stats & vblank_status && - drm_handle_vblank(dev, 1)) { - vblank++; - if (!dev_priv->flip_pending_is_done) { - i915_pageflip_stall_check(dev, 1); - intel_finish_page_flip(dev, 1); - } + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; } - if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || - (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || - (iir & I915_ASLE_INTERRUPT)) + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) intel_opregion_asle_intr(dev); /* With MSI, interrupts are only generated when iir @@ -1266,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev) return dev_priv->counter; } -void i915_trace_irq_get(struct drm_device *dev, u32 seqno) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct intel_ring_buffer *ring = LP_RING(dev_priv); - - if (dev_priv->trace_irq_seqno == 0 && - ring->irq_get(ring)) - dev_priv->trace_irq_seqno = seqno; -} - static int i915_wait_irq(struct drm_device * dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -1375,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); + + /* maintain vblank delivery even in deep C-states */ + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; } @@ -1388,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, + INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); + if (HAS_PCH_SPLIT(dev)) ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); @@ -1398,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -void i915_enable_interrupt (struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_PCH_SPLIT(dev)) - intel_opregion_enable_asle(dev); - dev_priv->irq_enabled = 1; -} - - /* Set the vblank monitor pipe */ int i915_vblank_pipe_set(struct drm_device *dev, void *data, @@ -1644,14 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev) POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { - hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | - SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; + hotplug_mask = (SDE_CRT_HOTPLUG_CPT | + SDE_PORTB_HOTPLUG_CPT | + SDE_PORTC_HOTPLUG_CPT | + SDE_PORTD_HOTPLUG_CPT); } else { - hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | - SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; - hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; - I915_WRITE(FDI_RXA_IMR, 0); - I915_WRITE(FDI_RXB_IMR, 0); + hotplug_mask = (SDE_CRT_HOTPLUG | + SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | + SDE_PORTD_HOTPLUG | + SDE_AUX_MASK); } dev_priv->pch_irq_mask = ~hotplug_mask; @@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; atomic_set(&dev_priv->irq_received, 0); @@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev) } I915_WRITE(HWSTAM, 0xeffe); - I915_WRITE(PIPEASTAT, 0); - I915_WRITE(PIPEBSTAT, 0); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); POSTING_READ(IER); @@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev) void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; if (!dev_priv) return; @@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev) } I915_WRITE(HWSTAM, 0xffffffff); - I915_WRITE(PIPEASTAT, 0); - I915_WRITE(PIPEBSTAT, 0); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); - I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); + for_each_pipe(pipe) + I915_WRITE(PIPESTAT(pipe), + I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_WRITE(IIR, I915_READ(IIR)); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5cfc68940f1..363f66ca5d3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -174,7 +174,9 @@ * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) -#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ +#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ +#define MI_INVALIDATE_TLB (1<<18) +#define MI_INVALIDATE_BSD (1<<7) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) @@ -403,9 +405,12 @@ #define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define INSTPM_SELF_EN (1<<12) /* 915GM only */ +#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts + will not assert AGPBUSY# and will only + be delivered when out of C3. 
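/*
 * INSTPM is written above in the "masked bit" style used by several
 * render registers: the high 16 bits select which of the low 16 bits
 * the write is allowed to change.  That is why the gen3 vblank hooks
 * use
 *
 *     I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
 *         // unmask the bit, leave it clear: keep delivering vblanks in deep C-states
 *     I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
 *         // unmask the bit and set it: restore the power-saving default
 *
 * (Later kernels wrap this convention in _MASKED_BIT_ENABLE()/
 * _MASKED_BIT_DISABLE() helpers; it is shown expanded here as in the
 * patch.)
 */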
*/ #define ACTHD 0x020c8 #define FW_BLC 0x020d8 -#define FW_BLC2 0x020dc +#define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ #define FW_BLC_SELF_EN_MASK (1<<31) #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ @@ -704,9 +709,9 @@ #define VGA1_PD_P1_DIV_2 (1 << 13) #define VGA1_PD_P1_SHIFT 8 #define VGA1_PD_P1_MASK (0x1f << 8) -#define DPLL_A 0x06014 -#define DPLL_B 0x06018 -#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B) +#define _DPLL_A 0x06014 +#define _DPLL_B 0x06018 +#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) @@ -777,7 +782,7 @@ #define SDVO_MULTIPLIER_MASK 0x000000ff #define SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define DPLL_A_MD 0x0601c /* 965+ only */ +#define _DPLL_A_MD 0x0601c /* 965+ only */ /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. * @@ -814,14 +819,14 @@ */ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 -#define DPLL_B_MD 0x06020 /* 965+ only */ -#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD) -#define FPA0 0x06040 -#define FPA1 0x06044 -#define FPB0 0x06048 -#define FPB1 0x0604c -#define FP0(pipe) _PIPE(pipe, FPA0, FPB0) -#define FP1(pipe) _PIPE(pipe, FPA1, FPB1) +#define _DPLL_B_MD 0x06020 /* 965+ only */ +#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) +#define _FPA0 0x06040 +#define _FPA1 0x06044 +#define _FPB0 0x06048 +#define _FPB1 0x0604c +#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0) +#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 @@ -960,8 +965,9 @@ * Palette regs */ -#define PALETTE_A 0x0a000 -#define PALETTE_B 0x0a800 +#define _PALETTE_A 0x0a000 +#define _PALETTE_B 0x0a800 +#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) /* MCH MMIO space */ @@ -1265,32 +1271,32 @@ */ /* Pipe A timing regs */ -#define HTOTAL_A 0x60000 -#define HBLANK_A 0x60004 -#define HSYNC_A 0x60008 -#define VTOTAL_A 0x6000c -#define VBLANK_A 0x60010 -#define VSYNC_A 0x60014 -#define PIPEASRC 0x6001c -#define BCLRPAT_A 0x60020 +#define _HTOTAL_A 0x60000 +#define _HBLANK_A 0x60004 +#define _HSYNC_A 0x60008 +#define _VTOTAL_A 0x6000c +#define _VBLANK_A 0x60010 +#define _VSYNC_A 0x60014 +#define _PIPEASRC 0x6001c +#define _BCLRPAT_A 0x60020 /* Pipe B timing regs */ -#define HTOTAL_B 0x61000 -#define HBLANK_B 0x61004 -#define HSYNC_B 0x61008 -#define VTOTAL_B 0x6100c -#define VBLANK_B 0x61010 -#define VSYNC_B 0x61014 -#define PIPEBSRC 0x6101c -#define BCLRPAT_B 0x61020 - -#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B) -#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B) -#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B) -#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) -#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) -#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) -#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) +#define _HTOTAL_B 0x61000 +#define _HBLANK_B 0x61004 +#define _HSYNC_B 0x61008 +#define _VTOTAL_B 0x6100c +#define _VBLANK_B 0x61010 +#define _VSYNC_B 0x61014 +#define _PIPEBSRC 0x6101c +#define _BCLRPAT_B 0x61020 + +#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) +#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) +#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) +#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) +#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) +#define 
VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) +#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) /* VGA port control */ #define ADPA 0x61100 @@ -1384,6 +1390,7 @@ #define SDVO_ENCODING_HDMI (0x2 << 10) /** Requird for HDMI operation */ #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) +#define SDVO_COLOR_RANGE_16_235 (1 << 8) #define SDVO_BORDER_ENABLE (1 << 7) #define SDVO_AUDIO_ENABLE (1 << 6) /** New with 965, default is to be set */ @@ -1439,8 +1446,13 @@ #define LVDS_PORT_EN (1 << 31) /* Selects pipe B for LVDS data. Must be set on pre-965. */ #define LVDS_PIPEB_SELECT (1 << 30) +#define LVDS_PIPE_MASK (1 << 30) /* LVDS dithering flag on 965/g4x platform */ #define LVDS_ENABLE_DITHER (1 << 25) +/* LVDS sync polarity flags. Set to invert (i.e. negative) */ +#define LVDS_VSYNC_POLARITY (1 << 21) +#define LVDS_HSYNC_POLARITY (1 << 20) + /* Enable border for unscaled (or aspect-scaled) display */ #define LVDS_BORDER_ENABLE (1 << 15) /* @@ -1474,6 +1486,9 @@ #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) +#define LVDS_PIPE_ENABLED(V, P) \ + (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) + /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 #define VIDEO_DIP_CTL 0x61170 @@ -1551,17 +1566,7 @@ /* Backlight control */ #define BLC_PWM_CTL 0x61254 -#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) #define BLC_PWM_CTL2 0x61250 /* 965+ only */ -#define BLM_COMBINATION_MODE (1 << 30) -/* - * This is the most significant 15 bits of the number of backlight cycles in a - * complete cycle of the modulated backlight control. - * - * The actual value is this field multiplied by two. - */ -#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -#define BLM_LEGACY_MODE (1 << 16) /* * This is the number of cycles out of the backlight modulation cycle for which * the backlight is on. @@ -2062,6 +2067,10 @@ #define DP_PORT_EN (1 << 31) #define DP_PIPEB_SELECT (1 << 30) +#define DP_PIPE_MASK (1 << 30) + +#define DP_PIPE_ENABLED(V, P) \ + (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) /* Link training mode - select a suitable mode for each stage */ #define DP_LINK_TRAIN_PAT_1 (0 << 28) @@ -2204,8 +2213,8 @@ * which is after the LUTs, so we want the bytes for our color format. * For our current usage, this is always 3, one byte for R, G and B. */ -#define PIPEA_GMCH_DATA_M 0x70050 -#define PIPEB_GMCH_DATA_M 0x71050 +#define _PIPEA_GMCH_DATA_M 0x70050 +#define _PIPEB_GMCH_DATA_M 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ #define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) @@ -2213,8 +2222,8 @@ #define PIPE_GMCH_DATA_M_MASK (0xffffff) -#define PIPEA_GMCH_DATA_N 0x70054 -#define PIPEB_GMCH_DATA_N 0x71054 +#define _PIPEA_GMCH_DATA_N 0x70054 +#define _PIPEB_GMCH_DATA_N 0x71054 #define PIPE_GMCH_DATA_N_MASK (0xffffff) /* @@ -2228,20 +2237,25 @@ * Attributes and VB-ID. 
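/*
 * Context for the _FOO_A/_FOO_B renames running through this header
 * (a sketch; the helper itself is defined earlier in i915_reg.h):
 * per-pipe register instances sit a fixed stride apart, so one macro
 * derives the address for any pipe, roughly
 *
 *     #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
 *
 * The raw A/B offsets gain a leading underscore so callers reach them
 * only through the pipe-parameterised wrappers (PIPESTAT(pipe),
 * DSPSURF(plane), FDI_RX_IIR(pipe), ...).  The "pipe %c" debug strings
 * elsewhere in the series rely on a companion pipe_name(pipe) helper
 * that maps pipe 0/1 to 'A'/'B'.
 */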
*/ -#define PIPEA_DP_LINK_M 0x70060 -#define PIPEB_DP_LINK_M 0x71060 +#define _PIPEA_DP_LINK_M 0x70060 +#define _PIPEB_DP_LINK_M 0x71060 #define PIPEA_DP_LINK_M_MASK (0xffffff) -#define PIPEA_DP_LINK_N 0x70064 -#define PIPEB_DP_LINK_N 0x71064 +#define _PIPEA_DP_LINK_N 0x70064 +#define _PIPEB_DP_LINK_N 0x71064 #define PIPEA_DP_LINK_N_MASK (0xffffff) +#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) +#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) +#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) +#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) + /* Display & cursor control */ /* Pipe A */ -#define PIPEADSL 0x70000 +#define _PIPEADSL 0x70000 #define DSL_LINEMASK 0x00000fff -#define PIPEACONF 0x70008 +#define _PIPEACONF 0x70008 #define PIPECONF_ENABLE (1<<31) #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) @@ -2267,7 +2281,7 @@ #define PIPECONF_DITHER_TYPE_ST1 (1<<2) #define PIPECONF_DITHER_TYPE_ST2 (2<<2) #define PIPECONF_DITHER_TYPE_TEMP (3<<2) -#define PIPEASTAT 0x70024 +#define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) #define PIPE_CRC_DONE_ENABLE (1UL<<28) @@ -2303,10 +2317,12 @@ #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) -#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) -#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) -#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) -#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) +#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) +#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) +#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) +#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) +#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) +#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) @@ -2468,20 +2484,21 @@ * } while (high1 != high2); * frame = (high1 << 8) | low1; */ -#define PIPEAFRAMEHIGH 0x70040 +#define _PIPEAFRAMEHIGH 0x70040 #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 -#define PIPEAFRAMEPIXEL 0x70044 +#define _PIPEAFRAMEPIXEL 0x70044 #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ -#define PIPEA_FRMCOUNT_GM45 0x70040 -#define PIPEA_FLIPCOUNT_GM45 0x70044 +#define _PIPEA_FRMCOUNT_GM45 0x70040 +#define _PIPEA_FLIPCOUNT_GM45 0x70044 +#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) /* Cursor A & B regs */ -#define CURACNTR 0x70080 +#define _CURACNTR 0x70080 /* Old style CUR*CNTR flags (desktop 8xx) */ #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 @@ -2502,23 +2519,23 @@ #define MCURSOR_PIPE_A 0x00 #define MCURSOR_PIPE_B (1 << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) -#define CURABASE 0x70084 -#define CURAPOS 0x70088 +#define _CURABASE 0x70084 +#define _CURAPOS 0x70088 #define CURSOR_POS_MASK 0x007FF #define CURSOR_POS_SIGN 0x8000 #define CURSOR_X_SHIFT 0 #define CURSOR_Y_SHIFT 16 #define CURSIZE 0x700a0 -#define CURBCNTR 0x700c0 -#define CURBBASE 0x700c4 -#define CURBPOS 0x700c8 +#define _CURBCNTR 0x700c0 +#define _CURBBASE 0x700c4 +#define _CURBPOS 0x700c8 -#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) -#define CURBASE(pipe) _PIPE(pipe, 
CURABASE, CURBBASE) -#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) +#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) +#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) +#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) /* Display A control */ -#define DSPACNTR 0x70180 +#define _DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) #define DISPLAY_PLANE_DISABLE 0 #define DISPPLANE_GAMMA_ENABLE (1<<30) @@ -2532,9 +2549,10 @@ #define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) #define DISPPLANE_STEREO_ENABLE (1<<25) #define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_SEL_PIPE_MASK (1<<24) +#define DISPPLANE_SEL_PIPE_SHIFT 24 +#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) #define DISPPLANE_SEL_PIPE_A 0 -#define DISPPLANE_SEL_PIPE_B (1<<24) +#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT) #define DISPPLANE_SRC_KEY_ENABLE (1<<22) #define DISPPLANE_SRC_KEY_DISABLE 0 #define DISPPLANE_LINE_DOUBLE (1<<20) @@ -2543,20 +2561,20 @@ #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ #define DISPPLANE_TILED (1<<10) -#define DSPAADDR 0x70184 -#define DSPASTRIDE 0x70188 -#define DSPAPOS 0x7018C /* reserved */ -#define DSPASIZE 0x70190 -#define DSPASURF 0x7019C /* 965+ only */ -#define DSPATILEOFF 0x701A4 /* 965+ only */ - -#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR) -#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR) -#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE) -#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS) -#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE) -#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF) -#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF) +#define _DSPAADDR 0x70184 +#define _DSPASTRIDE 0x70188 +#define _DSPAPOS 0x7018C /* reserved */ +#define _DSPASIZE 0x70190 +#define _DSPASURF 0x7019C /* 965+ only */ +#define _DSPATILEOFF 0x701A4 /* 965+ only */ + +#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) +#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) +#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) +#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) +#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) +#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) +#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) /* VBIOS flags */ #define SWF00 0x71410 @@ -2574,27 +2592,27 @@ #define SWF32 0x7241c /* Pipe B */ -#define PIPEBDSL 0x71000 -#define PIPEBCONF 0x71008 -#define PIPEBSTAT 0x71024 -#define PIPEBFRAMEHIGH 0x71040 -#define PIPEBFRAMEPIXEL 0x71044 -#define PIPEB_FRMCOUNT_GM45 0x71040 -#define PIPEB_FLIPCOUNT_GM45 0x71044 +#define _PIPEBDSL 0x71000 +#define _PIPEBCONF 0x71008 +#define _PIPEBSTAT 0x71024 +#define _PIPEBFRAMEHIGH 0x71040 +#define _PIPEBFRAMEPIXEL 0x71044 +#define _PIPEB_FRMCOUNT_GM45 0x71040 +#define _PIPEB_FLIPCOUNT_GM45 0x71044 /* Display B control */ -#define DSPBCNTR 0x71180 +#define _DSPBCNTR 0x71180 #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) #define DISPPLANE_ALPHA_TRANS_DISABLE 0 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) -#define DSPBADDR 0x71184 -#define DSPBSTRIDE 0x71188 -#define DSPBPOS 0x7118C -#define DSPBSIZE 0x71190 -#define DSPBSURF 0x7119C -#define DSPBTILEOFF 0x711A4 +#define _DSPBADDR 0x71184 +#define _DSPBSTRIDE 0x71188 +#define _DSPBPOS 0x7118C +#define _DSPBSIZE 0x71190 +#define _DSPBSURF 0x7119C +#define _DSPBTILEOFF 0x711A4 /* VBIOS regs */ #define VGACNTRL 0x71400 @@ 
-2648,68 +2666,80 @@ #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff -#define PIPEA_DATA_M1 0x60030 +#define _PIPEA_DATA_M1 0x60030 #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ #define TU_SIZE_MASK 0x7e000000 #define PIPE_DATA_M1_OFFSET 0 -#define PIPEA_DATA_N1 0x60034 +#define _PIPEA_DATA_N1 0x60034 #define PIPE_DATA_N1_OFFSET 0 -#define PIPEA_DATA_M2 0x60038 +#define _PIPEA_DATA_M2 0x60038 #define PIPE_DATA_M2_OFFSET 0 -#define PIPEA_DATA_N2 0x6003c +#define _PIPEA_DATA_N2 0x6003c #define PIPE_DATA_N2_OFFSET 0 -#define PIPEA_LINK_M1 0x60040 +#define _PIPEA_LINK_M1 0x60040 #define PIPE_LINK_M1_OFFSET 0 -#define PIPEA_LINK_N1 0x60044 +#define _PIPEA_LINK_N1 0x60044 #define PIPE_LINK_N1_OFFSET 0 -#define PIPEA_LINK_M2 0x60048 +#define _PIPEA_LINK_M2 0x60048 #define PIPE_LINK_M2_OFFSET 0 -#define PIPEA_LINK_N2 0x6004c +#define _PIPEA_LINK_N2 0x6004c #define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ -#define PIPEB_DATA_M1 0x61030 -#define PIPEB_DATA_N1 0x61034 +#define _PIPEB_DATA_M1 0x61030 +#define _PIPEB_DATA_N1 0x61034 -#define PIPEB_DATA_M2 0x61038 -#define PIPEB_DATA_N2 0x6103c +#define _PIPEB_DATA_M2 0x61038 +#define _PIPEB_DATA_N2 0x6103c -#define PIPEB_LINK_M1 0x61040 -#define PIPEB_LINK_N1 0x61044 +#define _PIPEB_LINK_M1 0x61040 +#define _PIPEB_LINK_N1 0x61044 -#define PIPEB_LINK_M2 0x61048 -#define PIPEB_LINK_N2 0x6104c +#define _PIPEB_LINK_M2 0x61048 +#define _PIPEB_LINK_N2 0x6104c -#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1) -#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1) -#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2) -#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2) -#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1) -#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1) -#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2) -#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2) +#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) +#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) +#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) +#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) +#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) +#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) +#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) +#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) /* CPU panel fitter */ -#define PFA_CTL_1 0x68080 -#define PFB_CTL_1 0x68880 +/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ +#define _PFA_CTL_1 0x68080 +#define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) #define PF_FILTER_EDGE_ENHANCE (2<<23) #define PF_FILTER_EDGE_SOFTEN (3<<23) -#define PFA_WIN_SZ 0x68074 -#define PFB_WIN_SZ 0x68874 -#define PFA_WIN_POS 0x68070 -#define PFB_WIN_POS 0x68870 +#define _PFA_WIN_SZ 0x68074 +#define _PFB_WIN_SZ 0x68874 +#define _PFA_WIN_POS 0x68070 +#define _PFB_WIN_POS 0x68870 +#define _PFA_VSCALE 0x68084 +#define _PFB_VSCALE 0x68884 +#define _PFA_HSCALE 0x68090 +#define _PFB_HSCALE 0x68890 + +#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1) +#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ) +#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS) +#define 
PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) +#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) /* legacy palette */ -#define LGC_PALETTE_A 0x4a000 -#define LGC_PALETTE_B 0x4a800 +#define _LGC_PALETTE_A 0x4a000 +#define _LGC_PALETTE_B 0x4a800 +#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) @@ -2875,17 +2905,17 @@ #define PCH_GMBUS4 0xc5110 #define PCH_GMBUS5 0xc5120 -#define PCH_DPLL_A 0xc6014 -#define PCH_DPLL_B 0xc6018 -#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) +#define _PCH_DPLL_A 0xc6014 +#define _PCH_DPLL_B 0xc6018 +#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B) -#define PCH_FPA0 0xc6040 +#define _PCH_FPA0 0xc6040 #define FP_CB_TUNE (0x3<<22) -#define PCH_FPA1 0xc6044 -#define PCH_FPB0 0xc6048 -#define PCH_FPB1 0xc604c -#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0) -#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1) +#define _PCH_FPA1 0xc6044 +#define _PCH_FPB0 0xc6048 +#define _PCH_FPB1 0xc604c +#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0) +#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1) #define PCH_DPLL_TEST 0xc606c @@ -2904,6 +2934,7 @@ #define DREF_NONSPREAD_SOURCE_MASK (3<<9) #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) +#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7) #define DREF_SSC4_DOWNSPREAD (0<<6) #define DREF_SSC4_CENTERSPREAD (1<<6) #define DREF_SSC1_DISABLE (0<<1) @@ -2936,60 +2967,69 @@ /* transcoder */ -#define TRANS_HTOTAL_A 0xe0000 +#define _TRANS_HTOTAL_A 0xe0000 #define TRANS_HTOTAL_SHIFT 16 #define TRANS_HACTIVE_SHIFT 0 -#define TRANS_HBLANK_A 0xe0004 +#define _TRANS_HBLANK_A 0xe0004 #define TRANS_HBLANK_END_SHIFT 16 #define TRANS_HBLANK_START_SHIFT 0 -#define TRANS_HSYNC_A 0xe0008 +#define _TRANS_HSYNC_A 0xe0008 #define TRANS_HSYNC_END_SHIFT 16 #define TRANS_HSYNC_START_SHIFT 0 -#define TRANS_VTOTAL_A 0xe000c +#define _TRANS_VTOTAL_A 0xe000c #define TRANS_VTOTAL_SHIFT 16 #define TRANS_VACTIVE_SHIFT 0 -#define TRANS_VBLANK_A 0xe0010 +#define _TRANS_VBLANK_A 0xe0010 #define TRANS_VBLANK_END_SHIFT 16 #define TRANS_VBLANK_START_SHIFT 0 -#define TRANS_VSYNC_A 0xe0014 +#define _TRANS_VSYNC_A 0xe0014 #define TRANS_VSYNC_END_SHIFT 16 #define TRANS_VSYNC_START_SHIFT 0 -#define TRANSA_DATA_M1 0xe0030 -#define TRANSA_DATA_N1 0xe0034 -#define TRANSA_DATA_M2 0xe0038 -#define TRANSA_DATA_N2 0xe003c -#define TRANSA_DP_LINK_M1 0xe0040 -#define TRANSA_DP_LINK_N1 0xe0044 -#define TRANSA_DP_LINK_M2 0xe0048 -#define TRANSA_DP_LINK_N2 0xe004c - -#define TRANS_HTOTAL_B 0xe1000 -#define TRANS_HBLANK_B 0xe1004 -#define TRANS_HSYNC_B 0xe1008 -#define TRANS_VTOTAL_B 0xe100c -#define TRANS_VBLANK_B 0xe1010 -#define TRANS_VSYNC_B 0xe1014 - -#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B) -#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B) -#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B) -#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B) -#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B) -#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B) - -#define TRANSB_DATA_M1 0xe1030 -#define TRANSB_DATA_N1 0xe1034 -#define TRANSB_DATA_M2 0xe1038 -#define TRANSB_DATA_N2 0xe103c -#define TRANSB_DP_LINK_M1 0xe1040 -#define TRANSB_DP_LINK_N1 0xe1044 -#define TRANSB_DP_LINK_M2 0xe1048 -#define TRANSB_DP_LINK_N2 0xe104c - -#define TRANSACONF 0xf0008 -#define TRANSBCONF 0xf1008 -#define 
TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF) +#define _TRANSA_DATA_M1 0xe0030 +#define _TRANSA_DATA_N1 0xe0034 +#define _TRANSA_DATA_M2 0xe0038 +#define _TRANSA_DATA_N2 0xe003c +#define _TRANSA_DP_LINK_M1 0xe0040 +#define _TRANSA_DP_LINK_N1 0xe0044 +#define _TRANSA_DP_LINK_M2 0xe0048 +#define _TRANSA_DP_LINK_N2 0xe004c + +#define _TRANS_HTOTAL_B 0xe1000 +#define _TRANS_HBLANK_B 0xe1004 +#define _TRANS_HSYNC_B 0xe1008 +#define _TRANS_VTOTAL_B 0xe100c +#define _TRANS_VBLANK_B 0xe1010 +#define _TRANS_VSYNC_B 0xe1014 + +#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B) +#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B) +#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B) +#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B) +#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B) +#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B) + +#define _TRANSB_DATA_M1 0xe1030 +#define _TRANSB_DATA_N1 0xe1034 +#define _TRANSB_DATA_M2 0xe1038 +#define _TRANSB_DATA_N2 0xe103c +#define _TRANSB_DP_LINK_M1 0xe1040 +#define _TRANSB_DP_LINK_N1 0xe1044 +#define _TRANSB_DP_LINK_M2 0xe1048 +#define _TRANSB_DP_LINK_N2 0xe104c + +#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1) +#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1) +#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2) +#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2) +#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1) +#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1) +#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2) +#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2) + +#define _TRANSACONF 0xf0008 +#define _TRANSBCONF 0xf1008 +#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF) #define TRANS_DISABLE (0<<31) #define TRANS_ENABLE (1<<31) #define TRANS_STATE_MASK (1<<30) @@ -3007,18 +3047,19 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) -#define FDI_RXA_CHICKEN 0xc200c -#define FDI_RXB_CHICKEN 0xc2010 -#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) -#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) +#define _FDI_RXA_CHICKEN 0xc200c +#define _FDI_RXB_CHICKEN 0xc2010 +#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) +#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) +#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) #define SOUTH_DSPCLK_GATE_D 0xc2020 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) /* CPU: FDI_TX */ -#define FDI_TXA_CTL 0x60100 -#define FDI_TXB_CTL 0x61100 -#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL) +#define _FDI_TXA_CTL 0x60100 +#define _FDI_TXB_CTL 0x61100 +#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) #define FDI_TX_DISABLE (0<<31) #define FDI_TX_ENABLE (1<<31) #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) @@ -3058,9 +3099,9 @@ #define FDI_SCRAMBLING_DISABLE (1<<7) /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ -#define FDI_RXA_CTL 0xf000c -#define FDI_RXB_CTL 0xf100c -#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL) +#define _FDI_RXA_CTL 0xf000c +#define _FDI_RXB_CTL 0xf100c +#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) #define FDI_RX_ENABLE (1<<31) /* train, dp width same as FDI_TX */ #define FDI_DP_PORT_WIDTH_X8 (7<<19) @@ -3085,15 +3126,15 @@ #define 
FDI_LINK_TRAIN_NORMAL_CPT (3<<8) #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) -#define FDI_RXA_MISC 0xf0010 -#define FDI_RXB_MISC 0xf1010 -#define FDI_RXA_TUSIZE1 0xf0030 -#define FDI_RXA_TUSIZE2 0xf0038 -#define FDI_RXB_TUSIZE1 0xf1030 -#define FDI_RXB_TUSIZE2 0xf1038 -#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC) -#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1) -#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2) +#define _FDI_RXA_MISC 0xf0010 +#define _FDI_RXB_MISC 0xf1010 +#define _FDI_RXA_TUSIZE1 0xf0030 +#define _FDI_RXA_TUSIZE2 0xf0038 +#define _FDI_RXB_TUSIZE1 0xf1030 +#define _FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) +#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) +#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) /* FDI_RX interrupt register format */ #define FDI_RX_INTER_LANE_ALIGN (1<<10) @@ -3108,12 +3149,12 @@ #define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) -#define FDI_RXA_IIR 0xf0014 -#define FDI_RXA_IMR 0xf0018 -#define FDI_RXB_IIR 0xf1014 -#define FDI_RXB_IMR 0xf1018 -#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR) -#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR) +#define _FDI_RXA_IIR 0xf0014 +#define _FDI_RXA_IMR 0xf0018 +#define _FDI_RXB_IIR 0xf1014 +#define _FDI_RXB_IMR 0xf1018 +#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) +#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) #define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_2 0xfe004 @@ -3143,11 +3184,15 @@ #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) +#define ADPA_PIPE_ENABLED(V, P) \ + (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) + /* or SDVOB */ #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) #define TRANSCODER_A (0) #define TRANSCODER_B (1 << 30) +#define TRANSCODER_MASK (1 << 30) #define COLOR_FORMAT_8bpc (0) #define COLOR_FORMAT_12bpc (3 << 26) #define SDVOB_HOTPLUG_ENABLE (1 << 23) @@ -3163,6 +3208,9 @@ #define HSYNC_ACTIVE_HIGH (1 << 3) #define PORT_DETECTED (1 << 2) +#define HDMI_PIPE_ENABLED(V, P) \ + (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) + /* PCH SDVOB multiplex with HDMIB */ #define PCH_SDVOB HDMIB @@ -3238,6 +3286,7 @@ #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) #define TRANS_DP_PORT_SEL_D (2<<29) +#define TRANS_DP_PORT_SEL_NONE (3<<29) #define TRANS_DP_PORT_SEL_MASK (3<<29) #define TRANS_DP_AUDIO_ONLY (1<<26) #define TRANS_DP_ENH_FRAMING (1<<18) @@ -3269,6 +3318,8 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_ACK 0x130090 +#define GT_FIFO_FREE_ENTRIES 0x120008 + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) @@ -3286,15 +3337,28 @@ #define GEN6_RP_DOWN_TIMEOUT 0xA010 #define GEN6_RP_INTERRUPT_LIMITS 0xA014 #define GEN6_RPSTAT1 0xA01C +#define GEN6_CAGF_SHIFT 8 +#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_USE_NORMAL_FREQ (1<<9) #define GEN6_RP_MEDIA_IS_GFX (1<<8) #define GEN6_RP_ENABLE (1<<7) -#define GEN6_RP_UP_BUSY_MAX (0x2<<3) -#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) +#define GEN6_RP_UP_IDLE_MIN (0x1<<3) +#define GEN6_RP_UP_BUSY_AVG (0x2<<3) +#define GEN6_RP_UP_BUSY_CONT (0x4<<3) +#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) #define 
GEN6_RP_UP_THRESHOLD 0xA02C #define GEN6_RP_DOWN_THRESHOLD 0xA030 +#define GEN6_RP_CUR_UP_EI 0xA050 +#define GEN6_CURICONT_MASK 0xffffff +#define GEN6_RP_CUR_UP 0xA054 +#define GEN6_CURBSYTAVG_MASK 0xffffff +#define GEN6_RP_PREV_UP 0xA058 +#define GEN6_RP_CUR_DOWN_EI 0xA05C +#define GEN6_CURIAVG_MASK 0xffffff +#define GEN6_RP_CUR_DOWN 0xA060 +#define GEN6_RP_PREV_DOWN 0xA064 #define GEN6_RP_UP_EI 0xA068 #define GEN6_RP_DOWN_EI 0xA06C #define GEN6_RP_IDLE_HYSTERSIS 0xA070 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 0521ecf2601..7e992a8e909 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; - if (HAS_PCH_SPLIT(dev)) { - dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; - } else { - dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; - } + if (HAS_PCH_SPLIT(dev)) + dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B; + else + dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); } @@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) static void i915_save_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; @@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) return; if (HAS_PCH_SPLIT(dev)) - reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; + reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; @@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe) static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); u32 *array; int i; @@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) return; if (HAS_PCH_SPLIT(dev)) - reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; + reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; if (pipe == PIPE_A) array = dev_priv->save_palette_a; @@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev) return; /* Cursor state */ - dev_priv->saveCURACNTR = I915_READ(CURACNTR); - dev_priv->saveCURAPOS = I915_READ(CURAPOS); - dev_priv->saveCURABASE = I915_READ(CURABASE); - dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); - dev_priv->saveCURBPOS = I915_READ(CURBPOS); - dev_priv->saveCURBBASE = I915_READ(CURBBASE); + dev_priv->saveCURACNTR = I915_READ(_CURACNTR); + dev_priv->saveCURAPOS = I915_READ(_CURAPOS); + dev_priv->saveCURABASE = I915_READ(_CURABASE); + dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(_CURBPOS); + dev_priv->saveCURBBASE = I915_READ(_CURBBASE); if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); @@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev) } /* Pipe & plane A info */ - dev_priv->savePIPEACONF = I915_READ(PIPEACONF); - dev_priv->savePIPEASRC = I915_READ(PIPEASRC); + dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->saveFPA0 = I915_READ(PCH_FPA0); - dev_priv->saveFPA1 = I915_READ(PCH_FPA1); - dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); + dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); + dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); + dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); } else { - dev_priv->saveFPA0 = I915_READ(FPA0); - dev_priv->saveFPA1 = I915_READ(FPA1); - dev_priv->saveDPLL_A = I915_READ(DPLL_A); + dev_priv->saveFPA0 = I915_READ(_FPA0); + dev_priv->saveFPA1 = I915_READ(_FPA1); + dev_priv->saveDPLL_A = I915_READ(_DPLL_A); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); - dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); - dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); - dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); - dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); - dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); - dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); + dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); if (!HAS_PCH_SPLIT(dev)) - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); - dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); - dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); - dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1); - - dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); - dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); - - dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1); - dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); - dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); - - dev_priv->saveTRANSACONF = I915_READ(TRANSACONF); - dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); - dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); - dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); - dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A); - dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A); - dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A); - } - - dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); - 
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); - dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); - dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); - dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); + dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); + dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); + dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); + dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); + + dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); + dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); + + dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); + dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); + dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); + + dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); + dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); + dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); + dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); + dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); + dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); + dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); + } + + dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); + dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->saveDSPASURF = I915_READ(DSPASURF); - dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); + dev_priv->saveDSPASURF = I915_READ(_DSPASURF); + dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); } i915_save_palette(dev, PIPE_A); - dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); + dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); /* Pipe & plane B info */ - dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); - dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); + dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); if (HAS_PCH_SPLIT(dev)) { - dev_priv->saveFPB0 = I915_READ(PCH_FPB0); - dev_priv->saveFPB1 = I915_READ(PCH_FPB1); - dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); + dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); + dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); + dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); } else { - dev_priv->saveFPB0 = I915_READ(FPB0); - dev_priv->saveFPB1 = I915_READ(FPB1); - dev_priv->saveDPLL_B = I915_READ(DPLL_B); + dev_priv->saveFPB0 = I915_READ(_FPB0); + dev_priv->saveFPB1 = I915_READ(_FPB1); + dev_priv->saveDPLL_B = I915_READ(_DPLL_B); } if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); - dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); - dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); - dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); - dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); - dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); - dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); + dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); + dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); if (!HAS_PCH_SPLIT(dev)) - dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); + dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); - dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); - 
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); - dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1); - - dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); - dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); - - dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1); - dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); - dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); - - dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF); - dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); - dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); - dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); - dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B); - dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B); - dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B); - } - - dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); - dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); - dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); - dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); - dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); + dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); + dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); + dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); + dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); + + dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); + dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); + + dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); + dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); + dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); + + dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); + dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); + dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); + dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); + dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); + dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); + dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); + } + + dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); + dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); - dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); + dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); } i915_save_palette(dev, PIPE_B); - dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); /* Fences */ switch (INTEL_INFO(dev)->gen) { @@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { - dpll_a_reg = PCH_DPLL_A; - dpll_b_reg = PCH_DPLL_B; - fpa0_reg = PCH_FPA0; - fpb0_reg = PCH_FPB0; - fpa1_reg = PCH_FPA1; - fpb1_reg = PCH_FPB1; + dpll_a_reg = _PCH_DPLL_A; + dpll_b_reg = _PCH_DPLL_B; + fpa0_reg = _PCH_FPA0; + fpb0_reg = _PCH_FPB0; + fpa1_reg = _PCH_FPA1; + fpb1_reg = _PCH_FPB1; } else { - dpll_a_reg = DPLL_A; - dpll_b_reg = DPLL_B; - fpa0_reg = FPA0; - fpb0_reg = FPB0; - fpa1_reg = FPA1; - fpb1_reg = FPB1; + dpll_a_reg = _DPLL_A; + dpll_b_reg = _DPLL_B; + fpa0_reg = _FPA0; + fpb0_reg = _FPB0; + fpa1_reg = _FPA1; + fpb1_reg = _FPB1; } if (HAS_PCH_SPLIT(dev)) { @@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev) POSTING_READ(dpll_a_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(DPLL_A_MD, 
dev_priv->saveDPLL_A_MD); - POSTING_READ(DPLL_A_MD); + I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); + POSTING_READ(_DPLL_A_MD); } udelay(150); /* Restore mode */ - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); + I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); - I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); - I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); - I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); + I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); + I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); + I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); + I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); - I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); - I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); + I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); + I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); - I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1); - I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); - I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); + I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); + I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); + I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); - I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF); - I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); - I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); - I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); - I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); - I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); - I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); + I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); + I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); + I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); + I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); + I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); + I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); + I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); } /* Restore plane info */ - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); - I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); - I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); + I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); } - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + I915_WRITE(_PIPEACONF, 
dev_priv->savePIPEACONF); i915_restore_palette(dev, PIPE_A); /* Enable the plane */ - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); - I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); + I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); /* Pipe & plane B info */ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { @@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev) POSTING_READ(dpll_b_reg); udelay(150); if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - POSTING_READ(DPLL_B_MD); + I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); + POSTING_READ(_DPLL_B_MD); } udelay(150); /* Restore mode */ - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); - I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); - I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); - I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); + I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); + I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); + I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); + I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); - I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); - I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); + I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); + I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); - I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1); - I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); - I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); + I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); + I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); + I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); - I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF); - I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); - I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); - I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); - I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); - I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); - I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); + I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); + I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); + I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); + I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); + I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); + I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); + I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); } /* Restore plane info */ - I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); - I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + 
I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_B); /* Enable the plane */ - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); - I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); /* Cursor state */ - I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); - I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); - I915_WRITE(CURABASE, dev_priv->saveCURABASE); - I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); - I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); - I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); + I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(_CURABASE, dev_priv->saveCURABASE); + I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); @@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveDP_B = I915_READ(DP_B); dev_priv->saveDP_C = I915_READ(DP_C); dev_priv->saveDP_D = I915_READ(DP_D); - dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); - dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); - dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); - dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); - dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); - dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); - dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); - dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); + dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); + dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); + dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); + dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); + dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); + dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); + dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); + dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); } /* FIXME: save TV & SDVO state */ @@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev) /* Display port ratios (must be done before clock is set) */ if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); - I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); - I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); - I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); - I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); - I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); - I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); - I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); + I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); + I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); 
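/*
 * Illustrative sketch, not part of this patch: the renaming above from the
 * plain PIPEA/PIPEB and DSPA/DSPB register names to underscore-prefixed
 * _PIPEA_... and _PIPEB_... definitions is what allows the pipe-indexed
 * lookup macros (PIPECONF(pipe), DPLL(pipe), DSPCNTR(plane), VTOTAL(pipe),
 * PF_CTL(pipe), ...) used later in this diff. Assuming a _PIPE()-style
 * offset helper -- the helper below and the exact register pairs are an
 * assumption based on how the A/B registers are paired here, not quoted
 * from the patch:
 */
#define _PIPE(pipe, a, b)   ((a) + (pipe) * ((b) - (a)))  /* pipe 0 -> a, pipe 1 -> b */
#define PIPECONF(pipe)      _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define DPLL(pipe)          _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DSPCNTR(plane)      _PIPE(plane, _DSPACNTR, _DSPBCNTR)
/* e.g. I915_READ(PIPECONF(pipe)) replaces the old "pipe == 0 ? PIPEACONF : PIPEBCONF" selection. */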
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); + I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); + I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); + I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); + I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); + I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } /* This is only meaningful in non-KMS mode */ @@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev) pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - /* Hardware status page */ - dev_priv->saveHWS = I915_READ(HWS_PGA); - i915_save_display(dev); /* Interrupt state */ @@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveDEIMR = I915_READ(DEIMR); dev_priv->saveGTIER = I915_READ(GTIER); dev_priv->saveGTIMR = I915_READ(GTIMR); - dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); - dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); + dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); + dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL); } else { @@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev) pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); - /* Hardware status page */ - I915_WRITE(HWS_PGA, dev_priv->saveHWS); - i915_restore_display(dev); /* Interrupt state */ @@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(DEIMR, dev_priv->saveDEIMR); I915_WRITE(GTIER, dev_priv->saveGTIER); I915_WRITE(GTIMR, dev_priv->saveGTIMR); - I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); - I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); + I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); + I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); } else { - I915_WRITE (IER, dev_priv->saveIER); - I915_WRITE (IMR, dev_priv->saveIMR); + I915_WRITE(IER, dev_priv->saveIER); + I915_WRITE(IMR, dev_priv->saveIMR); } /* Clock gating state */ diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7f0fc3ed61a..d623fefbfac 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -7,6 +7,7 @@ #include <drm/drmP.h> #include "i915_drv.h" +#include "intel_ringbuffer.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM i915 @@ -16,9 +17,7 @@ /* object tracking */ TRACE_EVENT(i915_gem_object_create, - TP_PROTO(struct drm_i915_gem_object *obj), - TP_ARGS(obj), TP_STRUCT__entry( @@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create, ); TRACE_EVENT(i915_gem_object_bind, - - TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), - - TP_ARGS(obj, gtt_offset, mappable), + TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), + TP_ARGS(obj, mappable), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) - __field(u32, gtt_offset) + __field(u32, offset) + __field(u32, size) __field(bool, mappable) ), TP_fast_assign( __entry->obj = obj; - __entry->gtt_offset = gtt_offset; + __entry->offset = obj->gtt_space->start; + __entry->size = obj->gtt_space->size; __entry->mappable = mappable; ), - TP_printk("obj=%p, gtt_offset=%08x%s", - __entry->obj, __entry->gtt_offset, + TP_printk("obj=%p, offset=%08x size=%x%s", + __entry->obj, __entry->offset, __entry->size, __entry->mappable ? 
", mappable" : "") ); -TRACE_EVENT(i915_gem_object_change_domain, +TRACE_EVENT(i915_gem_object_unbind, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, size) + ), - TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), + TP_fast_assign( + __entry->obj = obj; + __entry->offset = obj->gtt_space->start; + __entry->size = obj->gtt_space->size; + ), - TP_ARGS(obj, old_read_domains, old_write_domain), + TP_printk("obj=%p, offset=%08x size=%x", + __entry->obj, __entry->offset, __entry->size) +); + +TRACE_EVENT(i915_gem_object_change_domain, + TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), + TP_ARGS(obj, old_read, old_write), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) @@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain, TP_fast_assign( __entry->obj = obj; - __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); - __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); + __entry->read_domains = obj->base.read_domains | (old_read << 16); + __entry->write_domain = obj->base.write_domain | (old_write << 16); ), - TP_printk("obj=%p, read=%04x, write=%04x", + TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x", __entry->obj, - __entry->read_domains, __entry->write_domain) + __entry->read_domains >> 16, + __entry->read_domains & 0xffff, + __entry->write_domain >> 16, + __entry->write_domain & 0xffff) ); -DECLARE_EVENT_CLASS(i915_gem_object, +TRACE_EVENT(i915_gem_object_pwrite, + TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), + TP_ARGS(obj, offset, len), - TP_PROTO(struct drm_i915_gem_object *obj), + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, len) + ), - TP_ARGS(obj), + TP_fast_assign( + __entry->obj = obj; + __entry->offset = offset; + __entry->len = len; + ), + + TP_printk("obj=%p, offset=%u, len=%u", + __entry->obj, __entry->offset, __entry->len) +); + +TRACE_EVENT(i915_gem_object_pread, + TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len), + TP_ARGS(obj, offset, len), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) + __field(u32, offset) + __field(u32, len) ), TP_fast_assign( __entry->obj = obj; + __entry->offset = offset; + __entry->len = len; ), - TP_printk("obj=%p", __entry->obj) + TP_printk("obj=%p, offset=%u, len=%u", + __entry->obj, __entry->offset, __entry->len) ); -DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, +TRACE_EVENT(i915_gem_object_fault, + TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write), + TP_ARGS(obj, index, gtt, write), + + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + __field(u32, index) + __field(bool, gtt) + __field(bool, write) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->index = index; + __entry->gtt = gtt; + __entry->write = write; + ), + TP_printk("obj=%p, %s index=%u %s", + __entry->obj, + __entry->gtt ? "GTT" : "CPU", + __entry->index, + __entry->write ? 
", writable" : "") +); + +DECLARE_EVENT_CLASS(i915_gem_object, TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj), - TP_ARGS(obj) + TP_STRUCT__entry( + __field(struct drm_i915_gem_object *, obj) + ), + + TP_fast_assign( + __entry->obj = obj; + ), + + TP_printk("obj=%p", __entry->obj) ); -DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, +DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, + TP_PROTO(struct drm_i915_gem_object *obj), + TP_ARGS(obj) +); +DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, TP_PROTO(struct drm_i915_gem_object *obj), - TP_ARGS(obj) ); -DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, +TRACE_EVENT(i915_gem_evict, + TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable), + TP_ARGS(dev, size, align, mappable), - TP_PROTO(struct drm_i915_gem_object *obj), + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, size) + __field(u32, align) + __field(bool, mappable) + ), - TP_ARGS(obj) + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->size = size; + __entry->align = align; + __entry->mappable = mappable; + ), + + TP_printk("dev=%d, size=%d, align=%d %s", + __entry->dev, __entry->size, __entry->align, + __entry->mappable ? ", mappable" : "") ); -/* batch tracing */ +TRACE_EVENT(i915_gem_evict_everything, + TP_PROTO(struct drm_device *dev, bool purgeable), + TP_ARGS(dev, purgeable), -TRACE_EVENT(i915_gem_request_submit, + TP_STRUCT__entry( + __field(u32, dev) + __field(bool, purgeable) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->purgeable = purgeable; + ), - TP_PROTO(struct drm_device *dev, u32 seqno), + TP_printk("dev=%d%s", + __entry->dev, + __entry->purgeable ? ", purgeable only" : "") +); - TP_ARGS(dev, seqno), +TRACE_EVENT(i915_gem_ring_dispatch, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), TP_STRUCT__entry( __field(u32, dev) + __field(u32, ring) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev->primary->index; + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; __entry->seqno = seqno; - i915_trace_irq_get(dev, seqno); + i915_trace_irq_get(ring, seqno); ), - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, ring=%u, seqno=%u", + __entry->dev, __entry->ring, __entry->seqno) ); -TRACE_EVENT(i915_gem_request_flush, - - TP_PROTO(struct drm_device *dev, u32 seqno, - u32 flush_domains, u32 invalidate_domains), - - TP_ARGS(dev, seqno, flush_domains, invalidate_domains), +TRACE_EVENT(i915_gem_ring_flush, + TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush), + TP_ARGS(ring, invalidate, flush), TP_STRUCT__entry( __field(u32, dev) - __field(u32, seqno) - __field(u32, flush_domains) - __field(u32, invalidate_domains) + __field(u32, ring) + __field(u32, invalidate) + __field(u32, flush) ), TP_fast_assign( - __entry->dev = dev->primary->index; - __entry->seqno = seqno; - __entry->flush_domains = flush_domains; - __entry->invalidate_domains = invalidate_domains; + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; + __entry->invalidate = invalidate; + __entry->flush = flush; ), - TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x", - __entry->dev, __entry->seqno, - __entry->flush_domains, __entry->invalidate_domains) + TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x", + __entry->dev, __entry->ring, + __entry->invalidate, __entry->flush) ); DECLARE_EVENT_CLASS(i915_gem_request, - - TP_PROTO(struct drm_device *dev, u32 seqno), - - TP_ARGS(dev, seqno), + 
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno), TP_STRUCT__entry( __field(u32, dev) + __field(u32, ring) __field(u32, seqno) ), TP_fast_assign( - __entry->dev = dev->primary->index; + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; __entry->seqno = seqno; ), - TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) + TP_printk("dev=%u, ring=%u, seqno=%u", + __entry->dev, __entry->ring, __entry->seqno) ); -DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, - - TP_PROTO(struct drm_device *dev, u32 seqno), +DEFINE_EVENT(i915_gem_request, i915_gem_request_add, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) +); - TP_ARGS(dev, seqno) +DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) ); DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, - - TP_PROTO(struct drm_device *dev, u32 seqno), - - TP_ARGS(dev, seqno) + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) ); DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin, - - TP_PROTO(struct drm_device *dev, u32 seqno), - - TP_ARGS(dev, seqno) + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) ); DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, - - TP_PROTO(struct drm_device *dev, u32 seqno), - - TP_ARGS(dev, seqno) + TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), + TP_ARGS(ring, seqno) ); DECLARE_EVENT_CLASS(i915_ring, - - TP_PROTO(struct drm_device *dev), - - TP_ARGS(dev), + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring), TP_STRUCT__entry( __field(u32, dev) + __field(u32, ring) ), TP_fast_assign( - __entry->dev = dev->primary->index; + __entry->dev = ring->dev->primary->index; + __entry->ring = ring->id; ), - TP_printk("dev=%u", __entry->dev) + TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring) ); DEFINE_EVENT(i915_ring, i915_ring_wait_begin, - - TP_PROTO(struct drm_device *dev), - - TP_ARGS(dev) + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring) ); DEFINE_EVENT(i915_ring, i915_ring_wait_end, - - TP_PROTO(struct drm_device *dev), - - TP_ARGS(dev) + TP_PROTO(struct intel_ring_buffer *ring), + TP_ARGS(ring) ); TRACE_EVENT(i915_flip_request, @@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete, ); TRACE_EVENT(i915_reg_rw, - TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), + TP_PROTO(bool write, u32 reg, u64 val, int len), - TP_ARGS(cmd, reg, val, len), + TP_ARGS(write, reg, val, len), TP_STRUCT__entry( - __field(int, cmd) - __field(uint32_t, reg) - __field(uint64_t, val) - __field(int, len) + __field(u64, val) + __field(u32, reg) + __field(u16, write) + __field(u16, len) ), TP_fast_assign( - __entry->cmd = cmd; + __entry->val = (u64)val; __entry->reg = reg; - __entry->val = (uint64_t)val; + __entry->write = write; __entry->len = len; ), - TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", - __entry->cmd, __entry->reg, __entry->val, __entry->len) + TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)", + __entry->write ? 
"write" : "read", + __entry->reg, __entry->len, + (u32)(__entry->val & 0xffffffff), + (u32)(__entry->val >> 32)) ); #endif /* _I915_TRACE_H_ */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0b44956c336..fb5b4d426ae 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -226,29 +226,49 @@ static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { - struct bdb_sdvo_lvds_options *sdvo_lvds_options; struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; + int index; - sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); - if (!sdvo_lvds_options) - return; + index = i915_vbt_sdvo_panel_type; + if (index == -1) { + struct bdb_sdvo_lvds_options *sdvo_lvds_options; + + sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); + if (!sdvo_lvds_options) + return; + + index = sdvo_lvds_options->panel_type; + } dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); if (!dvo_timing) return; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); - if (!panel_fixed_mode) return; - fill_detail_timing_data(panel_fixed_mode, - dvo_timing + sdvo_lvds_options->panel_type); + fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; - return; + DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); + drm_mode_debug_printmodeline(panel_fixed_mode); +} + +static int intel_bios_ssc_frequency(struct drm_device *dev, + bool alternate) +{ + switch (INTEL_INFO(dev)->gen) { + case 2: + return alternate ? 66 : 48; + case 3: + case 4: + return alternate ? 100 : 96; + default: + return alternate ? 100 : 120; + } } static void @@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->int_tv_support = general->int_tv_support; dev_priv->int_crt_support = general->int_crt_support; dev_priv->lvds_use_ssc = general->enable_ssc; - - if (IS_I85X(dev)) - dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else if (IS_GEN5(dev) || IS_GEN6(dev)) - dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; - else - dev_priv->lvds_ssc_freq = general->ssc_freq ? 
100 : 96; + dev_priv->lvds_ssc_freq = + intel_bios_ssc_frequency(dev, general->ssc_freq); } } @@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv, static void init_vbt_defaults(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; /* LFP panel data */ @@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) /* general features */ dev_priv->int_tv_support = 1; dev_priv->int_crt_support = 1; - dev_priv->lvds_use_ssc = 0; + + /* Default to using SSC */ + dev_priv->lvds_use_ssc = 1; + dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); + DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); /* eDP data */ dev_priv->edp.bpp = 18; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 8a77ff4a723..8342259f316 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, u32 adpa, dpll_md; u32 adpa_reg; - if (intel_crtc->pipe == 0) - dpll_md_reg = DPLL_A_MD; - else - dpll_md_reg = DPLL_B_MD; + dpll_md_reg = DPLL_MD(intel_crtc->pipe); if (HAS_PCH_SPLIT(dev)) adpa_reg = PCH_ADPA; @@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, adpa |= PORT_TRANS_A_SEL_CPT; else adpa |= ADPA_PIPE_A_SELECT; - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_A, 0); } else { if (HAS_PCH_CPT(dev)) adpa |= PORT_TRANS_B_SEL_CPT; else adpa |= ADPA_PIPE_B_SELECT; - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT_B, 0); } + if (!HAS_PCH_SPLIT(dev)) + I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); + I915_WRITE(adpa_reg, adpa); } @@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) DRM_DEBUG_KMS("starting load-detect on CRT\n"); - if (pipe == 0) { - bclrpat_reg = BCLRPAT_A; - vtotal_reg = VTOTAL_A; - vblank_reg = VBLANK_A; - vsync_reg = VSYNC_A; - pipeconf_reg = PIPEACONF; - pipe_dsl_reg = PIPEADSL; - } else { - bclrpat_reg = BCLRPAT_B; - vtotal_reg = VTOTAL_B; - vblank_reg = VBLANK_B; - vsync_reg = VSYNC_B; - pipeconf_reg = PIPEBCONF; - pipe_dsl_reg = PIPEBDSL; - } + bclrpat_reg = BCLRPAT(pipe); + vtotal_reg = VTOTAL(pipe); + vblank_reg = VBLANK(pipe); + vsync_reg = VSYNC(pipe); + pipeconf_reg = PIPECONF(pipe); + pipe_dsl_reg = PIPEDSL(pipe); save_bclrpat = I915_READ(bclrpat_reg); save_vtotal = I915_READ(vtotal_reg); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7e42aa58650..3106c0dc838 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, void intel_wait_for_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); + int pipestat_reg = PIPESTAT(pipe); /* Clear existing vblank status. Note this will clear any other * sticky status fields as well. @@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) } } +static const char *state_string(bool enabled) +{ + return enabled ? 
"on" : "off"; +} + +/* Only for pre-ILK configs */ +static void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = DPLL(pipe); + val = I915_READ(reg); + cur_state = !!(val & DPLL_VCO_ENABLE); + WARN(cur_state != state, + "PLL state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_pll_enabled(d, p) assert_pll(d, p, true) +#define assert_pll_disabled(d, p) assert_pll(d, p, false) + +/* For ILK+ */ +static void assert_pch_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = PCH_DPLL(pipe); + val = I915_READ(reg); + cur_state = !!(val & DPLL_VCO_ENABLE); + WARN(cur_state != state, + "PCH PLL state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) +#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) + +static void assert_fdi_tx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = FDI_TX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_TX_ENABLE); + WARN(cur_state != state, + "FDI TX state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) +#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) + +static void assert_fdi_rx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + cur_state = !!(val & FDI_RX_ENABLE); + WARN(cur_state != state, + "FDI RX state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) +#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) + +static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* ILK FDI PLL is always enabled */ + if (dev_priv->info->gen == 5) + return; + + reg = FDI_TX_CTL(pipe); + val = I915_READ(reg); + WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); +} + +static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + reg = FDI_RX_CTL(pipe); + val = I915_READ(reg); + WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); +} + +static void assert_panel_unlocked(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int pp_reg, lvds_reg; + u32 val; + enum pipe panel_pipe = PIPE_A; + bool locked = locked; + + if (HAS_PCH_SPLIT(dev_priv->dev)) { + pp_reg = PCH_PP_CONTROL; + lvds_reg = PCH_LVDS; + } else { + pp_reg = PP_CONTROL; + lvds_reg = LVDS; + } + + val = I915_READ(pp_reg); + if (!(val & PANEL_POWER_ON) || + ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) + locked = false; + + if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) + panel_pipe = PIPE_B; + + WARN(panel_pipe == pipe && locked, + "panel assertion failure, pipe %c regs locked\n", + pipe_name(pipe)); +} + +static void assert_pipe(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + int reg; + u32 val; + bool cur_state; + + reg = PIPECONF(pipe); + val = I915_READ(reg); + cur_state = !!(val & 
PIPECONF_ENABLE); + WARN(cur_state != state, + "pipe %c assertion failure (expected %s, current %s)\n", + pipe_name(pipe), state_string(state), state_string(cur_state)); +} +#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) +#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) + +static void assert_plane_enabled(struct drm_i915_private *dev_priv, + enum plane plane) +{ + int reg; + u32 val; + + reg = DSPCNTR(plane); + val = I915_READ(reg); + WARN(!(val & DISPLAY_PLANE_ENABLE), + "plane %c assertion failure, should be active but is disabled\n", + plane_name(plane)); +} + +static void assert_planes_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg, i; + u32 val; + int cur_pipe; + + /* Planes are fixed to pipes on ILK+ */ + if (HAS_PCH_SPLIT(dev_priv->dev)) + return; + + /* Need to check both planes against the pipe */ + for (i = 0; i < 2; i++) { + reg = DSPCNTR(i); + val = I915_READ(reg); + cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> + DISPPLANE_SEL_PIPE_SHIFT; + WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, + "plane %c assertion failure, should be off on pipe %c but is still active\n", + plane_name(i), pipe_name(pipe)); + } +} + +static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) +{ + u32 val; + bool enabled; + + val = I915_READ(PCH_DREF_CONTROL); + enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | + DREF_SUPERSPREAD_SOURCE_MASK)); + WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); +} + +static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + bool enabled; + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + enabled = !!(val & TRANS_ENABLE); + WARN(enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); +} + +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + WARN(DP_PIPE_ENABLED(val, pipe), + "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", + reg, pipe_name(pipe)); +} + +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + WARN(HDMI_PIPE_ENABLED(val, pipe), + "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", + reg, pipe_name(pipe)); +} + +static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); + + reg = PCH_ADPA; + val = I915_READ(reg); + WARN(ADPA_PIPE_ENABLED(val, pipe), + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + reg = PCH_LVDS; + val = I915_READ(reg); + WARN(LVDS_PIPE_ENABLED(val, pipe), + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); +} + +/** + * intel_enable_pll - enable a PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * + * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to + * make sure the PLL reg is writable first though, since the panel write + * protect mechanism may be enabled. + * + * Note! This is for pre-ILK only. 
+ */ +static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + int reg; + u32 val; + + /* No really, not for ILK+ */ + BUG_ON(dev_priv->info->gen >= 5); + + /* PLL is protected by panel, make sure we can write it */ + if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) + assert_panel_unlocked(dev_priv, pipe); + + reg = DPLL(pipe); + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + + /* We do this three times for luck */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ +} + +/** + * intel_disable_pll - disable a PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to disable + * + * Disable the PLL for @pipe, making sure the pipe is off first. + * + * Note! This is for pre-ILK only. + */ +static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + int reg; + u32 val; + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + /* Make sure the pipe isn't still relying on us */ + assert_pipe_disabled(dev_priv, pipe); + + reg = DPLL(pipe); + val = I915_READ(reg); + val &= ~DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); +} + +/** + * intel_enable_pch_pll - enable PCH PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * + * The PCH PLL needs to be enabled before the PCH transcoder, since it + * drives the transcoder clock. + */ +static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + + /* PCH refclock must be enabled first */ + assert_pch_refclk_enabled(dev_priv); + + reg = PCH_DPLL(pipe); + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); +} + +static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + + /* Make sure transcoder isn't still depending on us */ + assert_transcoder_disabled(dev_priv, pipe); + + reg = PCH_DPLL(pipe); + val = I915_READ(reg); + val &= ~DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); +} + +static void intel_enable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* PCH only available on ILK+ */ + BUG_ON(dev_priv->info->gen < 5); + + /* Make sure PCH DPLL is enabled */ + assert_pch_pll_enabled(dev_priv, pipe); + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, pipe); + assert_fdi_rx_enabled(dev_priv, pipe); + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. 
+ */ + val &= ~PIPE_BPC_MASK; + val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; + I915_WRITE(reg, val | TRANS_ENABLE); + if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("failed to enable transcoder %d\n", pipe); +} + +static void intel_disable_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* FDI relies on the transcoder */ + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + + /* Ports must be off as well */ + assert_pch_ports_disabled(dev_priv, pipe); + + reg = TRANSCONF(pipe); + val = I915_READ(reg); + val &= ~TRANS_ENABLE; + I915_WRITE(reg, val); + /* wait for PCH transcoder off, transcoder state */ + if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) + DRM_ERROR("failed to disable transcoder\n"); +} + +/** + * intel_enable_pipe - enable a pipe, asserting requirements + * @dev_priv: i915 private structure + * @pipe: pipe to enable + * @pch_port: on ILK+, is this pipe driving a PCH port or not + * + * Enable @pipe, making sure that various hardware specific requirements + * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. + * + * @pipe should be %PIPE_A or %PIPE_B. + * + * Will wait until the pipe is actually running (i.e. first vblank) before + * returning. + */ +static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, + bool pch_port) +{ + int reg; + u32 val; + + /* + * A pipe without a PLL won't actually be able to drive bits from + * a plane. On ILK+ the pipe PLLs are integrated, so we don't + * need the check. + */ + if (!HAS_PCH_SPLIT(dev_priv->dev)) + assert_pll_enabled(dev_priv, pipe); + else { + if (pch_port) { + /* if driving the PCH, we need FDI enabled */ + assert_fdi_rx_pll_enabled(dev_priv, pipe); + assert_fdi_tx_pll_enabled(dev_priv, pipe); + } + /* FIXME: assert CPU port conditions for SNB+ */ + } + + reg = PIPECONF(pipe); + val = I915_READ(reg); + val |= PIPECONF_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +/** + * intel_disable_pipe - disable a pipe, asserting requirements + * @dev_priv: i915 private structure + * @pipe: pipe to disable + * + * Disable @pipe, making sure that various hardware specific requirements + * are met, if applicable, e.g. plane disabled, panel fitter off, etc. + * + * @pipe should be %PIPE_A or %PIPE_B. + * + * Will wait until the pipe has shut down before returning. + */ +static void intel_disable_pipe(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + int reg; + u32 val; + + /* + * Make sure planes won't keep trying to pump pixels to us, + * or we might hang the display. + */ + assert_planes_disabled(dev_priv, pipe); + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + reg = PIPECONF(pipe); + val = I915_READ(reg); + val &= ~PIPECONF_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + intel_wait_for_pipe_off(dev_priv->dev, pipe); +} + +/** + * intel_enable_plane - enable a display plane on a given pipe + * @dev_priv: i915 private structure + * @plane: plane to enable + * @pipe: pipe being fed + * + * Enable @plane on @pipe, making sure that @pipe is running first. 
+ */ +static void intel_enable_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) +{ + int reg; + u32 val; + + /* If the pipe isn't enabled, we can't pump pixels and may hang */ + assert_pipe_enabled(dev_priv, pipe); + + reg = DSPCNTR(plane); + val = I915_READ(reg); + val |= DISPLAY_PLANE_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +/* + * Plane regs are double buffered, going from enabled->disabled needs a + * trigger in order to latch. The display address reg provides this. + */ +static void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane) +{ + u32 reg = DSPADDR(plane); + I915_WRITE(reg, I915_READ(reg)); +} + +/** + * intel_disable_plane - disable a display plane + * @dev_priv: i915 private structure + * @plane: plane to disable + * @pipe: pipe consuming the data + * + * Disable @plane; should be an independent operation. + */ +static void intel_disable_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) +{ + int reg; + u32 val; + + reg = DSPCNTR(plane); + val = I915_READ(reg); + val &= ~DISPLAY_PLANE_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + intel_flush_display_plane(dev_priv, plane); + intel_wait_for_vblank(dev_priv->dev, pipe); +} + +static void disable_pch_dp(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + if (DP_PIPE_ENABLED(val, pipe)) + I915_WRITE(reg, val & ~DP_PORT_EN); +} + +static void disable_pch_hdmi(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + if (HDMI_PIPE_ENABLED(val, pipe)) + I915_WRITE(reg, val & ~PORT_ENABLE); +} + +/* Disable any ports connected to this transcoder */ +static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 reg, val; + + val = I915_READ(PCH_PP_CONTROL); + I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); + + disable_pch_dp(dev_priv, pipe, PCH_DP_B); + disable_pch_dp(dev_priv, pipe, PCH_DP_C); + disable_pch_dp(dev_priv, pipe, PCH_DP_D); + + reg = PCH_ADPA; + val = I915_READ(reg); + if (ADPA_PIPE_ENABLED(val, pipe)) + I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); + + reg = PCH_LVDS; + val = I915_READ(reg); + if (LVDS_PIPE_ENABLED(val, pipe)) { + I915_WRITE(reg, val & ~LVDS_PORT_EN); + POSTING_READ(reg); + udelay(100); + } + + disable_pch_hdmi(dev_priv, pipe, HDMIB); + disable_pch_hdmi(dev_priv, pipe, HDMIC); + disable_pch_hdmi(dev_priv, pipe, HDMID); +} + static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; @@ -1219,7 +1825,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) u32 blt_ecoskpd; /* Make sure blitter notifies FBC of writes */ - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << GEN6_BLITTER_LOCK_SHIFT; @@ -1230,7 +1836,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) GEN6_BLITTER_LOCK_SHIFT); I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); POSTING_READ(GEN6_BLITTER_ECOSKPD); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) @@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev) * - going to an unsupported config (interlace, pixel multiply, etc.) 
*/ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled) { + if (tmp_crtc->enabled && tmp_crtc->fb) { if (crtc) { DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; @@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined) { + struct drm_i915_private *dev_priv = dev->dev_private; u32 alignment; int ret; @@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } + dev_priv->mm.interruptible = false; ret = i915_gem_object_pin(obj, alignment, true); if (ret) - return ret; + goto err_interruptible; ret = i915_gem_object_set_to_display_plane(obj, pipelined); if (ret) @@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * a fence as the cost is not that onerous. */ if (obj->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence(obj, pipelined, false); + ret = i915_gem_object_get_fence(obj, pipelined); if (ret) goto err_unpin; } + dev_priv->mm.interruptible = true; return 0; err_unpin: i915_gem_object_unpin(obj); +err_interruptible: + dev_priv->mm.interruptible = true; return ret; } @@ -1630,19 +2241,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, + atomic_read(&dev_priv->mm.wedged) || atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. */ - ret = i915_gem_object_flush_gpu(obj, false); - if (ret) { - i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + ret = i915_gem_object_flush_gpu(obj); + (void) ret; } ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, @@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; u32 reg, temp, tries; + /* FDI needs bits from pipe & plane first */ + assert_pipe_enabled(dev_priv, pipe); + assert_plane_enabled(dev_priv, plane); + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); @@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) udelay(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); + if (HAS_PCH_IBX(dev)) { + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | + FDI_RX_PHASE_SYNC_POINTER_EN); + } reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { @@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) } -static const int const snb_b_fdi_train_param [] = { +static const int snb_b_fdi_train_param [] = { FDI_LINK_TRAIN_400MV_0DB_SNB_B, FDI_LINK_TRAIN_400MV_6DB_SNB_B, FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, @@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc) } } -static void intel_flush_display_plane(struct drm_device *dev, - int plane) +static void ironlake_fdi_disable(struct drm_crtc 
*crtc) { + struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg = DSPADDR(plane); - I915_WRITE(reg, I915_READ(reg)); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* disable CPU FDI tx and PCH FDI rx */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_ENABLE); + POSTING_READ(reg); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(0x7 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(100); + + /* Ironlake workaround, disable clock pointer after downing FDI */ + if (HAS_PCH_IBX(dev)) { + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_EN)); + } + + /* still set train pattern 1 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + /* BPC in FDI rx is consistent with that in PIPECONF */ + temp &= ~(0x07 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(100); } /* @@ -2045,60 +2713,46 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) atomic_read(&obj->pending_flip) == 0); } -static void ironlake_crtc_enable(struct drm_crtc *crtc) +static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - u32 reg, temp; - - if (intel_crtc->active) - return; - - intel_crtc->active = true; - intel_update_watermarks(dev); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - if ((temp & LVDS_PORT_EN) == 0) - I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); - } + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; - ironlake_fdi_enable(crtc); + /* + * If there's a non-PCH eDP on this crtc, it must be DP_A, and that + * must be driven by its own crtc; no sharing is possible. + */ + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + if (encoder->base.crtc != crtc) + continue; - /* Enable panel fitting for LVDS */ - if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, - PF_ENABLE | PF_FILTER_MED_3x3); - I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, - dev_priv->pch_pf_pos); - I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, - dev_priv->pch_pf_size); + switch (encoder->type) { + case INTEL_OUTPUT_EDP: + if (!intel_encoder_is_pch_edp(&encoder->base)) + return false; + continue; + } } - /* Enable CPU pipe */ - reg = PIPECONF(pipe); - temp = I915_READ(reg); - if ((temp & PIPECONF_ENABLE) == 0) { - I915_WRITE(reg, temp | PIPECONF_ENABLE); - POSTING_READ(reg); - intel_wait_for_vblank(dev, intel_crtc->pipe); - } + return true; +} - /* configure and enable CPU plane */ - reg = DSPCNTR(plane); - temp = I915_READ(reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev, plane); - } +/* + * Enable PCH resources required for PCH ports: + * - PCH PLLs + * - FDI training & RX/TX + * - update transcoder timings + * - DP transcoding bits + * - transcoder + */ +static void ironlake_pch_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; /* For PCH output, training FDI link */ if (IS_GEN6(dev)) @@ -2106,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) else ironlake_fdi_link_train(crtc); - /* enable PCH DPLL */ - reg = PCH_DPLL(pipe); - temp = I915_READ(reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(reg, temp | DPLL_VCO_ENABLE); - POSTING_READ(reg); - udelay(200); - } + intel_enable_pch_pll(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* Be sure PCH DPLL SEL is set */ @@ -2125,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(PCH_DPLL_SEL, temp); } - /* set transcoder timing */ + /* set transcoder timing, panel must allow it */ + assert_panel_unlocked(dev_priv, pipe); I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); @@ -2172,18 +2820,55 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(reg, temp); } - /* enable PCH transcoder */ - reg = TRANSCONF(pipe); - temp = I915_READ(reg); - /* - * make the BPC in transcoder be consistent with - * that in pipeconf reg. - */ - temp &= ~PIPE_BPC_MASK; - temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; - I915_WRITE(reg, temp | TRANS_ENABLE); - if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder %d\n", pipe); + intel_enable_transcoder(dev_priv, pipe); +} + +static void ironlake_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 temp; + bool is_pch_port; + + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + if ((temp & LVDS_PORT_EN) == 0) + I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); + } + + is_pch_port = intel_crtc_driving_pch(crtc); + + if (is_pch_port) + ironlake_fdi_enable(crtc); + else + ironlake_fdi_disable(crtc); + + /* Enable panel fitting for LVDS */ + if (dev_priv->pch_pf_size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. 
+ */ + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); + I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); + } + + intel_enable_pipe(dev_priv, pipe, is_pch_port); + intel_enable_plane(dev_priv, plane, pipe); + + if (is_pch_port) + ironlake_pch_enable(crtc); intel_crtc_load_lut(crtc); intel_update_fbc(dev); @@ -2206,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); - /* Disable display plane */ - reg = DSPCNTR(plane); - temp = I915_READ(reg); - if (temp & DISPLAY_PLANE_ENABLE) { - I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev, plane); - } + intel_disable_plane(dev_priv, plane, pipe); if (dev_priv->cfb_plane == plane && dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - /* disable cpu pipe, disable after all planes disabled */ - reg = PIPECONF(pipe); - temp = I915_READ(reg); - if (temp & PIPECONF_ENABLE) { - I915_WRITE(reg, temp & ~PIPECONF_ENABLE); - POSTING_READ(reg); - /* wait for cpu pipe off, pipe state */ - intel_wait_for_pipe_off(dev, intel_crtc->pipe); - } + intel_disable_pipe(dev_priv, pipe); /* Disable PF */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); - I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0); - - /* disable CPU FDI tx and PCH FDI rx */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_TX_ENABLE); - POSTING_READ(reg); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~(0x7 << 16); - temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; - I915_WRITE(reg, temp & ~FDI_RX_ENABLE); - - POSTING_READ(reg); - udelay(100); - - /* Ironlake workaround, disable clock pointer after downing FDI */ - if (HAS_PCH_IBX(dev)) - I915_WRITE(FDI_RX_CHICKEN(pipe), - I915_READ(FDI_RX_CHICKEN(pipe) & - ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); - - /* still set train pattern 1 */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - } - /* BPC in FDI rx is consistent with that in PIPECONF */ - temp &= ~(0x07 << 16); - temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; - I915_WRITE(reg, temp); + I915_WRITE(PF_CTL(pipe), 0); + I915_WRITE(PF_WIN_SZ(pipe), 0); - POSTING_READ(reg); - udelay(100); + ironlake_fdi_disable(crtc); - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - if (temp & LVDS_PORT_EN) { - I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); - POSTING_READ(PCH_LVDS); - udelay(100); - } - } + /* This is a horrible layering violation; we should be doing this in + * the connector/encoder ->prepare instead, but we don't always have + * enough information there about the config to know whether it will + * actually be necessary or just cause undesired flicker. 
+ */ + intel_disable_pch_ports(dev_priv, pipe); - /* disable PCH transcoder */ - reg = TRANSCONF(plane); - temp = I915_READ(reg); - if (temp & TRANS_ENABLE) { - I915_WRITE(reg, temp & ~TRANS_ENABLE); - /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) - DRM_ERROR("failed to disable transcoder\n"); - } + intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; I915_WRITE(reg, temp); /* disable DPLL_SEL */ temp = I915_READ(PCH_DPLL_SEL); - if (pipe == 0) - temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); - else + switch (pipe) { + case 0: + temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + break; + case 1: temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + break; + case 2: + /* FIXME: manage transcoder PLLs? */ + temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); + break; + default: + BUG(); /* wtf */ + } I915_WRITE(PCH_DPLL_SEL, temp); } /* disable PCH DPLL */ - reg = PCH_DPLL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); + intel_disable_pch_pll(dev_priv, pipe); /* Switch from PCDclk to Rawclk */ reg = FDI_RX_CTL(pipe); @@ -2372,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { if (!enable && intel_crtc->overlay) { struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; mutex_lock(&dev->struct_mutex); - (void) intel_overlay_switch_off(intel_crtc->overlay, false); + dev_priv->mm.interruptible = false; + (void) intel_overlay_switch_off(intel_crtc->overlay); + dev_priv->mm.interruptible = true; mutex_unlock(&dev->struct_mutex); } @@ -2390,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - u32 reg, temp; if (intel_crtc->active) return; @@ -2398,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - /* Enable the DPLL */ - reg = DPLL(pipe); - temp = I915_READ(reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(reg, temp); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(reg); - udelay(150); - - I915_WRITE(reg, temp | DPLL_VCO_ENABLE); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(reg); - udelay(150); - - I915_WRITE(reg, temp | DPLL_VCO_ENABLE); - - /* Wait for the clocks to stabilize. 
*/ - POSTING_READ(reg); - udelay(150); - } - - /* Enable the pipe */ - reg = PIPECONF(pipe); - temp = I915_READ(reg); - if ((temp & PIPECONF_ENABLE) == 0) - I915_WRITE(reg, temp | PIPECONF_ENABLE); - - /* Enable the plane */ - reg = DSPCNTR(plane); - temp = I915_READ(reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev, plane); - } + intel_enable_pll(dev_priv, pipe); + intel_enable_pipe(dev_priv, pipe, false); + intel_enable_plane(dev_priv, plane, pipe); intel_crtc_load_lut(crtc); intel_update_fbc(dev); @@ -2450,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - u32 reg, temp; if (!intel_crtc->active) return; @@ -2465,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - /* Disable display plane */ - reg = DSPCNTR(plane); - temp = I915_READ(reg); - if (temp & DISPLAY_PLANE_ENABLE) { - I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - intel_flush_display_plane(dev, plane); - - /* Wait for vblank for the disable to take effect */ - if (IS_GEN2(dev)) - intel_wait_for_vblank(dev, pipe); - } - - /* Don't disable pipe A or pipe A PLLs if needed */ - if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - goto done; - - /* Next, disable display pipes */ - reg = PIPECONF(pipe); - temp = I915_READ(reg); - if (temp & PIPECONF_ENABLE) { - I915_WRITE(reg, temp & ~PIPECONF_ENABLE); - - /* Wait for the pipe to turn off */ - POSTING_READ(reg); - intel_wait_for_pipe_off(dev, pipe); - } - - reg = DPLL(pipe); - temp = I915_READ(reg); - if (temp & DPLL_VCO_ENABLE) { - I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); - - /* Wait for the clocks to turn off. */ - POSTING_READ(reg); - udelay(150); - } + intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); + intel_disable_pll(dev_priv, pipe); -done: intel_crtc->active = false; intel_update_fbc(dev); intel_update_watermarks(dev); @@ -2565,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) master_priv->sarea_priv->pipeB_h = enabled ? 
crtc->mode.vdisplay : 0; break; default: - DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); + DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); break; } } @@ -2762,77 +3322,77 @@ struct intel_watermark_params { }; /* Pineview has different values for various configs */ -static struct intel_watermark_params pineview_display_wm = { +static const struct intel_watermark_params pineview_display_wm = { PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_WM, PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; -static struct intel_watermark_params pineview_display_hplloff_wm = { +static const struct intel_watermark_params pineview_display_hplloff_wm = { PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_HPLLOFF_WM, PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; -static struct intel_watermark_params pineview_cursor_wm = { +static const struct intel_watermark_params pineview_cursor_wm = { PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM, PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE, }; -static struct intel_watermark_params pineview_cursor_hplloff_wm = { +static const struct intel_watermark_params pineview_cursor_hplloff_wm = { PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM, PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE }; -static struct intel_watermark_params g4x_wm_info = { +static const struct intel_watermark_params g4x_wm_info = { G4X_FIFO_SIZE, G4X_MAX_WM, G4X_MAX_WM, 2, G4X_FIFO_LINE_SIZE, }; -static struct intel_watermark_params g4x_cursor_wm_info = { +static const struct intel_watermark_params g4x_cursor_wm_info = { I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, G4X_FIFO_LINE_SIZE, }; -static struct intel_watermark_params i965_cursor_wm_info = { +static const struct intel_watermark_params i965_cursor_wm_info = { I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM, 2, I915_FIFO_LINE_SIZE, }; -static struct intel_watermark_params i945_wm_info = { +static const struct intel_watermark_params i945_wm_info = { I945_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE }; -static struct intel_watermark_params i915_wm_info = { +static const struct intel_watermark_params i915_wm_info = { I915_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE }; -static struct intel_watermark_params i855_wm_info = { +static const struct intel_watermark_params i855_wm_info = { I855GM_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE }; -static struct intel_watermark_params i830_wm_info = { +static const struct intel_watermark_params i830_wm_info = { I830_FIFO_SIZE, I915_MAX_WM, 1, @@ -2840,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = { I830_FIFO_LINE_SIZE }; -static struct intel_watermark_params ironlake_display_wm_info = { +static const struct intel_watermark_params ironlake_display_wm_info = { ILK_DISPLAY_FIFO, ILK_DISPLAY_MAXWM, ILK_DISPLAY_DFTWM, 2, ILK_FIFO_LINE_SIZE }; - -static struct intel_watermark_params ironlake_cursor_wm_info = { +static const struct intel_watermark_params ironlake_cursor_wm_info = { ILK_CURSOR_FIFO, ILK_CURSOR_MAXWM, ILK_CURSOR_DFTWM, 2, ILK_FIFO_LINE_SIZE }; - -static struct intel_watermark_params ironlake_display_srwm_info = { +static const struct intel_watermark_params ironlake_display_srwm_info = { ILK_DISPLAY_SR_FIFO, ILK_DISPLAY_MAX_SRWM, ILK_DISPLAY_DFT_SRWM, 2, ILK_FIFO_LINE_SIZE }; - -static struct intel_watermark_params ironlake_cursor_srwm_info = { +static const struct intel_watermark_params ironlake_cursor_srwm_info = { ILK_CURSOR_SR_FIFO, ILK_CURSOR_MAX_SRWM, ILK_CURSOR_DFT_SRWM, 
@@ -2872,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { ILK_FIFO_LINE_SIZE }; -static struct intel_watermark_params sandybridge_display_wm_info = { +static const struct intel_watermark_params sandybridge_display_wm_info = { SNB_DISPLAY_FIFO, SNB_DISPLAY_MAXWM, SNB_DISPLAY_DFTWM, 2, SNB_FIFO_LINE_SIZE }; - -static struct intel_watermark_params sandybridge_cursor_wm_info = { +static const struct intel_watermark_params sandybridge_cursor_wm_info = { SNB_CURSOR_FIFO, SNB_CURSOR_MAXWM, SNB_CURSOR_DFTWM, 2, SNB_FIFO_LINE_SIZE }; - -static struct intel_watermark_params sandybridge_display_srwm_info = { +static const struct intel_watermark_params sandybridge_display_srwm_info = { SNB_DISPLAY_SR_FIFO, SNB_DISPLAY_MAX_SRWM, SNB_DISPLAY_DFT_SRWM, 2, SNB_FIFO_LINE_SIZE }; - -static struct intel_watermark_params sandybridge_cursor_srwm_info = { +static const struct intel_watermark_params sandybridge_cursor_srwm_info = { SNB_CURSOR_SR_FIFO, SNB_CURSOR_MAX_SRWM, SNB_CURSOR_DFT_SRWM, @@ -2924,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = { * will occur, and a display engine hang could result. */ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, - struct intel_watermark_params *wm, + const struct intel_watermark_params *wm, + int fifo_size, int pixel_size, unsigned long latency_ns) { @@ -2942,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); - wm_size = wm->fifo_size - (entries_required + wm->guard_size); + wm_size = fifo_size - (entries_required + wm->guard_size); DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); @@ -3115,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) return size; } -static void pineview_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int unused, - int pixel_size) +static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) +{ + struct drm_crtc *crtc, *enabled = NULL; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (crtc->enabled && crtc->fb) { + if (enabled) + return NULL; + enabled = crtc; + } + } + + return enabled; +} + +static void pineview_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; const struct cxsr_latency *latency; u32 reg; unsigned long wm; - int sr_clock; latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, dev_priv->fsb_freq, dev_priv->mem_freq); @@ -3133,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, return; } - if (!planea_clock || !planeb_clock) { - sr_clock = planea_clock ? 
planea_clock : planeb_clock; + crtc = single_enabled_crtc(dev); + if (crtc) { + int clock = crtc->mode.clock; + int pixel_size = crtc->fb->bits_per_pixel / 8; /* Display SR */ - wm = intel_calculate_wm(sr_clock, &pineview_display_wm, + wm = intel_calculate_wm(clock, &pineview_display_wm, + pineview_display_wm.fifo_size, pixel_size, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; @@ -3146,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm, + wm = intel_calculate_wm(clock, &pineview_cursor_wm, + pineview_display_wm.fifo_size, pixel_size, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; @@ -3154,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(DSPFW3, reg); /* Display HPLL off SR */ - wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm, + wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, pixel_size, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; @@ -3162,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(DSPFW3, reg); /* cursor HPLL off SR */ - wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm, + wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, + pineview_display_hplloff_wm.fifo_size, pixel_size, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; @@ -3180,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, } } -static void g4x_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size) +static bool g4x_compute_wm0(struct drm_device *dev, + int plane, + const struct intel_watermark_params *display, + int display_latency_ns, + const struct intel_watermark_params *cursor, + int cursor_latency_ns, + int *plane_wm, + int *cursor_wm) { - struct drm_i915_private *dev_priv = dev->dev_private; - int total_size, cacheline_size; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr; - struct intel_watermark_params planea_params, planeb_params; - unsigned long line_time_us; - int sr_clock, sr_entries = 0, entries_required; + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size; + int line_time_us, line_count; + int entries, tlb_miss; - /* Create copies of the base settings for each pipe */ - planea_params = planeb_params = g4x_wm_info; + crtc = intel_get_crtc_for_plane(dev, plane); + if (crtc->fb == NULL || !crtc->enabled) + return false; - /* Grab a couple of global values before we overwrite them */ - total_size = planea_params.fifo_size; - cacheline_size = planea_params.cacheline_size; + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; - /* - * Note: we need to make sure we don't overflow for various clock & - * latency values. - * clocks go from a few thousand to several hundred thousand. 
- * latency is usually a few thousand - */ - entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / - 1000; - entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); - planea_wm = entries_required + planea_params.guard_size; + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if (*plane_wm > (int)display->max_wm) + *plane_wm = display->max_wm; - entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / - 1000; - entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); - planeb_wm = entries_required + planeb_params.guard_size; + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; + if (tlb_miss > 0) + entries += tlb_miss; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; - cursora_wm = cursorb_wm = 16; - cursor_sr = 32; + return true; +} - DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); +/* + * Check the wm result. + * + * If any calculated watermark values is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + */ +static bool g4x_check_srwm(struct drm_device *dev, + int display_wm, int cursor_wm, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor) +{ + DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", + display_wm, cursor_wm); - /* Calc sr entries for one plane configs */ - if (sr_hdisplay && (!planea_clock || !planeb_clock)) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 12000; + if (display_wm > display->max_wm) { + DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n", + display_wm, display->max_wm); + return false; + } - sr_clock = planea_clock ? 
planea_clock : planeb_clock; - line_time_us = ((sr_htotal * 1000) / sr_clock); + if (cursor_wm > cursor->max_wm) { + DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n", + cursor_wm, cursor->max_wm); + return false; + } - /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; - sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); - - entries_required = (((sr_latency_ns / line_time_us) + - 1000) / 1000) * pixel_size * 64; - entries_required = DIV_ROUND_UP(entries_required, - g4x_cursor_wm_info.cacheline_size); - cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; - - if (cursor_sr > g4x_cursor_wm_info.max_wm) - cursor_sr = g4x_cursor_wm_info.max_wm; - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", sr_entries, cursor_sr); + if (!(display_wm || cursor_wm)) { + DRM_DEBUG_KMS("SR latency is 0, disabling\n"); + return false; + } - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + return true; +} + +static bool g4x_compute_srwm(struct drm_device *dev, + int plane, + int latency_ns, + const struct intel_watermark_params *display, + const struct intel_watermark_params *cursor, + int *display_wm, int *cursor_wm) +{ + struct drm_crtc *crtc; + int hdisplay, htotal, pixel_size, clock; + unsigned long line_time_us; + int line_count, line_size; + int small, large; + int entries; + + if (!latency_ns) { + *display_wm = *cursor_wm = 0; + return false; } - DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", - planea_wm, planeb_wm, sr_entries); + crtc = intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); + *display_wm = entries + display->guard_size; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + + return g4x_check_srwm(dev, + *display_wm, *cursor_wm, + display, cursor); +} + +static inline bool single_plane_enabled(unsigned int mask) +{ + return mask && (mask & -mask) == 0; +} + +static void g4x_update_wm(struct drm_device *dev) +{ + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, cursora_wm, cursorb_wm; + int plane_sr, cursor_sr; + unsigned int enabled = 0; + + if (g4x_compute_wm0(dev, 0, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1; + + if (g4x_compute_wm0(dev, 1, + &g4x_wm_info, latency_ns, + &g4x_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 2; + + plane_sr = cursor_sr = 0; + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &g4x_wm_info, + &g4x_cursor_wm_info, + &plane_sr, &cursor_sr)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + else + I915_WRITE(FW_BLC_SELF, + 
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); - planea_wm &= 0x3f; - planeb_wm &= 0x3f; + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); - I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) | + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm); - I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + planea_wm); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | (cursora_wm << DSPFW_CURSORA_SHIFT)); /* HPLL off in SR has some issues on G4x... disable it */ - I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } -static void i965_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size) +static void i965_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long line_time_us; - int sr_clock, sr_entries, srwm = 1; + struct drm_crtc *crtc; + int srwm = 1; int cursor_sr = 16; /* Calc sr entries for one plane configs */ - if (sr_hdisplay && (!planea_clock || !planeb_clock)) { + crtc = single_enabled_crtc(dev); + if (crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 12000; + int clock = crtc->mode.clock; + int htotal = crtc->mode.htotal; + int hdisplay = crtc->mode.hdisplay; + int pixel_size = crtc->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; - sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_htotal * 1000) / sr_clock); + line_time_us = ((htotal * 1000) / clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; - sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); - DRM_DEBUG("self-refresh entries: %d\n", sr_entries); - srwm = I965_FIFO_SIZE - sr_entries; + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; if (srwm < 0) srwm = 1; srwm &= 0x1ff; + DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", + entries, srwm); - sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * pixel_size * 64; - sr_entries = DIV_ROUND_UP(sr_entries, + entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - - (sr_entries + i965_cursor_wm_info.guard_size); + (entries + i965_cursor_wm_info.guard_size); if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; @@ -3319,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm); /* 965 has limitations... 
*/ - I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | - (8 << 0)); + I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | + (8 << 16) | (8 << 8) | (8 << 0)); I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); /* update cursor SR watermark */ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } -static void i9xx_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size) +static void i9xx_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_watermark_params *wm_info; uint32_t fwater_lo; uint32_t fwater_hi; - int total_size, cacheline_size, cwm, srwm = 1; + int cwm, srwm = 1; + int fifo_size; int planea_wm, planeb_wm; - struct intel_watermark_params planea_params, planeb_params; - unsigned long line_time_us; - int sr_clock, sr_entries = 0; + struct drm_crtc *crtc, *enabled = NULL; - /* Create copies of the base settings for each pipe */ - if (IS_CRESTLINE(dev) || IS_I945GM(dev)) - planea_params = planeb_params = i945_wm_info; + if (IS_I945GM(dev)) + wm_info = &i945_wm_info; else if (!IS_GEN2(dev)) - planea_params = planeb_params = i915_wm_info; + wm_info = &i915_wm_info; else - planea_params = planeb_params = i855_wm_info; - - /* Grab a couple of global values before we overwrite them */ - total_size = planea_params.fifo_size; - cacheline_size = planea_params.cacheline_size; - - /* Update per-plane FIFO sizes */ - planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0); - planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1); + wm_info = &i855_wm_info; + + fifo_size = dev_priv->display.get_fifo_size(dev, 0); + crtc = intel_get_crtc_for_plane(dev, 0); + if (crtc->enabled && crtc->fb) { + planea_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + enabled = crtc; + } else + planea_wm = fifo_size - wm_info->guard_size; + + fifo_size = dev_priv->display.get_fifo_size(dev, 1); + crtc = intel_get_crtc_for_plane(dev, 1); + if (crtc->enabled && crtc->fb) { + planeb_wm = intel_calculate_wm(crtc->mode.clock, + wm_info, fifo_size, + crtc->fb->bits_per_pixel / 8, + latency_ns); + if (enabled == NULL) + enabled = crtc; + else + enabled = NULL; + } else + planeb_wm = fifo_size - wm_info->guard_size; - planea_wm = intel_calculate_wm(planea_clock, &planea_params, - pixel_size, latency_ns); - planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, - pixel_size, latency_ns); DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); /* @@ -3366,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, */ cwm = 2; + /* Play safe and disable self-refresh before adjusting watermarks. */ + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); + /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev) && sr_hdisplay && - (!planea_clock || !planeb_clock)) { + if (HAS_FW_BLC(dev) && enabled) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; + int clock = enabled->mode.clock; + int htotal = enabled->mode.htotal; + int hdisplay = enabled->mode.hdisplay; + int pixel_size = enabled->fb->bits_per_pixel / 8; + unsigned long line_time_us; + int entries; - sr_clock = planea_clock ? 
planea_clock : planeb_clock; - line_time_us = ((sr_htotal * 1000) / sr_clock); + line_time_us = (htotal * 1000) / clock; /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; - sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); - DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); - srwm = total_size - sr_entries; + entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * hdisplay; + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; if (srwm < 0) srwm = 1; if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else if (IS_I915GM(dev)) { - /* 915M has a smaller SRWM field */ + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else if (IS_I915GM(dev)) I915_WRITE(FW_BLC_SELF, srwm & 0x3f); - I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); - } - } else { - /* Turn off self refresh if both pipes are enabled */ - if (IS_I945G(dev) || IS_I945GM(dev)) { - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); - } else if (IS_I915GM(dev)) { - I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); - } } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -3413,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(FW_BLC, fwater_lo); I915_WRITE(FW_BLC2, fwater_hi); + + if (HAS_FW_BLC(dev)) { + if (enabled) { + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, + FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + else if (IS_I915GM(dev)) + I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); + DRM_DEBUG_KMS("memory self refresh enabled\n"); + } else + DRM_DEBUG_KMS("memory self refresh disabled\n"); + } } -static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, - int unused2, int unused3, int pixel_size) +static void i830_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; + struct drm_crtc *crtc; + uint32_t fwater_lo; int planea_wm; - i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0); + crtc = single_enabled_crtc(dev); + if (crtc == NULL) + return; - planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, - pixel_size, latency_ns); + planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, + dev_priv->display.get_fifo_size(dev, 0), + crtc->fb->bits_per_pixel / 8, + latency_ns); + fwater_lo = I915_READ(FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); @@ -3534,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level, /* * Compute watermark values of WM[1-3], */ -static bool ironlake_compute_srwm(struct drm_device *dev, int level, - int hdisplay, int htotal, - int pixel_size, int clock, int latency_ns, +static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, + int latency_ns, const struct intel_watermark_params *display, const struct intel_watermark_params *cursor, int *fbc_wm, int *display_wm, int *cursor_wm) { - + struct drm_crtc *crtc; unsigned long line_time_us; + int hdisplay, htotal, pixel_size, clock; int line_count, line_size; int small, large; int entries; @@ -3552,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, return false; } + crtc = 
intel_get_crtc_for_plane(dev, plane); + hdisplay = crtc->mode.hdisplay; + htotal = crtc->mode.htotal; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + line_time_us = (htotal * 1000) / clock; line_count = (latency_ns / line_time_us + 1000) / 1000; line_size = hdisplay * pixel_size; @@ -3579,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, display, cursor); } -static void ironlake_update_wm(struct drm_device *dev, - int planea_clock, int planeb_clock, - int hdisplay, int htotal, - int pixel_size) +static void ironlake_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int fbc_wm, plane_wm, cursor_wm, enabled; - int clock; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; enabled = 0; if (ironlake_compute_wm0(dev, 0, @@ -3600,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev, DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled++; + enabled |= 1; } if (ironlake_compute_wm0(dev, 1, @@ -3614,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev, DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled++; + enabled |= 2; } /* @@ -3625,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev, I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - if (enabled != 1) + if (!single_plane_enabled(enabled)) return; - - clock = planea_clock ? planea_clock : planeb_clock; + enabled = ffs(enabled) - 1; /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, - clock, ILK_READ_WM1_LATENCY() * 500, + if (!ironlake_compute_srwm(dev, 1, enabled, + ILK_READ_WM1_LATENCY() * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -3646,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev, cursor_wm); /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size, - clock, ILK_READ_WM2_LATENCY() * 500, + if (!ironlake_compute_srwm(dev, 2, enabled, + ILK_READ_WM2_LATENCY() * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -3666,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev, */ } -static void sandybridge_update_wm(struct drm_device *dev, - int planea_clock, int planeb_clock, - int hdisplay, int htotal, - int pixel_size) +static void sandybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ - int fbc_wm, plane_wm, cursor_wm, enabled; - int clock; + int fbc_wm, plane_wm, cursor_wm; + unsigned int enabled; enabled = 0; if (ironlake_compute_wm0(dev, 0, @@ -3686,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev, DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled++; + enabled |= 1; } if (ironlake_compute_wm0(dev, 1, @@ -3698,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev, DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled++; + enabled |= 2; } /* @@ -3715,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev, I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); - if (enabled != 1) + if (!single_plane_enabled(enabled)) return; - - clock = planea_clock ? 
planea_clock : planeb_clock; + enabled = ffs(enabled) - 1; /* WM1 */ - if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, - clock, SNB_READ_WM1_LATENCY() * 500, + if (!ironlake_compute_srwm(dev, 1, enabled, + SNB_READ_WM1_LATENCY() * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -3736,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev, cursor_wm); /* WM2 */ - if (!ironlake_compute_srwm(dev, 2, - hdisplay, htotal, pixel_size, - clock, SNB_READ_WM2_LATENCY() * 500, + if (!ironlake_compute_srwm(dev, 2, enabled, + SNB_READ_WM2_LATENCY() * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -3752,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev, cursor_wm); /* WM3 */ - if (!ironlake_compute_srwm(dev, 3, - hdisplay, htotal, pixel_size, - clock, SNB_READ_WM3_LATENCY() * 500, + if (!ironlake_compute_srwm(dev, 3, enabled, + SNB_READ_WM3_LATENCY() * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -3803,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev, static void intel_update_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - int sr_hdisplay = 0; - unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; - int enabled = 0, pixel_size = 0; - int sr_htotal = 0; - - if (!dev_priv->display.update_wm) - return; - - /* Get the clock config from both planes */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->active) { - enabled++; - if (intel_crtc->plane == 0) { - DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); - planea_clock = crtc->mode.clock; - } else { - DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); - planeb_clock = crtc->mode.clock; - } - sr_hdisplay = crtc->mode.hdisplay; - sr_clock = crtc->mode.clock; - sr_htotal = crtc->mode.htotal; - if (crtc->fb) - pixel_size = crtc->fb->bits_per_pixel / 8; - else - pixel_size = 4; /* by default */ - } - } - - if (enabled <= 0) - return; - dev_priv->display.update_wm(dev, planea_clock, planeb_clock, - sr_hdisplay, sr_htotal, pixel_size); + if (dev_priv->display.update_wm) + dev_priv->display.update_wm(dev); } static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) @@ -3872,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int ret; struct fdi_m_n m_n = {0}; u32 reg, temp; + u32 lvds_sync = 0; int target_clock; drm_vblank_pre_modeset(dev, pipe); @@ -4243,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, pipeconf &= ~PIPECONF_DOUBLE_WIDE; } - dspcntr |= DISPLAY_PLANE_ENABLE; - pipeconf |= PIPECONF_ENABLE; - dpll |= DPLL_VCO_ENABLE; + if (!HAS_PCH_SPLIT(dev)) + dpll |= DPLL_VCO_ENABLE; DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); @@ -4271,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* enable transcoder DPLL */ if (HAS_PCH_CPT(dev)) { temp = I915_READ(PCH_DPLL_SEL); - if (pipe == 0) + switch (pipe) { + case 0: temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; - else + break; + case 1: temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; + break; + case 2: + /* FIXME: manage transcoder PLLs? 
*/ + temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; + break; + default: + BUG(); + } I915_WRITE(PCH_DPLL_SEL, temp); POSTING_READ(PCH_DPLL_SEL); @@ -4324,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else temp &= ~LVDS_ENABLE_DITHER; } + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + lvds_sync |= LVDS_HSYNC_POLARITY; + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) + lvds_sync |= LVDS_VSYNC_POLARITY; + if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) + != lvds_sync) { + char flags[2] = "-+"; + DRM_INFO("Changing LVDS panel from " + "(%chsync, %cvsync) to (%chsync, %cvsync)\n", + flags[!(temp & LVDS_HSYNC_POLARITY)], + flags[!(temp & LVDS_VSYNC_POLARITY)], + flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], + flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); + temp |= lvds_sync; + } I915_WRITE(reg, temp); } @@ -4341,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_dp_set_m_n(crtc, mode, adjusted_mode); } else if (HAS_PCH_SPLIT(dev)) { /* For non-DP output, clear any trans DP clock recovery setting.*/ - if (pipe == 0) { - I915_WRITE(TRANSA_DATA_M1, 0); - I915_WRITE(TRANSA_DATA_N1, 0); - I915_WRITE(TRANSA_DP_LINK_M1, 0); - I915_WRITE(TRANSA_DP_LINK_N1, 0); - } else { - I915_WRITE(TRANSB_DATA_M1, 0); - I915_WRITE(TRANSB_DATA_N1, 0); - I915_WRITE(TRANSB_DP_LINK_M1, 0); - I915_WRITE(TRANSB_DP_LINK_N1, 0); - } + I915_WRITE(TRANSDATA_M1(pipe), 0); + I915_WRITE(TRANSDATA_N1(pipe), 0); + I915_WRITE(TRANSDPLINK_M1(pipe), 0); + I915_WRITE(TRANSDPLINK_N1(pipe), 0); } if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { @@ -4454,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PIPECONF(pipe), pipeconf); POSTING_READ(PIPECONF(pipe)); + if (!HAS_PCH_SPLIT(dev)) + intel_enable_pipe(dev_priv, pipe, false); intel_wait_for_vblank(dev, pipe); @@ -4464,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } I915_WRITE(DSPCNTR(plane), dspcntr); + POSTING_READ(DSPCNTR(plane)); + if (!HAS_PCH_SPLIT(dev)) + intel_enable_plane(dev_priv, plane, pipe); ret = intel_pipe_set_base(crtc, x, y, old_fb); @@ -4480,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; + int palreg = PALETTE(intel_crtc->pipe); int i; /* The clocks have to be on to load the palette. */ @@ -4489,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) /* use legacy palette for Ironlake */ if (HAS_PCH_SPLIT(dev)) - palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : - LGC_PALETTE_B; + palreg = LGC_PALETTE(intel_crtc->pipe); for (i = 0; i < 256; i++) { I915_WRITE(palreg + 4 * i, @@ -4511,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) if (intel_crtc->cursor_visible == visible) return; - cntl = I915_READ(CURACNTR); + cntl = I915_READ(_CURACNTR); if (visible) { /* On these chipsets we can only modify the base whilst * the cursor is disabled. 
*/ - I915_WRITE(CURABASE, base); + I915_WRITE(_CURABASE, base); cntl &= ~(CURSOR_FORMAT_MASK); /* XXX width must be 64, stride 256 => 0x00 << 28 */ @@ -4525,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) CURSOR_FORMAT_ARGB; } else cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); - I915_WRITE(CURACNTR, cntl); + I915_WRITE(_CURACNTR, cntl); intel_crtc->cursor_visible = visible; } @@ -4539,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) bool visible = base != 0; if (intel_crtc->cursor_visible != visible) { - uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); + uint32_t cntl = I915_READ(CURCNTR(pipe)); if (base) { cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; @@ -4548,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); cntl |= CURSOR_MODE_DISABLE; } - I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); + I915_WRITE(CURCNTR(pipe), cntl); intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ - I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); + I915_WRITE(CURBASE(pipe), base); } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ @@ -4603,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, if (!visible && !intel_crtc->cursor_visible) return; - I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); + I915_WRITE(CURPOS(pipe), pos); if (IS_845G(dev) || IS_I865G(dev)) i845_update_cursor(crtc, base); else @@ -4643,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, } obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); - if (!obj) + if (&obj->base == NULL) return -ENOENT; if (obj->base.size < width * height * 4) { @@ -4909,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); + u32 dpll = I915_READ(DPLL(pipe)); u32 fp; intel_clock_t clock; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = I915_READ((pipe == 0) ? FPA0 : FPB0); + fp = FP0(pipe); else - fp = I915_READ((pipe == 0) ? FPA1 : FPB1); + fp = FP1(pipe); clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { @@ -4998,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; struct drm_display_mode *mode; - int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); - int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B); - int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); - int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B); + int htot = I915_READ(HTOTAL(pipe)); + int hsync = I915_READ(HSYNC(pipe)); + int vtot = I915_READ(VTOTAL(pipe)); + int vsync = I915_READ(VSYNC(pipe)); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -5110,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; + int dpll_reg = DPLL(pipe); int dpll = I915_READ(dpll_reg); if (HAS_PCH_SPLIT(dev)) @@ -5158,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work) struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; - int enabled = 0; if (!i915_powersave) return; @@ -5172,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work) if (!crtc->fb) continue; - enabled++; intel_crtc = to_intel_crtc(crtc); if (!intel_crtc->busy) intel_decrease_pllclock(crtc); } - if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) { - DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - } mutex_unlock(&dev->struct_mutex); } @@ -5206,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (!dev_priv->busy) { - if (IS_I945G(dev) || IS_I945GM(dev)) { - u32 fw_blc_self; - - DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); - fw_blc_self = I915_READ(FW_BLC_SELF); - fw_blc_self &= ~FW_BLC_SELF_EN; - I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); - } + if (!dev_priv->busy) dev_priv->busy = true; - } else + else mod_timer(&dev_priv->idle_timer, jiffies + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); @@ -5228,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) intel_fb = to_intel_framebuffer(crtc->fb); if (intel_fb->obj == obj) { if (!intel_crtc->busy) { - if (IS_I945G(dev) || IS_I945GM(dev)) { - u32 fw_blc_self; - - DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); - fw_blc_self = I915_READ(FW_BLC_SELF); - fw_blc_self &= ~FW_BLC_SELF_EN; - I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); - } /* Non-busy -> busy, upclock */ intel_increase_pllclock(crtc); intel_crtc->busy = true; @@ -5513,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; */ pf = 0; - pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; + pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); break; @@ -5523,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(fb->pitch | obj->tiling_mode); OUT_RING(obj->gtt_offset); - pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; - pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; + pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE; + pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); break; } @@ -5558,9 +6225,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc) /* Reset flags back to the 'unknown' status so that they * will be correctly set on the initial modeset. */ - intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = -1; - intel_crtc->active = true; /* force the pipe off on setup_init_config */ } static struct drm_crtc_helper_funcs intel_helper_funcs = { @@ -5615,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev, pipe = !pipe; /* Disable the plane and wait for it to stop reading from the pipe. */ - I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev, plane); - - if (IS_GEN2(dev)) - intel_wait_for_vblank(dev, pipe); - - if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) - return; - - /* Switch off the pipe. 
*/ - reg = PIPECONF(pipe); - val = I915_READ(reg); - if (val & PIPECONF_ENABLE) { - I915_WRITE(reg, val & ~PIPECONF_ENABLE); - intel_wait_for_pipe_off(dev, pipe); - } + intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); } static void intel_crtc_init(struct drm_device *dev, int pipe) @@ -5666,6 +6317,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; intel_crtc_reset(&intel_crtc->base); + intel_crtc->active = true; /* force the pipe off on setup_init_config */ if (HAS_PCH_SPLIT(dev)) { intel_helper_funcs.prepare = ironlake_crtc_prepare; @@ -5919,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev, int ret; obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); - if (!obj) + if (&obj->base == NULL) return ERR_PTR(-ENOENT); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); @@ -6204,7 +6856,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) * userspace... */ I915_WRITE(GEN6_RC_STATE, 0); - __gen6_force_wake_get(dev_priv); + __gen6_gt_force_wake_get(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -6241,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 18 << 24 | 6 << 16); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); + I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 300000); + I915_WRITE(GEN6_RP_DOWN_EI, 5000000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | GEN6_RP_USE_NORMAL_FREQ | GEN6_RP_MEDIA_IS_GFX | GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_MAX | - GEN6_RP_DOWN_BUSY_MIN); + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500)) @@ -6302,12 +6954,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); - __gen6_force_wake_put(dev_priv); + __gen6_gt_force_wake_put(dev_priv); } void intel_enable_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; /* * Disable clock gating reported to work incorrectly according to the @@ -6417,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev) ILK_DPARB_CLK_GATE | ILK_DPFD_CLK_GATE); - I915_WRITE(DSPACNTR, - I915_READ(DSPACNTR) | - DISPPLANE_TRICKLE_FEED_DISABLE); - I915_WRITE(DSPBCNTR, - I915_READ(DSPBCNTR) | - DISPPLANE_TRICKLE_FEED_DISABLE); + for_each_pipe(pipe) + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); } } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -6463,52 +7114,60 @@ void intel_enable_clock_gating(struct drm_device *dev) } } -void intel_disable_clock_gating(struct drm_device *dev) +static void ironlake_teardown_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj = dev_priv->renderctx; - - I915_WRITE(CCID, 0); - POSTING_READ(CCID); - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); + i915_gem_object_unpin(dev_priv->renderctx); + drm_gem_object_unreference(&dev_priv->renderctx->base); dev_priv->renderctx = NULL; } if (dev_priv->pwrctx) { - struct drm_i915_gem_object *obj = dev_priv->pwrctx; + 
i915_gem_object_unpin(dev_priv->pwrctx); + drm_gem_object_unreference(&dev_priv->pwrctx->base); + dev_priv->pwrctx = NULL; + } +} + +static void ironlake_disable_rc6(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (I915_READ(PWRCTXA)) { + /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); + wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), + 50); I915_WRITE(PWRCTXA, 0); POSTING_READ(PWRCTXA); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - dev_priv->pwrctx = NULL; + I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); + POSTING_READ(RSTDBYCTL); } + + ironlake_teardown_rc6(dev); } -static void ironlake_disable_rc6(struct drm_device *dev) +static int ironlake_setup_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); - wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), - 10); - POSTING_READ(CCID); - I915_WRITE(PWRCTXA, 0); - POSTING_READ(PWRCTXA); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - POSTING_READ(RSTDBYCTL); - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(&dev_priv->renderctx->base); - dev_priv->renderctx = NULL; - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(&dev_priv->pwrctx->base); - dev_priv->pwrctx = NULL; + if (dev_priv->renderctx == NULL) + dev_priv->renderctx = intel_alloc_context_page(dev); + if (!dev_priv->renderctx) + return -ENOMEM; + + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); + if (!dev_priv->pwrctx) { + ironlake_teardown_rc6(dev); + return -ENOMEM; + } + + return 0; } void ironlake_enable_rc6(struct drm_device *dev) @@ -6516,15 +7175,26 @@ void ironlake_enable_rc6(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; + /* rc6 disabled by default due to repeated reports of hanging during + * boot and resume. + */ + if (!i915_enable_rc6) + return; + + ret = ironlake_setup_rc6(dev); + if (ret) + return; + /* * GPU can automatically power down the render unit if given a page * to save state. */ ret = BEGIN_LP_RING(6); if (ret) { - ironlake_disable_rc6(dev); + ironlake_teardown_rc6(dev); return; } + OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); OUT_RING(MI_SET_CONTEXT); OUT_RING(dev_priv->renderctx->gtt_offset | @@ -6541,6 +7211,7 @@ void ironlake_enable_rc6(struct drm_device *dev) I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } + /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { @@ -6757,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev) } dev->mode_config.fb_base = dev->agp->base; - if (IS_MOBILE(dev) || !IS_GEN2(dev)) - dev_priv->num_pipe = 2; - else - dev_priv->num_pipe = 1; DRM_DEBUG_KMS("%d display pipe%s available.\n", dev_priv->num_pipe, dev_priv->num_pipe > 1 ? 
"s" : ""); @@ -6783,21 +7450,9 @@ void intel_modeset_init(struct drm_device *dev) if (IS_GEN6(dev)) gen6_enable_rps(dev_priv); - if (IS_IRONLAKE_M(dev)) { - dev_priv->renderctx = intel_alloc_context_page(dev); - if (!dev_priv->renderctx) - goto skip_rc6; - dev_priv->pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->pwrctx) { - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(&dev_priv->renderctx->base); - dev_priv->renderctx = NULL; - goto skip_rc6; - } + if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); - } -skip_rc6: INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1f4242b682c..d29e33f815d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -49,6 +49,7 @@ struct intel_dp { uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; int force_audio; + uint32_t color_range; int dpms_mode; uint8_t link_bw; uint8_t lane_count; @@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int lane_count = 4, bpp = 24; struct intel_dp_m_n m_n; + int pipe = intel_crtc->pipe; /* * Find the lane count in the intel_encoder private @@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, mode->clock, adjusted_mode->clock, &m_n); if (HAS_PCH_SPLIT(dev)) { - if (intel_crtc->pipe == 0) { - I915_WRITE(TRANSA_DATA_M1, - ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | - m_n.gmch_m); - I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); - I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); - I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); - } else { - I915_WRITE(TRANSB_DATA_M1, - ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | - m_n.gmch_m); - I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); - I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); - I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); - } + I915_WRITE(TRANSDATA_M1(pipe), + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); + I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); + I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); } else { - if (intel_crtc->pipe == 0) { - I915_WRITE(PIPEA_GMCH_DATA_M, - ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | - m_n.gmch_m); - I915_WRITE(PIPEA_GMCH_DATA_N, - m_n.gmch_n); - I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); - I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); - } else { - I915_WRITE(PIPEB_GMCH_DATA_M, - ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | - m_n.gmch_m); - I915_WRITE(PIPEB_GMCH_DATA_N, - m_n.gmch_n); - I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); - I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); - } + I915_WRITE(PIPE_GMCH_DATA_M(pipe), + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); + I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); + I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); } } @@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_dp->DP = (DP_VOLTAGE_0_4 | - DP_PRE_EMPHASIS_0); + intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; + intel_dp->DP |= intel_dp->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; @@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, 
struct drm_display_mode *mode, } } +static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + /* + * If the panel wasn't on, make sure there's not a currently + * active PP sequence before enabling AUX VDD. + */ + if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) + msleep(dev_priv->panel_t3); + + pp = I915_READ(PCH_PP_CONTROL); + pp |= EDP_FORCE_VDD; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); +} + +static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + pp = I915_READ(PCH_PP_CONTROL); + pp &= ~EDP_FORCE_VDD; + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); + + /* Make sure sequencer is idle before allowing subsequent activity */ + msleep(dev_priv->panel_t12); +} + /* Returns true if the panel was already on when called */ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) { @@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - /* Ouch. We need to wait here for some panels, like Dell e6510 - * https://bugs.freedesktop.org/show_bug.cgi?id=29278i - */ - msleep(300); - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", @@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - - /* Ouch. We need to wait here for some panels, like Dell e6510 - * https://bugs.freedesktop.org/show_bug.cgi?id=29278i - */ - msleep(300); } static void ironlake_edp_backlight_on (struct drm_device *dev) @@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder) if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); - ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_off(dev); if (!is_pch_edp(intel_dp)) ironlake_edp_pll_on(encoder); else @@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder) struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; + if (is_edp(intel_dp)) + ironlake_edp_panel_vdd_on(intel_dp); + intel_dp_start_link_train(intel_dp); - if (is_edp(intel_dp)) + if (is_edp(intel_dp)) { ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp); + } intel_dp_complete_link_train(intel_dp); @@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) ironlake_edp_pll_off(encoder); } else { if (is_edp(intel_dp)) - ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_on(intel_dp); if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); + if (is_edp(intel_dp)) { + ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp); + } intel_dp_complete_link_train(intel_dp); } if (is_edp(intel_dp)) @@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp) { enum drm_connector_status status; - /* Can't disconnect eDP */ - if (is_edp(intel_dp)) - return connector_status_connected; + /* Can't disconnect eDP, but you can close the lid... 
*/ + if (is_edp(intel_dp)) { + status = intel_panel_detect(intel_dp->base.base.dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; + } status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, @@ -1639,11 +1658,30 @@ static int intel_dp_get_modes(struct drm_connector *connector) return 0; } +static bool +intel_dp_detect_audio(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct edid *edid; + bool has_audio = false; + + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + has_audio = drm_detect_monitor_audio(edid); + + connector->display_info.raw_edid = NULL; + kfree(edid); + } + + return has_audio; +} + static int intel_dp_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { + struct drm_i915_private *dev_priv = connector->dev->dev_private; struct intel_dp *intel_dp = intel_attached_dp(connector); int ret; @@ -1652,17 +1690,31 @@ intel_dp_set_property(struct drm_connector *connector, return ret; if (property == intel_dp->force_audio_property) { - if (val == intel_dp->force_audio) + int i = val; + bool has_audio; + + if (i == intel_dp->force_audio) return 0; - intel_dp->force_audio = val; + intel_dp->force_audio = i; - if (val > 0 && intel_dp->has_audio) + if (i == 0) + has_audio = intel_dp_detect_audio(connector); + else + has_audio = i > 0; + + if (has_audio == intel_dp->has_audio) return 0; - if (val < 0 && !intel_dp->has_audio) + + intel_dp->has_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + if (val == !!intel_dp->color_range) return 0; - intel_dp->has_audio = val > 0; + intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; goto done; } @@ -1785,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect intel_dp->force_audio_property->values[1] = 1; drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); } + + intel_attach_broadcast_rgb_property(connector); } void @@ -1802,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) if (!intel_dp) return; + intel_dp->output_reg = output_reg; + intel_dp->dpms_mode = -1; + intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_dp); @@ -1841,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) connector->interlace_allowed = true; connector->doublescan_allowed = 0; - intel_dp->output_reg = output_reg; - intel_dp->has_audio = false; - intel_dp->dpms_mode = DRM_MODE_DPMS_ON; - drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); @@ -1882,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg) /* Cache some DPCD data in the eDP case */ if (is_edp(intel_dp)) { int ret; - bool was_on; + u32 pp_on, pp_div; + + pp_on = I915_READ(PCH_PP_ON_DELAYS); + pp_div = I915_READ(PCH_PP_DIVISOR); - was_on = ironlake_edp_panel_on(intel_dp); + /* Get T3 & T12 values (note: VESA not bspec terminology) */ + dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; + dev_priv->panel_t3 /= 10; /* t3 in 100us units */ + dev_priv->panel_t12 = pp_div & 0xf; + dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ + + ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, intel_dp->dpcd, sizeof(intel_dp->dpcd)); + ironlake_edp_panel_vdd_off(intel_dp); if (ret == 
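/*
 * Editor's note -- an illustrative sketch, not part of this patch. The DPCD
 * read just above is bracketed by ironlake_edp_panel_vdd_on()/_off(): eDP AUX
 * transactions need panel VDD even while the panel itself is off, and the T3
 * (power-on to AUX-ready) and T12 (power-off to next power-on) delays parsed
 * from the panel power sequencer registers are honoured around forcing VDD.
 * The wrapping pattern, with hypothetical names:
 */
struct example_edp;
void example_vdd_on(struct example_edp *edp);	/* sleeps panel_t3 if the panel is off */
void example_vdd_off(struct example_edp *edp);	/* sleeps panel_t12 afterwards */
int example_aux_read(struct example_edp *edp, void *buf, int len);

int example_read_dpcd(struct example_edp *edp, void *buf, int len)
{
	int ret;

	example_vdd_on(edp);
	ret = example_aux_read(edp, buf, len);
	example_vdd_off(edp);
	return ret;
}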
sizeof(intel_dp->dpcd)) { if (intel_dp->dpcd[0] >= 0x11) dev_priv->no_aux_handshake = intel_dp->dpcd[3] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING; } else { + /* if this fails, presume the device is a ghost */ DRM_ERROR("failed to retrieve link info\n"); + intel_dp_destroy(&intel_connector->base); + intel_dp_encoder_destroy(&intel_dp->base.base); + return; } - if (!was_on) - ironlake_edp_panel_off(dev); } intel_encoder->hot_plug = intel_dp_hot_plug; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 74db2557d64..5daa991cb28 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) return dev_priv->pipe_to_crtc_mapping[pipe]; } +static inline struct drm_crtc * +intel_get_crtc_for_plane(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->plane_to_crtc_mapping[plane]; +} + struct intel_unpin_work { struct work_struct work; struct drm_device *dev; @@ -230,6 +237,8 @@ struct intel_unpin_work { int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); +extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); + extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); @@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern void intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev); extern void intel_panel_disable_backlight(struct drm_device *dev); +extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); @@ -298,7 +308,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); extern void intel_enable_clock_gating(struct drm_device *dev); -extern void intel_disable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern void gen6_enable_rps(struct drm_i915_private *dev_priv); @@ -322,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); -extern int intel_overlay_switch_off(struct intel_overlay *overlay, - bool interruptible); +extern int intel_overlay_switch_off(struct intel_overlay *overlay); extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index ea373283c93..6eda1b51c63 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, int pipe = intel_crtc->pipe; u32 dvo_val; u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; - int dpll_reg = (pipe == 0) ? 
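/*
 * Editor's note -- an illustrative sketch, not part of this patch. DPLL(pipe)
 * here, like TRANSDATA_M1(pipe) and PIPECONF(pipe) elsewhere in this series,
 * replaces open-coded "pipe == 0 ? REG_A : REG_B" selections with a macro
 * parameterised on the pipe. One common way to build such a macro, shown with
 * placeholder register offsets, relies on the constant stride between the
 * per-pipe register blocks:
 */
#define EX_PIPE_REG(pipe, a, b)	((a) + (pipe) * ((b) - (a)))
#define EX_DPLL_A		0x1000	/* placeholder offsets for the sketch */
#define EX_DPLL_B		0x2000
#define EX_DPLL(pipe)		EX_PIPE_REG(pipe, EX_DPLL_A, EX_DPLL_B)
/* usage: I915_WRITE(EX_DPLL(pipe), dpll_value); */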
DPLL_A : DPLL_B; + int dpll_reg = DPLL(pipe); switch (dvo_reg) { case DVOA: diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0d0273e7b02..f289b864297 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -41,6 +41,7 @@ struct intel_hdmi { struct intel_encoder base; u32 sdvox_reg; int ddc_bus; + uint32_t color_range; bool has_hdmi_sink; bool has_audio; int force_audio; @@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, u32 sdvox; sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; + sdvox |= intel_hdmi->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) @@ -251,12 +253,34 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); } +static bool +intel_hdmi_detect_audio(struct drm_connector *connector) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct edid *edid; + bool has_audio = false; + + edid = drm_get_edid(connector, + &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) + has_audio = drm_detect_monitor_audio(edid); + + connector->display_info.raw_edid = NULL; + kfree(edid); + } + + return has_audio; +} + static int intel_hdmi_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; int ret; ret = drm_connector_property_set_value(connector, property, val); @@ -264,17 +288,31 @@ intel_hdmi_set_property(struct drm_connector *connector, return ret; if (property == intel_hdmi->force_audio_property) { - if (val == intel_hdmi->force_audio) + int i = val; + bool has_audio; + + if (i == intel_hdmi->force_audio) return 0; - intel_hdmi->force_audio = val; + intel_hdmi->force_audio = i; + + if (i == 0) + has_audio = intel_hdmi_detect_audio(connector); + else + has_audio = i > 0; - if (val > 0 && intel_hdmi->has_audio) + if (has_audio == intel_hdmi->has_audio) return 0; - if (val < 0 && !intel_hdmi->has_audio) + + intel_hdmi->has_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + if (val == !!intel_hdmi->color_range) return 0; - intel_hdmi->has_audio = val > 0; + intel_hdmi->color_range = val ? 
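/*
 * Editor's note -- an illustrative sketch, not part of this patch. The
 * force_audio property handling above (the same pattern is used for DP, HDMI
 * and SDVO) is a tri-state: -1 forces audio off, +1 forces it on, and 0 means
 * "auto", which re-probes the sink's EDID for audio support. Condensed:
 */
static inline int example_resolve_audio(int force_audio, int edid_has_audio)
{
	if (force_audio == 0)		/* auto: trust the fresh EDID probe */
		return edid_has_audio;
	return force_audio > 0;		/* -1 forces off, +1 forces on */
}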
SDVO_COLOR_RANGE_16_235 : 0; goto done; } @@ -336,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_hdmi->force_audio_property->values[1] = 1; drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); } + + intel_attach_broadcast_rgb_property(connector); } void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 58040f68ed7..82d04c5899d 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev) bus->reg0 = i | GMBUS_RATE_100KHZ; /* XXX force bit banging until GMBUS is fully debugged */ - bus->force_bit = intel_gpio_create(dev_priv, i); + if (IS_GEN2(dev)) + bus->force_bit = intel_gpio_create(dev_priv, i); } intel_i2c_reset(dev_priv->dev); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ace8d5d30dd..1a311ad0111 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct intel_lvds *intel_lvds = to_intel_lvds(encoder); struct drm_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; + int pipe; /* Should never happen!! */ if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { @@ -261,12 +262,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return true; } - /* Make sure pre-965s set dither correctly */ - if (INTEL_INFO(dev)->gen < 4) { - if (dev_priv->lvds_dither) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; - } - /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && adjusted_mode->vdisplay == mode->vdisplay) @@ -283,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * to register description and PRM. * Change the value here to see the borders for debugging */ - I915_WRITE(BCLRPAT_A, 0); - I915_WRITE(BCLRPAT_B, 0); + for_each_pipe(pipe) + I915_WRITE(BCLRPAT(pipe), 0); switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: @@ -374,10 +369,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } out: + /* If not enabling scaling, be consistent and always use 0. */ if ((pfit_control & PFIT_ENABLE) == 0) { pfit_control = 0; pfit_pgm_ratios = 0; } + + /* Make sure pre-965 set dither correctly */ + if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + if (pfit_control != intel_lvds->pfit_control || pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { intel_lvds->pfit_control = pfit_control; @@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force) struct drm_device *dev = connector->dev; enum drm_connector_status status = connector_status_connected; + status = intel_panel_detect(dev); + if (status != connector_status_unknown) + return status; + /* ACPI lid methods were generally unreliable in this generation, so * don't even bother. 
*/ @@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return drm_add_edid_modes(connector, intel_lvds->edid); mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); - if (mode == 0) + if (mode == NULL) return 0; drm_mode_probed_add(connector, mode); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index f70b7cf32bf..9034dd8f33c 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector, return ret; } + +static const char *broadcast_rgb_names[] = { + "Full", + "Limited 16:235", +}; + +void +intel_attach_broadcast_rgb_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_property *prop; + int i; + + prop = dev_priv->broadcast_rgb_property; + if (prop == NULL) { + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, + "Broadcast RGB", + ARRAY_SIZE(broadcast_rgb_names)); + if (prop == NULL) + return; + + for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) + drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); + + dev_priv->broadcast_rgb_property = prop; + } + + drm_connector_attach_property(connector, prop, 0); +} diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 64fd64443ca..d2c71042290 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -39,6 +39,8 @@ #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 +#define ACPI_CLID 0x01ac /* current lid state indicator */ +#define ACPI_CDCK 0x01b0 /* current docking state indicator */ #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x400 @@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev) opregion->header = base; opregion->vbt = base + OPREGION_VBT_OFFSET; + opregion->lid_state = base + ACPI_CLID; + mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3fbb98b948d..a670c006982 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, static int intel_overlay_do_wait_request(struct intel_overlay *overlay, struct drm_i915_gem_request *request, - bool interruptible, void (*tail)(struct intel_overlay *)) { struct drm_device *dev = overlay->dev; @@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); + ret = i915_add_request(LP_RING(dev_priv), NULL, request); if (ret) { kfree(request); return ret; } overlay->last_flip_req = request->seqno; overlay->flip_tail = tail; - ret = i915_do_wait_request(dev, - overlay->last_flip_req, true, - LP_RING(dev_priv)); + ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); if (ret) return ret; @@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev) return 0; /* most i8xx have pipe a forced on, so don't trust dpms mode */ - if (I915_READ(PIPEACONF) & PIPECONF_ENABLE) + if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) return 0; crtc_funcs = crtc->base.helper_private; @@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay 
*overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - ret = intel_overlay_do_wait_request(overlay, request, true, NULL); + ret = intel_overlay_do_wait_request(overlay, request, NULL); out: if (pipe_a_quirk) i830_deactivate_pipe_a(dev); @@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, OUT_RING(flip_addr); ADVANCE_LP_RING(); - ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); + ret = i915_add_request(LP_RING(dev_priv), NULL, request); if (ret) { kfree(request); return ret; @@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) } /* overlay needs to be disabled in OCMD reg */ -static int intel_overlay_off(struct intel_overlay *overlay, - bool interruptible) +static int intel_overlay_off(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay, OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); ADVANCE_LP_RING(); - return intel_overlay_do_wait_request(overlay, request, interruptible, + return intel_overlay_do_wait_request(overlay, request, intel_overlay_off_tail); } /* recover from an interruption due to a signal * We have to be careful not to repeat work forever an make forward progess. */ -static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - bool interruptible) +static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, if (overlay->last_flip_req == 0) return 0; - ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, LP_RING(dev_priv)); + ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req); if (ret) return ret; @@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - ret = intel_overlay_do_wait_request(overlay, request, true, + ret = intel_overlay_do_wait_request(overlay, request, intel_overlay_release_old_vid_tail); if (ret) return ret; @@ -868,8 +862,7 @@ out_unpin: return ret; } -int intel_overlay_switch_off(struct intel_overlay *overlay, - bool interruptible) +int intel_overlay_switch_off(struct intel_overlay *overlay) { struct overlay_registers *regs; struct drm_device *dev = overlay->dev; @@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); - ret = intel_overlay_recover_from_interrupt(overlay, interruptible); + ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) return ret; @@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay, regs->OCMD = 0; intel_overlay_unmap_regs(overlay, regs); - ret = intel_overlay_off(overlay, interruptible); + ret = intel_overlay_off(overlay); if (ret != 0) return ret; @@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); - ret = intel_overlay_switch_off(overlay, true); + ret = intel_overlay_switch_off(overlay); mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); @@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, 
put_image_rec->bo_handle)); - if (!new_bo) { + if (&new_bo->base == NULL) { ret = -ENOENT; goto out_free; } @@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; } - ret = intel_overlay_recover_from_interrupt(overlay, true); + ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) goto out_unlock; if (overlay->crtc != crtc) { struct drm_display_mode *mode = &crtc->base.mode; - ret = intel_overlay_switch_off(overlay, true); + ret = intel_overlay_switch_off(overlay); if (ret != 0) goto out_unlock; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c65992df458..18391b3ec2c 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -30,8 +30,6 @@ #include "intel_drv.h" -#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ - void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -112,19 +110,6 @@ done: dev_priv->pch_pf_size = (width << 16) | height; } -static int is_backlight_combination_mode(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 4) - return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; - - if (IS_GEN2(dev)) - return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; - - return 0; -} - static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) { u32 val; @@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 4) max &= ~1; } - - if (is_backlight_combination_mode(dev)) - max *= 0xff; } DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); @@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (IS_PINEVIEW(dev)) val >>= 1; - - if (is_backlight_combination_mode(dev)){ - u8 lbpc; - - val &= ~1; - pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); - val *= lbpc; - val >>= 1; - } } DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); @@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) if (HAS_PCH_SPLIT(dev)) return intel_pch_panel_set_backlight(dev, level); - - if (is_backlight_combination_mode(dev)){ - u32 max = intel_panel_get_max_backlight(dev); - u8 lpbc; - - lpbc = level * 0xfe / max + 1; - level /= lpbc; - pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); - } - tmp = I915_READ(BLC_PWM_CTL); if (IS_PINEVIEW(dev)) { tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); @@ -281,3 +244,22 @@ void intel_panel_setup_backlight(struct drm_device *dev) dev_priv->backlight_level = intel_panel_get_backlight(dev); dev_priv->backlight_enabled = dev_priv->backlight_level != 0; } + +enum drm_connector_status +intel_panel_detect(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (i915_panel_ignore_lid) + return i915_panel_ignore_lid > 0 ? + connector_status_connected : + connector_status_disconnected; + + /* Assume that the BIOS does not lie through the OpRegion... */ + if (dev_priv->opregion.lid_state) + return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 
+ connector_status_connected : + connector_status_disconnected; + + return connector_status_unknown; +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6218fa97aa1..789c47801ba 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring, u32 flush_domains) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; u32 cmd; int ret; -#if WATCH_EXEC - DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, - invalidate_domains, flush_domains); -#endif - - trace_i915_gem_request_flush(dev, dev_priv->next_seqno, - invalidate_domains, flush_domains); - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { /* * read/write caches: @@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring, (IS_G4X(dev) || IS_GEN5(dev))) cmd |= MI_INVALIDATE_ISP; -#if WATCH_EXEC - DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); -#endif ret = intel_ring_begin(ring, 2); if (ret) return ret; @@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); - DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); *result = seqno; return 0; } @@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 len) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; int ret; - trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); - if (IS_I830(dev) || IS_845G(dev)) { ret = intel_ring_begin(ring, 4); if (ret) @@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) /* Disable the ring buffer. The ring must be idle at this point */ dev_priv = ring->dev->dev_private; ret = intel_wait_ring_buffer(ring, ring->size - 8); + if (ret) + DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", + ring->name, ret); + I915_WRITE_CTL(ring, 0); drm_core_ioremapfree(&ring->map, ring->dev); @@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) return 0; } - trace_i915_ring_wait_begin (dev); + trace_i915_ring_wait_begin(ring); end = jiffies + 3 * HZ; do { ring->head = I915_READ_HEAD(ring); ring->space = ring_space(ring); if (ring->space >= n) { - trace_i915_ring_wait_end(dev); + trace_i915_ring_wait_end(ring); return 0; } @@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) if (atomic_read(&dev_priv->mm.wedged)) return -EAGAIN; } while (!time_after(jiffies, end)); - trace_i915_ring_wait_end (dev); + trace_i915_ring_wait_end(ring); return -EBUSY; } int intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { + struct drm_i915_private *dev_priv = ring->dev->dev_private; int n = 4*num_dwords; int ret; + if (unlikely(atomic_read(&dev_priv->mm.wedged))) + return -EIO; + if (unlikely(ring->tail + n > ring->effective_size)) { ret = intel_wrap_ring_buffer(ring); if (unlikely(ret)) @@ -1059,22 +1051,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, } static int gen6_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) + u32 invalidate, u32 flush) { + uint32_t cmd; int ret; - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0) return 0; ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, MI_FLUSH_DW); - intel_ring_emit(ring, 0); + cmd = 
MI_FLUSH_DW; + if (invalidate & I915_GEM_GPU_DOMAINS) + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } @@ -1230,22 +1225,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring, } static int blt_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) + u32 invalidate, u32 flush) { + uint32_t cmd; int ret; - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0) return 0; ret = blt_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, MI_FLUSH_DW); - intel_ring_emit(ring, 0); + cmd = MI_FLUSH_DW; + if (invalidate & I915_GEM_DOMAIN_RENDER) + cmd |= MI_INVALIDATE_TLB; + intel_ring_emit(ring, cmd); intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6d6fde85a63..f23cc5f037a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -14,22 +14,23 @@ struct intel_hw_status_page { struct drm_i915_gem_object *obj; }; -#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) +#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) +#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) -#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) +#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) -#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) +#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) -#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) +#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) -#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) +#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) -#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) +#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) @@ -43,7 +44,7 @@ struct intel_ring_buffer { RING_BLT = 0x4, } id; u32 mmio_base; - void *virtual_start; + void __iomem *virtual_start; struct drm_device *dev; struct drm_i915_gem_object *obj; @@ -58,6 +59,7 @@ struct intel_ring_buffer { u32 irq_refcount; u32 irq_mask; u32 irq_seqno; /* last seq seem at irq time */ + u32 trace_irq_seqno; u32 waiting_seqno; u32 sync_seqno[I915_NUM_RINGS-1]; bool __must_check (*irq_get)(struct intel_ring_buffer *ring); @@ -141,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring, return ioread32(ring->status_page.page_addr + reg); } +/** + * Reads a dword out of the status page, which is written to from the command + * queue by automatic updates, 
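/*
 * Editor's note -- an illustrative sketch, not part of this patch. In the
 * gen6 BSD and BLT flush hooks above, MI_FLUSH_DW on its own only flushes;
 * the TLB-invalidate bits are OR'ed in only when the caller actually asked
 * for an invalidate of a domain that ring cares about. Condensed, with the
 * concrete bit values left to the caller:
 */
static inline unsigned int example_flush_cmd(unsigned int base_cmd,
					     unsigned int invalidate,
					     unsigned int ring_domains,
					     unsigned int invalidate_bits)
{
	unsigned int cmd = base_cmd;		/* e.g. MI_FLUSH_DW */

	if (invalidate & ring_domains)		/* e.g. I915_GEM_GPU_DOMAINS */
		cmd |= invalidate_bits;		/* e.g. MI_INVALIDATE_TLB | MI_INVALIDATE_BSD */
	return cmd;
}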
MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or + * MI_STORE_DATA_IMM. + * + * The following dwords have a reserved meaning: + * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. + * 0x04: ring 0 head pointer + * 0x05: ring 1 head pointer (915-class) + * 0x06: ring 2 head pointer (915-class) + * 0x10-0x1b: Context status DWords (GM45) + * 0x1f: Last written status offset. (GM45) + * + * The area from dword 0x20 to 0x3ff is available for driver usage. + */ +#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg) +#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) +#define I915_GEM_HWS_INDEX 0x20 +#define I915_BREADCRUMB_INDEX 0x21 + void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); @@ -166,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); void intel_ring_setup_status_page(struct intel_ring_buffer *ring); +static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) +{ + if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) + ring->trace_irq_seqno = seqno; +} + /* DRI warts */ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 318f398e6b2..4324f33212d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -46,6 +46,7 @@ SDVO_TV_MASK) #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) +#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) @@ -92,6 +93,12 @@ struct intel_sdvo { uint16_t attached_output; /** + * This is used to select the color range of RBG outputs in HDMI mode. + * It is only valid when using TMDS encoding and 8 bit per color mode. + */ + uint32_t color_range; + + /** * This is set if we're going to treat the device as TV-out. 
* * While we have these nice friendly flags for output types that ought @@ -584,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i { struct intel_sdvo_get_trained_inputs_response response; + BUILD_BUG_ON(sizeof(response) != 1); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS, &response, sizeof(response))) return false; @@ -631,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo { struct intel_sdvo_pixel_clock_range clocks; + BUILD_BUG_ON(sizeof(clocks) != 4); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, &clocks, sizeof(clocks))) @@ -698,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo, struct intel_sdvo_dtd *dtd) { + BUILD_BUG_ON(sizeof(dtd->part1) != 8); + BUILD_BUG_ON(sizeof(dtd->part2) != 8); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, &dtd->part1, sizeof(dtd->part1)) && intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, @@ -795,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_encode encode; + BUILD_BUG_ON(sizeof(encode) != 2); return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPP_ENCODE, &encode, sizeof(encode)); @@ -1050,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { sdvox = 0; + if (intel_sdvo->is_hdmi) + sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -1161,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { + BUILD_BUG_ON(sizeof(*caps) != 8); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps))) @@ -1267,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) static bool intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) { - int caps = 0; - - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) - caps++; - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) - caps++; - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) - caps++; - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) - caps++; - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) - caps++; - - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) - caps++; - - if (intel_sdvo->caps.output_flags & - (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) - caps++; - - return (caps > 1); + /* Is there more than one type of output? 
*/ + int caps = intel_sdvo->caps.output_flags & 0xf; + return caps & -caps; } static struct edid * @@ -1359,7 +1350,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); } - } + } else + status = connector_status_disconnected; connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1407,10 +1399,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if ((intel_sdvo_connector->output_flag & response) == 0) ret = connector_status_disconnected; - else if (response & SDVO_TMDS_MASK) + else if (IS_TMDS(intel_sdvo_connector)) ret = intel_sdvo_hdmi_sink_detect(connector); - else - ret = connector_status_connected; + else { + struct edid *edid; + + /* if we have an edid check it matches the connection */ + edid = intel_sdvo_get_edid(connector); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + if (edid != NULL) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) + ret = connector_status_disconnected; + else + ret = connector_status_connected; + connector->display_info.raw_edid = NULL; + kfree(edid); + } else + ret = connector_status_connected; + } /* May update encoder flag for like clock for SDVO TV, etc.*/ if (ret == connector_status_connected) { @@ -1446,10 +1453,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { - if (edid->input & DRM_EDID_INPUT_DIGITAL) { + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); + bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector); + + if (connector_is_digital == monitor_is_digital) { drm_mode_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1668,6 +1680,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector) kfree(connector); } +static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) +{ + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + struct edid *edid; + bool has_audio = false; + + if (!intel_sdvo->is_hdmi) + return false; + + edid = intel_sdvo_get_edid(connector); + if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) + has_audio = drm_detect_monitor_audio(edid); + + return has_audio; +} + static int intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, @@ -1675,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector, { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; uint16_t temp_value; uint8_t cmd; int ret; @@ -1684,17 +1713,31 @@ intel_sdvo_set_property(struct drm_connector *connector, return ret; if (property == intel_sdvo_connector->force_audio_property) { - if (val == intel_sdvo_connector->force_audio) + int i = val; + bool has_audio; + + if (i == intel_sdvo_connector->force_audio) return 0; - intel_sdvo_connector->force_audio = val; + intel_sdvo_connector->force_audio = i; + + if (i == 0) + has_audio = intel_sdvo_detect_hdmi_audio(connector); + else + has_audio = i > 0; - if (val > 0 && intel_sdvo->has_hdmi_audio) + if (has_audio == intel_sdvo->has_hdmi_audio) return 0; - if (val < 0 && !intel_sdvo->has_hdmi_audio) + + 
intel_sdvo->has_hdmi_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + if (val == !!intel_sdvo->color_range) return 0; - intel_sdvo->has_hdmi_audio = val > 0; + intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; goto done; } @@ -2002,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) drm_connector_attach_property(&connector->base.base, connector->force_audio_property, 0); } + + if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) + intel_attach_broadcast_rgb_property(&connector->base.base); } static bool @@ -2224,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, if (!intel_sdvo_set_target_output(intel_sdvo, type)) return false; + BUILD_BUG_ON(sizeof(format) != 6); if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, &format, sizeof(format))) @@ -2430,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, uint16_t response; } enhancements; + BUILD_BUG_ON(sizeof(enhancements) != 2); + enhancements.response = 0; intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 93206e4eaa6..4256b8ef394 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, const struct video_levels *video_levels; const struct color_conversion *color_conversion; bool burst_ena; + int pipe = intel_crtc->pipe; if (!tv_mode) return; /* can't happen (mode_prepare prevents this) */ @@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); { - int pipeconf_reg = (intel_crtc->pipe == 0) ? - PIPEACONF : PIPEBCONF; - int dspcntr_reg = (intel_crtc->plane == 0) ? - DSPACNTR : DSPBCNTR; + int pipeconf_reg = PIPECONF(pipe); + int dspcntr_reg = DSPCNTR(pipe); int pipeconf = I915_READ(pipeconf_reg); int dspcntr = I915_READ(dspcntr_reg); - int dspbase_reg = (intel_crtc->plane == 0) ? - DSPAADDR : DSPBADDR; + int dspbase_reg = DSPADDR(pipe); int xpos = 0x0, ypos = 0x0; unsigned int xsize, ysize; /* Pipe must be off here */ @@ -1234,7 +1232,8 @@ static const struct drm_display_mode reported_modes[] = { * \return false if TV is disconnected. 
*/ static int -intel_tv_detect_type (struct intel_tv *intel_tv) +intel_tv_detect_type (struct intel_tv *intel_tv, + struct drm_connector *connector) { struct drm_encoder *encoder = &intel_tv->base.base; struct drm_device *dev = encoder->dev; @@ -1245,11 +1244,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) int type; /* Disable TV interrupts around load detect or we'll recurse */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, 0, - PIPE_HOTPLUG_INTERRUPT_ENABLE | - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + if (connector->polled & DRM_CONNECTOR_POLL_HPD) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | + PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } save_tv_dac = tv_dac = I915_READ(TV_DAC); save_tv_ctl = tv_ctl = I915_READ(TV_CTL); @@ -1302,11 +1303,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv) I915_WRITE(TV_CTL, save_tv_ctl); /* Restore interrupt config */ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, - PIPE_HOTPLUG_INTERRUPT_ENABLE | - PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + if (connector->polled & DRM_CONNECTOR_POLL_HPD) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | + PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } return type; } @@ -1356,7 +1359,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { - type = intel_tv_detect_type(intel_tv); + type = intel_tv_detect_type(intel_tv, connector); } else if (force) { struct drm_crtc *crtc; int dpms_mode; @@ -1364,7 +1367,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, &mode, &dpms_mode); if (crtc) { - type = intel_tv_detect_type(intel_tv); + type = intel_tv_detect_type(intel_tv, connector); intel_release_load_detect_pipe(&intel_tv->base, connector, dpms_mode); } else @@ -1658,6 +1661,18 @@ intel_tv_init(struct drm_device *dev) intel_encoder = &intel_tv->base; connector = &intel_connector->base; + /* The documentation, for the older chipsets at least, recommend + * using a polling method rather than hotplug detection for TVs. + * This is because in order to perform the hotplug detection, the PLLs + * for the TV must be kept alive increasing power drain and starving + * bandwidth from other encoders. Notably for instance, it causes + * pipe underruns on Crestline when this encoder is supposedly idle. + * + * More recent chipsets favour HDMI rather than integrated S-Video. 
+ */ + connector->polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 3fcffcf75e3..2ad49cbf7c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) DRM_ERROR("bo %p still attached to GEM object\n", bo); nv10_mem_put_tile_region(dev, nvbo->tile, NULL); - nouveau_vm_put(&nvbo->vma); + if (nvbo->vma.node) { + nouveau_vm_unmap(&nvbo->vma); + nouveau_vm_put(&nvbo->vma); + } kfree(nvbo); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index d56f08d3cbd..a2199fe9fa9 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, switch (radeon_crtc->rmx_type) { case RMX_CENTER: - args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); + args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); + args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); + args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); break; case RMX_ASPECT: a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; if (a1 > a2) { - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; + args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); + args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); } else if (a2 > a1) { - args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; + args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); + args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); } break; case RMX_FULL: default: - args.usOverscanRight = radeon_crtc->h_border; - args.usOverscanLeft = radeon_crtc->h_border; - args.usOverscanBottom = radeon_crtc->v_border; - args.usOverscanTop = radeon_crtc->v_border; + args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border); + args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border); + args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border); + args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border); break; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE5(rdev)) { - args.v3.usSpreadSpectrumAmountFrac = 0; + args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0); args.v3.ucSpreadSpectrumType = ss->type; switch (pll_id) { 
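/*
 * Editor's note -- an illustrative sketch, not part of this patch. The
 * cpu_to_le16()/cpu_to_le32() conversions being added throughout the ATOM
 * call paths exist because the ATOM BIOS parameter structures are defined as
 * little-endian; on a big-endian host every multi-byte member must be
 * byte-swapped before atom_execute_table() runs the table. Schematically,
 * with a hypothetical argument struct:
 */
struct example_atom_args {
	__le16 usPixelClock;		/* stored little-endian regardless of CPU */
	__le16 usOverscanRight;
};

static inline void example_fill_args(struct example_atom_args *args,
				     u16 clock_10khz, u16 overscan)
{
	args->usPixelClock = cpu_to_le16(clock_10khz);
	args->usOverscanRight = cpu_to_le16(overscan);
}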
case ATOM_PPLL1: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; - args.v3.usSpreadSpectrumAmount = ss->amount; - args.v3.usSpreadSpectrumStep = ss->step; + args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; - args.v3.usSpreadSpectrumAmount = ss->amount; - args.v3.usSpreadSpectrumStep = ss->step; + args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; - args.v3.usSpreadSpectrumAmount = 0; - args.v3.usSpreadSpectrumStep = 0; + args.v3.usSpreadSpectrumAmount = cpu_to_le16(0); + args.v3.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; @@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, switch (pll_id) { case ATOM_PPLL1: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; - args.v2.usSpreadSpectrumAmount = ss->amount; - args.v2.usSpreadSpectrumStep = ss->step; + args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_PPLL2: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; - args.v2.usSpreadSpectrumAmount = ss->amount; - args.v2.usSpreadSpectrumStep = ss->step; + args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); + args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); break; case ATOM_DCPLL: args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; - args.v2.usSpreadSpectrumAmount = 0; - args.v2.usSpreadSpectrumStep = 0; + args.v2.usSpreadSpectrumAmount = cpu_to_le16(0); + args.v2.usSpreadSpectrumStep = cpu_to_le16(0); break; case ATOM_PPLL_INVALID: return; @@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, dp_clock = dig_connector->dp_clock; } } -/* this might work properly with the new pll algo */ -#if 0 /* doesn't work properly on some laptops */ + /* use recommended ref_div for ss */ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (ss_enabled) { if (ss->refdiv) { + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = ss->refdiv; + if (ASIC_IS_AVIVO(rdev)) + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } } } -#endif + if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; - /* rv515 needs more testing with this option */ - if (rdev->family != CHIP_RV515) { - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - pll->flags |= RADEON_PLL_IS_LCD; - } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + pll->flags |= RADEON_PLL_IS_LCD; } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, index, (uint32_t *)&args); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { + pll->flags |= 
RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = args.v3.sOutput.ucRefDiv; } if (args.v3.sOutput.ucPostDiv) { + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; pll->flags |= RADEON_PLL_USE_POST_DIV; pll->post_div = args.v3.sOutput.ucPostDiv; } @@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, * SetPixelClock provides the dividers */ args.v5.ucCRTC = ATOM_CRTC_INVALID; - args.v5.usPixelClock = dispclk; + args.v5.usPixelClock = cpu_to_le16(dispclk); args.v5.ucPpll = ATOM_DCPLL; break; case 6: /* if the default dcpll clock is specified, * SetPixelClock provides the dividers */ - args.v6.ulDispEngClkFreq = dispclk; + args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); args.v6.ucPpll = ATOM_DCPLL; break; default: @@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - /* rv515 seems happier with the old algo */ - if (rdev->family == CHIP_RV515) - radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div); - else if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else @@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode } } -static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int x, int y, int atomic) +static int dce4_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); - if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) - WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, - EVERGREEN_INTERLEAVE_EN); - else - WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); @@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); - if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) - WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, - AVIVO_D1MODE_INTERLEAVE_EN); - else - WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (!atomic && fb && fb != crtc->fb) { radeon_fb = to_radeon_framebuffer(fb); rbo = gem_to_radeon_bo(radeon_fb->obj); @@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) - return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); + return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0); else if (ASIC_IS_AVIVO(rdev)) return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); else @@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) - return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); + return dce4_crtc_do_set_base(crtc, fb, x, y, 1); else if (ASIC_IS_AVIVO(rdev)) return avivo_crtc_do_set_base(crtc, fb, x, y, 1); else diff --git 
a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d4045223d0f..789441ed983 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) radeon_ring_write(rdev, 1); /* FIXME: implement */ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); + radeon_ring_write(rdev, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); radeon_ring_write(rdev, ib->length_dw); } @@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) return -EINVAL; r700_cp_stop(rdev); - WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); + WREG32(CP_RB_CNTL, +#ifdef __BIG_ENDIAN + BUF_SWAP_32BIT | +#endif + RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); fw_data = (const __be32 *)rdev->pfp_fw->data; WREG32(CP_PFP_UCODE_ADDR, 0); @@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_WPTR, 0); /* set the wb address whether it's enabled or not */ - WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR, +#ifdef __BIG_ENDIAN + RB_RPTR_SWAP(2) | +#endif + ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); @@ -2627,8 +2639,8 @@ restart_ih: while (rptr != wptr) { /* wptr/rptr are in bytes! */ ring_index = rptr / 4; - src_id = rdev->ih.ring[ring_index] & 0xff; - src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; + src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; + src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; switch (src_id) { case 1: /* D1 vblank/vline */ diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2ed930e02f3..3218287f4c5 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format, if (h < 8) h = 8; - cb_color_info = ((format << 2) | (1 << 24)); + cb_color_info = ((format << 2) | (1 << 24) | (1 << 8)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; @@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) /* high addr, stride */ sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); +#ifdef __BIG_ENDIAN + sq_vtx_constant_word2 |= (2 << 30); +#endif /* xyzw swizzles */ sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); @@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev, sq_tex_resource_word0 = (1 << 0); /* 2D */ sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | ((w - 1) << 18)); - sq_tex_resource_word1 = ((h - 1) << 0); + sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28); /* xyzw swizzles */ sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); @@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev) radeon_ring_write(rdev, DI_PT_RECTLIST); radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); - radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); + radeon_ring_write(rdev, +#ifdef __BIG_ENDIAN + (2 << 2) | +#endif + DI_INDEX_SIZE_16_BIT); radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1); @@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input) int evergreen_blit_init(struct radeon_device *rdev) { u32 obj_size; - int r, dwords; + int i, r, dwords; void *ptr; u32 packet2s[16]; int num_packet2s = 0; @@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev) dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { - packet2s[num_packet2s++] = PACKET2(0); + packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); dwords++; } @@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev) if (num_packet2s) memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); - memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); - memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); + for (i = 0; i < evergreen_vs_size; i++) + *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); + for (i = 0; i < evergreen_ps_size; i++) + *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c index ef1d28c07fb..3a10399e006 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c @@ -311,11 +311,19 @@ const u32 evergreen_vs[] = 0x00000000, 0x3c000000, 0x67961001, +#ifdef __BIG_ENDIAN + 0x000a0000, +#else 0x00080000, +#endif 0x00000000, 0x1c000000, 0x67961000, +#ifdef __BIG_ENDIAN + 0x00020008, +#else 0x00000008, +#endif 0x00000000, }; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 21e839bd20e..9aaa3f0c937 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -98,6 +98,7 @@ #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C +#define RB_RPTR_SWAP(x) ((x) << 0) #define CP_RB_RPTR_ADDR_HI 0xC110 #define CP_RB_RPTR_WR 0xC108 #define CP_RB_WPTR 0xC114 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index 607241c6a8a..5a82b6b7584 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c @@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename) last_reg = strtol(last_reg_s, NULL, 16); do { - if (fgets(buf, 1024, file) == NULL) + if (fgets(buf, 1024, file) == NULL) { + fclose(file); return -1; + } len = strlen(buf); if (ftell(file) == end) done = 1; @@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename) fprintf(stderr, "Error matching regular expression %d in %s\n", r, filename); + fclose(file); return -1; } else { buf[match[0].rm_eo] = 0; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5f15820efe1..93fa735c8c1 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -1427,6 +1427,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } track->zb.robj = reloc->robj; track->zb.offset = idx_value; + track->zb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case RADEON_RB3D_COLOROFFSET: @@ -1439,6 +1440,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } track->cb[0].robj = reloc->robj; track->cb[0].offset = idx_value; + track->cb_dirty = true; ib[idx] = idx_value + 
((u32)reloc->lobj.gpu_offset); break; case RADEON_PP_TXOFFSET_0: @@ -1454,6 +1456,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[i].robj = reloc->robj; + track->tex_dirty = true; break; case RADEON_PP_CUBIC_OFFSET_T0_0: case RADEON_PP_CUBIC_OFFSET_T0_1: @@ -1471,6 +1474,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[0].cube_info[i].offset = idx_value; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[0].cube_info[i].robj = reloc->robj; + track->tex_dirty = true; break; case RADEON_PP_CUBIC_OFFSET_T1_0: case RADEON_PP_CUBIC_OFFSET_T1_1: @@ -1488,6 +1492,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[1].cube_info[i].offset = idx_value; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[1].cube_info[i].robj = reloc->robj; + track->tex_dirty = true; break; case RADEON_PP_CUBIC_OFFSET_T2_0: case RADEON_PP_CUBIC_OFFSET_T2_1: @@ -1505,9 +1510,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[2].cube_info[i].offset = idx_value; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[2].cube_info[i].robj = reloc->robj; + track->tex_dirty = true; break; case RADEON_RE_WIDTH_HEIGHT: track->maxy = ((idx_value >> 16) & 0x7FF); + track->cb_dirty = true; + track->zb_dirty = true; break; case RADEON_RB3D_COLORPITCH: r = r100_cs_packet_next_reloc(p, &reloc); @@ -1528,9 +1536,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, ib[idx] = tmp; track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; + track->cb_dirty = true; break; case RADEON_RB3D_DEPTHPITCH: track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; + track->zb_dirty = true; break; case RADEON_RB3D_CNTL: switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { @@ -1555,6 +1565,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p, return -EINVAL; } track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); + track->cb_dirty = true; + track->zb_dirty = true; break; case RADEON_RB3D_ZSTENCILCNTL: switch (idx_value & 0xf) { @@ -1572,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, default: break; } + track->zb_dirty = true; break; case RADEON_RB3D_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); @@ -1588,6 +1601,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, uint32_t temp = idx_value >> 4; for (i = 0; i < track->num_texture; i++) track->textures[i].enabled = !!(temp & (1 << i)); + track->tex_dirty = true; } break; case RADEON_SE_VF_CNTL: @@ -1602,12 +1616,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p, i = (reg - RADEON_PP_TEX_SIZE_0) / 8; track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; + track->tex_dirty = true; break; case RADEON_PP_TEX_PITCH_0: case RADEON_PP_TEX_PITCH_1: case RADEON_PP_TEX_PITCH_2: i = (reg - RADEON_PP_TEX_PITCH_0) / 8; track->textures[i].pitch = idx_value + 32; + track->tex_dirty = true; break; case RADEON_PP_TXFILTER_0: case RADEON_PP_TXFILTER_1: @@ -1621,6 +1637,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, tmp = (idx_value >> 27) & 0x7; if (tmp == 2 || tmp == 6) track->textures[i].roundup_h = false; + track->tex_dirty = true; break; case RADEON_PP_TXFORMAT_0: case RADEON_PP_TXFORMAT_1: @@ -1673,6 +1690,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } 
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); + track->tex_dirty = true; break; case RADEON_PP_CUBIC_FACES_0: case RADEON_PP_CUBIC_FACES_1: @@ -1683,6 +1701,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); } + track->tex_dirty = true; break; default: printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", @@ -3318,9 +3337,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) unsigned long size; unsigned prim_walk; unsigned nverts; - unsigned num_cb = track->num_cb; + unsigned num_cb = track->cb_dirty ? track->num_cb : 0; - if (!track->zb_cb_clear && !track->color_channel_mask && + if (num_cb && !track->zb_cb_clear && !track->color_channel_mask && !track->blend_read_enable) num_cb = 0; @@ -3341,7 +3360,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) return -EINVAL; } } - if (track->z_enabled) { + track->cb_dirty = false; + + if (track->zb_dirty && track->z_enabled) { if (track->zb.robj == NULL) { DRM_ERROR("[drm] No buffer for z buffer !\n"); return -EINVAL; @@ -3358,6 +3379,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) return -EINVAL; } } + track->zb_dirty = false; + + if (track->aa_dirty && track->aaresolve) { + if (track->aa.robj == NULL) { + DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i); + return -EINVAL; + } + /* I believe the format comes from colorbuffer0. */ + size = track->aa.pitch * track->cb[0].cpp * track->maxy; + size += track->aa.offset; + if (size > radeon_bo_size(track->aa.robj)) { + DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d " + "(need %lu have %lu) !\n", i, size, + radeon_bo_size(track->aa.robj)); + DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n", + i, track->aa.pitch, track->cb[0].cpp, + track->aa.offset, track->maxy); + return -EINVAL; + } + } + track->aa_dirty = false; + prim_walk = (track->vap_vf_cntl >> 4) & 0x3; if (track->vap_vf_cntl & (1 << 14)) { nverts = track->vap_alt_nverts; @@ -3417,13 +3460,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) prim_walk); return -EINVAL; } - return r100_cs_track_texture_check(rdev, track); + + if (track->tex_dirty) { + track->tex_dirty = false; + return r100_cs_track_texture_check(rdev, track); + } + return 0; } void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) { unsigned i, face; + track->cb_dirty = true; + track->zb_dirty = true; + track->tex_dirty = true; + track->aa_dirty = true; + if (rdev->family < CHIP_R300) { track->num_cb = 1; if (rdev->family <= CHIP_RS200) @@ -3437,6 +3490,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track track->num_texture = 16; track->maxy = 4096; track->separate_cube = 0; + track->aaresolve = false; + track->aa.robj = NULL; } for (i = 0; i < track->num_cb; i++) { @@ -3746,8 +3801,6 @@ static int r100_startup(struct radeon_device *rdev) r100_mc_program(rdev); /* Resume clock */ r100_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) 
*/ -// r100_gpu_init(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r100_enable_bm(rdev); diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index af65600e656..2fef9de7f36 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h @@ -52,14 +52,7 @@ struct r100_cs_track_texture { unsigned compress_format; }; -struct r100_cs_track_limits { - unsigned num_cb; - unsigned num_texture; - unsigned max_levels; -}; - struct r100_cs_track { - struct radeon_device *rdev; unsigned num_cb; unsigned num_texture; unsigned maxy; @@ -73,11 +66,17 @@ struct r100_cs_track { struct r100_cs_track_array arrays[11]; struct r100_cs_track_cb cb[R300_MAX_CB]; struct r100_cs_track_cb zb; + struct r100_cs_track_cb aa; struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; bool z_enabled; bool separate_cube; bool zb_cb_clear; bool blend_read_enable; + bool cb_dirty; + bool zb_dirty; + bool tex_dirty; + bool aa_dirty; + bool aaresolve; }; int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index d2408c39561..f2405830041 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, } track->zb.robj = reloc->robj; track->zb.offset = idx_value; + track->zb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case RADEON_RB3D_COLOROFFSET: @@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, } track->cb[0].robj = reloc->robj; track->cb[0].offset = idx_value; + track->cb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R200_PP_TXOFFSET_0: @@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, } ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[i].robj = reloc->robj; + track->tex_dirty = true; break; case R200_PP_CUBIC_OFFSET_F1_0: case R200_PP_CUBIC_OFFSET_F2_0: @@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].cube_info[face - 1].offset = idx_value; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); track->textures[i].cube_info[face - 1].robj = reloc->robj; + track->tex_dirty = true; break; case RADEON_RE_WIDTH_HEIGHT: track->maxy = ((idx_value >> 16) & 0x7FF); + track->cb_dirty = true; + track->zb_dirty = true; break; case RADEON_RB3D_COLORPITCH: r = r100_cs_packet_next_reloc(p, &reloc); @@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p, ib[idx] = tmp; track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; + track->cb_dirty = true; break; case RADEON_RB3D_DEPTHPITCH: track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; + track->zb_dirty = true; break; case RADEON_RB3D_CNTL: switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { @@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, } track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); + track->cb_dirty = true; + track->zb_dirty = true; break; case RADEON_RB3D_ZSTENCILCNTL: switch (idx_value & 0xf) { @@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, default: break; } + track->zb_dirty = true; break; case RADEON_RB3D_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); @@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, uint32_t temp = idx_value >> 4; for (i = 0; i < track->num_texture; i++) 
track->textures[i].enabled = !!(temp & (1 << i)); + track->tex_dirty = true; } break; case RADEON_SE_VF_CNTL: @@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, i = (reg - R200_PP_TXSIZE_0) / 32; track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; + track->tex_dirty = true; break; case R200_PP_TXPITCH_0: case R200_PP_TXPITCH_1: @@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, case R200_PP_TXPITCH_5: i = (reg - R200_PP_TXPITCH_0) / 32; track->textures[i].pitch = idx_value + 32; + track->tex_dirty = true; break; case R200_PP_TXFILTER_0: case R200_PP_TXFILTER_1: @@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, tmp = (idx_value >> 27) & 0x7; if (tmp == 2 || tmp == 6) track->textures[i].roundup_h = false; + track->tex_dirty = true; break; case R200_PP_TXMULTI_CTL_0: case R200_PP_TXMULTI_CTL_1: @@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].tex_coord_type = 1; break; } + track->tex_dirty = true; break; case R200_PP_TXFORMAT_0: case R200_PP_TXFORMAT_1: @@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, } track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); + track->tex_dirty = true; break; case R200_PP_CUBIC_FACES_0: case R200_PP_CUBIC_FACES_1: @@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); } + track->tex_dirty = true; break; default: printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 55fe5ba7def..069efa8c8ec 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } track->cb[i].robj = reloc->robj; track->cb[i].offset = idx_value; + track->cb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R300_ZB_DEPTHOFFSET: @@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } track->zb.robj = reloc->robj; track->zb.offset = idx_value; + track->zb_dirty = true; ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); break; case R300_TX_OFFSET_0: @@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tmp |= tile_flags; ib[idx] = tmp; track->textures[i].robj = reloc->robj; + track->tex_dirty = true; break; /* Tracked registers */ case 0x2084: @@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, if (p->rdev->family < CHIP_RV515) { track->maxy -= 1440; } + track->cb_dirty = true; + track->zb_dirty = true; break; case 0x4E00: /* RB3D_CCTL */ @@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return -EINVAL; } track->num_cb = ((idx_value >> 5) & 0x3) + 1; + track->cb_dirty = true; break; case 0x4E38: case 0x4E3C: @@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ((idx_value >> 21) & 0xF)); return -EINVAL; } + track->cb_dirty = true; break; case 0x4F00: /* ZB_CNTL */ @@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } else { track->z_enabled = false; } + track->zb_dirty = true; break; case 0x4F10: /* ZB_FORMAT */ @@ -838,6 +846,7 @@ static 
int r300_packet0_check(struct radeon_cs_parser *p, (idx_value & 0xF)); return -EINVAL; } + track->zb_dirty = true; break; case 0x4F24: /* ZB_DEPTHPITCH */ @@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ib[idx] = tmp; track->zb.pitch = idx_value & 0x3FFC; + track->zb_dirty = true; break; case 0x4104: + /* TX_ENABLE */ for (i = 0; i < 16; i++) { bool enabled; enabled = !!(idx_value & (1 << i)); track->textures[i].enabled = enabled; } + track->tex_dirty = true; break; case 0x44C0: case 0x44C4: @@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_X16: + case R300_TX_FORMAT_FL_I16: case R300_TX_FORMAT_Y8X8: case R300_TX_FORMAT_Z5Y6X5: case R300_TX_FORMAT_Z6Y5X5: @@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].compress_format = R100_TRACK_COMP_NONE; break; case R300_TX_FORMAT_Y16X16: + case R300_TX_FORMAT_FL_I16A16: case R300_TX_FORMAT_Z11Y11X10: case R300_TX_FORMAT_Z10Y11X11: case R300_TX_FORMAT_W8Z8Y8X8: @@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, DRM_ERROR("Invalid texture format %u\n", (idx_value & 0x1F)); return -EINVAL; - break; } + track->tex_dirty = true; break; case 0x4400: case 0x4404: @@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, if (tmp == 2 || tmp == 4 || tmp == 6) { track->textures[i].roundup_h = false; } + track->tex_dirty = true; break; case 0x4500: case 0x4504: @@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); return -EINVAL; } + track->tex_dirty = true; break; case 0x4480: case 0x4484: @@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, track->textures[i].use_pitch = !!tmp; tmp = (idx_value >> 22) & 0xF; track->textures[i].txdepth = tmp; + track->tex_dirty = true; break; case R300_ZB_ZPASS_ADDR: r = r100_cs_packet_next_reloc(p, &reloc); @@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case 0x4e0c: /* RB3D_COLOR_CHANNEL_MASK */ track->color_channel_mask = idx_value; + track->cb_dirty = true; break; case 0x43a4: /* SC_HYPERZ_EN */ @@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case 0x4f1c: /* ZB_BW_CNTL */ track->zb_cb_clear = !!(idx_value & (1 << 5)); + track->cb_dirty = true; + track->zb_dirty = true; if (p->rdev->hyperz_filp != p->filp) { if (idx_value & (R300_HIZ_ENABLE | R300_RD_COMP_ENABLE | @@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case 0x4e04: /* RB3D_BLENDCNTL */ track->blend_read_enable = !!(idx_value & (1 << 2)); + track->cb_dirty = true; + break; + case R300_RB3D_AARESOLVE_OFFSET: + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + track->aa.robj = reloc->robj; + track->aa.offset = idx_value; + track->aa_dirty = true; + ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); + break; + case R300_RB3D_AARESOLVE_PITCH: + track->aa.pitch = idx_value & 0x3FFE; + track->aa_dirty = true; break; - case 0x4f28: /* ZB_DEPTHCLEARVALUE */ + case R300_RB3D_AARESOLVE_CTL: + track->aaresolve = idx_value & 0x1; + track->aa_dirty = true; break; case 0x4f30: /* ZB_MASK_OFFSET */ case 0x4f34: /* ZB_ZMASK_PITCH */ diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 1a0d5362cd7..f0bce399c9f 
100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -1371,6 +1371,8 @@ #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ +#define R300_RB3D_AARESOLVE_OFFSET 0x4E80 +#define R300_RB3D_AARESOLVE_PITCH 0x4E84 #define R300_RB3D_AARESOLVE_CTL 0x4E88 /* gap */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 1cd56dc8c8a..b409b24207a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2106,7 +2106,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev) r600_cp_stop(rdev); - WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); + WREG32(CP_RB_CNTL, +#ifdef __BIG_ENDIAN + BUF_SWAP_32BIT | +#endif + RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); /* Reset cp */ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); @@ -2193,7 +2197,11 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_WPTR, 0); /* set the wb address whether it's enabled or not */ - WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR, +#ifdef __BIG_ENDIAN + RB_RPTR_SWAP(2) | +#endif + ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); @@ -2629,7 +2637,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { /* FIXME: implement */ radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); + radeon_ring_write(rdev, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); radeon_ring_write(rdev, ib->length_dw); } @@ -3305,8 +3317,8 @@ restart_ih: while (rptr != wptr) { /* wptr/rptr are in bytes! 
*/ ring_index = rptr / 4; - src_id = rdev->ih.ring[ring_index] & 0xff; - src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; + src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; + src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; switch (src_id) { case 1: /* D1 vblank/vline */ diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index ca5c29f7077..7f1043448d2 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev) ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); for (i = 0; i < r6xx_vs_size; i++) - vs[i] = r6xx_vs[i]; + vs[i] = cpu_to_le32(r6xx_vs[i]); for (i = 0; i < r6xx_ps_size; i++) - ps[i] = r6xx_ps[i]; + ps[i] = cpu_to_le32(r6xx_ps[i]); dev_priv->blit_vb->used = 512; @@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr) DRM_DEBUG("\n"); sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); +#ifdef __BIG_ENDIAN + sq_vtx_constant_word2 |= (2 << 30); +#endif BEGIN_RING(9); OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); @@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv) OUT_RING(DI_PT_RECTLIST); OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); +#ifdef __BIG_ENDIAN + OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT); +#else OUT_RING(DI_INDEX_SIZE_16_BIT); +#endif OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); OUT_RING(1); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 16e211a614d..2fed9175012 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format, if (h < 8) h = 8; - cb_color_info = ((format << 2) | (1 << 27)); + cb_color_info = ((format << 2) | (1 << 27) | (1 << 8)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; @@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) u32 sq_vtx_constant_word2; sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); +#ifdef __BIG_ENDIAN + sq_vtx_constant_word2 |= (2 << 30); +#endif radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(rdev, 0x460); @@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev, if (h < 1) h = 1; - sq_tex_resource_word0 = (1 << 0); + sq_tex_resource_word0 = (1 << 0) | (1 << 3); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | ((w - 1) << 19)); @@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev) radeon_ring_write(rdev, DI_PT_RECTLIST); radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); - radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); + radeon_ring_write(rdev, +#ifdef __BIG_ENDIAN + (2 << 2) | +#endif + DI_INDEX_SIZE_16_BIT); radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(rdev, 1); @@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev) dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); - radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); + radeon_ring_write(rdev, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (gpu_addr & 0xFFFFFFFC)); radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); radeon_ring_write(rdev, dwords); @@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input) int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; - int r, dwords; + int i, r, dwords; void 
*ptr; u32 packet2s[16]; int num_packet2s = 0; @@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev) dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { - packet2s[num_packet2s++] = PACKET2(0); + packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0)); dwords++; } @@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev) if (num_packet2s) memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), packet2s, num_packet2s * 4); - memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); - memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); + for (i = 0; i < r6xx_vs_size; i++) + *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]); + for (i = 0; i < r6xx_ps_size; i++) + *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index e8151c1d55b..2d1f6c5ee2a 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c @@ -684,7 +684,11 @@ const u32 r6xx_vs[] = 0x00000000, 0x3c000000, 0x68cd1000, +#ifdef __BIG_ENDIAN + 0x000a0000, +#else 0x00080000, +#endif 0x00000000, }; diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 4f4cd8b286d..c3ab959bdc7 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) r600_do_cp_stop(dev_priv); RADEON_WRITE(R600_CP_RB_CNTL, +#ifdef __BIG_ENDIAN + R600_BUF_SWAP_32BIT | +#endif R600_RB_NO_UPDATE | R600_RB_BLKSZ(15) | R600_RB_BUFSZ(3)); @@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) r600_do_cp_stop(dev_priv); RADEON_WRITE(R600_CP_RB_CNTL, +#ifdef __BIG_ENDIAN + R600_BUF_SWAP_32BIT | +#endif R600_RB_NO_UPDATE | - (15 << 8) | - (3 << 0)); + R600_RB_BLKSZ(15) | + R600_RB_BUFSZ(3)); RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); RADEON_READ(R600_GRBM_SOFT_RESET); @@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv) if (!dev_priv->writeback_works) { /* Disable writeback to avoid unnecessary bus master transfer */ - RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | - RADEON_RB_NO_UPDATE); + RADEON_WRITE(R600_CP_RB_CNTL, +#ifdef __BIG_ENDIAN + R600_BUF_SWAP_32BIT | +#endif + RADEON_READ(R600_CP_RB_CNTL) | + R600_RB_NO_UPDATE); RADEON_WRITE(R600_SCRATCH_UMSK, 0); } } @@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev) RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); - RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); + RADEON_WRITE(R600_CP_RB_CNTL, +#ifdef __BIG_ENDIAN + R600_BUF_SWAP_32BIT | +#endif + R600_RB_RPTR_WR_ENA); RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); @@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, + dev_priv->gart_vm_start; } RADEON_WRITE(R600_CP_RB_RPTR_ADDR, - rptr_addr & 0xffffffff); +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (rptr_addr & 0xfffffffc)); RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr)); @@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, { u64 scratch_addr; - scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); + scratch_addr = 
RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC; scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; scratch_addr += R600_SCRATCH_REG_OFFSET; scratch_addr >>= 8; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index fe0c8eb7601..0a0848f0346 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -388,17 +388,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) } if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); + dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", - __func__, __LINE__, height); + dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); + dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, + base_offset, base_align, array_mode); return -EINVAL; } @@ -413,7 +414,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) * broken userspace. */ } else { - dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); + dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i, + array_mode, + track->cb_color_bo_offset[i], tmp, + radeon_bo_size(track->cb_color_bo[i])); return -EINVAL; } } @@ -548,17 +552,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", - __func__, __LINE__, pitch); + dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, pitch, pitch_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(height, height_align)) { - dev_warn(p->dev, "%s:%d db height (%d) invalid\n", - __func__, __LINE__, height); + dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, height, height_align, array_mode); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); + dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i, + base_offset, base_align, array_mode); return -EINVAL; } @@ -566,9 +571,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; tmp = ntiles * bpe * 64 * nviews; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { - dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", - track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, - radeon_bo_size(track->db_bo)); + dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", + array_mode, + track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, + radeon_bo_size(track->db_bo)); return -EINVAL; } } @@ -1350,18 +1356,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i /* XXX check height as well... 
*/ if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", - __func__, __LINE__, pitch); + dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", + __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(base_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", - __func__, __LINE__, base_offset); + dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", + __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } if (!IS_ALIGNED(mip_offset, base_align)) { - dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", - __func__, __LINE__, mip_offset); + dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", + __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index d1f598663da..b2b944bcd05 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -154,13 +154,14 @@ #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_BASE 0xC100 #define CP_RB_CNTL 0xC104 -#define RB_BUFSZ(x) ((x)<<0) -#define RB_BLKSZ(x) ((x)<<8) -#define RB_NO_UPDATE (1<<27) -#define RB_RPTR_WR_ENA (1<<31) +#define RB_BUFSZ(x) ((x) << 0) +#define RB_BLKSZ(x) ((x) << 8) +#define RB_NO_UPDATE (1 << 27) +#define RB_RPTR_WR_ENA (1 << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C +#define RB_RPTR_SWAP(x) ((x) << 0) #define CP_RB_RPTR_ADDR_HI 0xC110 #define CP_RB_RPTR_WR 0xC108 #define CP_RB_WPTR 0xC114 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5c1cc7ad9a1..02d5c415f49 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && - (gpio->usClkMaskRegisterIndex == 0x1936) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; @@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && - (gpio->usClkMaskRegisterIndex == 0x1fda) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } @@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && - (gpio->usClkMaskRegisterIndex == 0x1936) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; @@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && - (gpio->usClkMaskRegisterIndex == 0x1fda) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } @@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = 
pin->ucGPIO_ID; - gpio.reg = pin->usGpioPin_AIndex * 4; + gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; gpio.mask = (1 << pin->ucGpioPinBitShift); gpio.valid = true; break; @@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) data_offset); switch (crev) { case 1: - if (igp_info->info.ulBootUpMemoryClock) + if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock)) return true; break; case 2: - if (igp_info->info_2.ulBootUpSidePortClock) + if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock)) return true; break; default: @@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, for (i = 0; i < num_indices; i++) { if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { + (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; @@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); for (i = 0; i < num_indices; i++) { if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { + (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; @@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); for (i = 0; i < num_indices; i++) { if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && - (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { + (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { ss->percentage = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; @@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct if (misc & ATOM_DOUBLE_CLOCK_MODE) lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; - lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; - lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; + lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); + lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); @@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->linkb = false; /* parse the lcd record table */ - if (lvds_info->info.usModePatchTableOffset) { + if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; u8 *record = (u8 *)(mode_info->atom_context->bios + data_offset + - lvds_info->info.usModePatchTableOffset); + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: @@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) firmware_info = (union firmware_info *)(mode_info->atom_context->bios + 
data_offset); - vddc = firmware_info->info_14.usBootUpVDDCVoltage; + vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); } return vddc; @@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->evergreen.usVDDC; + le16_to_cpu(clock_info->evergreen.usVDDC); } else { sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); sclk |= clock_info->r600.ucEngineClockHigh << 16; @@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = VOLTAGE_SW; rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->r600.usVDDC; + le16_to_cpu(clock_info->r600.usVDDC); } if (rdev->flags & RADEON_IS_IGP) { @@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); state_array = (struct StateArray *) (mode_info->atom_context->bios + data_offset + - power_info->pplib.usStateArrayOffset); + le16_to_cpu(power_info->pplib.usStateArrayOffset)); clock_info_array = (struct ClockInfoArray *) (mode_info->atom_context->bios + data_offset + - power_info->pplib.usClockInfoArrayOffset); + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); non_clock_info_array = (struct NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + - power_info->pplib.usNonClockInfoArrayOffset); + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - return args.ulReturnEngineClock; + return le32_to_cpu(args.ulReturnEngineClock); } uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) @@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - return args.ulReturnMemoryClock; + return le32_to_cpu(args.ulReturnMemoryClock); } void radeon_atom_set_engine_clock(struct radeon_device *rdev, @@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev, SET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); - args.ulTargetEngineClock = eng_clock; /* 10 khz */ + args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, if (rdev->flags & RADEON_IS_IGP) return; - args.ulTargetMemoryClock = mem_clock; /* 10 khz */ + args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index d27ef74590c..cf7c8d5b4ec 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ 
-1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) (rdev->pdev->subsystem_device == 0x4a48)) { /* Mac X800 */ rdev->mode_info.connector_table = CT_MAC_X800; + } else if ((rdev->pdev->device == 0x4150) && + (rdev->pdev->subsystem_vendor == 0x1002) && + (rdev->pdev->subsystem_device == 0x4150)) { + /* Mac G5 9600 */ + rdev->mode_info.connector_table = CT_MAC_G5_9600; } else #endif /* CONFIG_PPC_PMAC */ #ifdef CONFIG_PPC64 @@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, &hpd); break; + case CT_MAC_G5_9600: + DRM_INFO("Connector Table: %d (mac g5 9600)\n", + rdev->mode_info.connector_table); + /* DVI - tv dac, dvo */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); + hpd.hpd = RADEON_HPD_1; /* ??? */ + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_DFP2_SUPPORT, + 0), + ATOM_DEVICE_DFP2_SUPPORT); + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT2_SUPPORT, + 2), + ATOM_DEVICE_CRT2_SUPPORT); + radeon_add_legacy_connector(dev, 0, + ATOM_DEVICE_DFP2_SUPPORT | + ATOM_DEVICE_CRT2_SUPPORT, + DRM_MODE_CONNECTOR_DVII, &ddc_i2c, + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); + /* ADC - primary dac, internal tmds */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); + hpd.hpd = RADEON_HPD_2; /* ??? */ + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_DFP1_SUPPORT, + 0), + ATOM_DEVICE_DFP1_SUPPORT); + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT1_SUPPORT, + 1), + ATOM_DEVICE_CRT1_SUPPORT); + radeon_add_legacy_connector(dev, 1, + ATOM_DEVICE_DFP1_SUPPORT | + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_DVII, &ddc_i2c, + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); + break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ca5eb21792..f0209be7a34 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -937,8 +937,11 @@ int radeon_resume_kms(struct drm_device *dev) int radeon_gpu_reset(struct radeon_device *rdev) { int r; + int resched; radeon_save_bios_scratch_regs(rdev); + /* block TTM */ + resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); radeon_suspend(rdev); r = radeon_asic_reset(rdev); @@ -947,6 +950,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_resume(rdev); radeon_restore_bios_scratch_regs(rdev); drm_helper_resume_force_mode(rdev->ddev); + ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); return 0; } /* bad news, how to tell it to userspace ? 
*/ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 4409975a363..4be58793dc1 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll, tmp *= target_clock; *fb_div = tmp / pll->reference_freq; *frac_fb_div = tmp % pll->reference_freq; + + if (*fb_div > pll->max_feedback_div) + *fb_div = pll->max_feedback_div; + else if (*fb_div < pll->min_feedback_div) + *fb_div = pll->min_feedback_div; } static u32 avivo_get_post_div(struct radeon_pll *pll, @@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll, post_div--; } + if (post_div > pll->max_post_div) + post_div = pll->max_post_div; + else if (post_div < pll->min_post_div) + post_div = pll->min_post_div; + return post_div; } @@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { + for (post_div = max_post_div; post_div >= min_post_div; --post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 448eba89d1e..5cba46b9779 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); #define R600_CP_RB_CNTL 0xc104 # define R600_RB_BUFSZ(x) ((x) << 0) # define R600_RB_BLKSZ(x) ((x) << 8) +# define R600_BUF_SWAP_32BIT (2 << 16) # define R600_RB_NO_UPDATE (1 << 27) # define R600_RB_RPTR_WR_ENA (1 << 31) #define R600_CP_RB_RPTR_WR 0xc108 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index d4a54224761..b4274883227 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v1.ucAction = action; if (action == ATOM_TRANSMITTER_ACTION_INIT) { - args.v1.usInitInfo = connector_object_id; + args.v1.usInitInfo = cpu_to_le16(connector_object_id); } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { args.v1.asMode.ucLaneSel = lane_num; args.v1.asMode.ucLaneSet = lane_set; @@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, case 3: args.v3.sExtEncoder.ucAction = action; if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) - args.v3.sExtEncoder.usConnectorId = connector_object_id; + args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id); else args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); @@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, } /* set scaler clears this on some chips */ - /* XXX check DCE4 */ - if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { - if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) - WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, - AVIVO_D1MODE_INTERLEAVE_EN); + if (ASIC_IS_AVIVO(rdev) && + (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) { + if (ASIC_IS_DCE4(rdev)) { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, + EVERGREEN_INTERLEAVE_EN); + else + 
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); + } else { + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, + AVIVO_D1MODE_INTERLEAVE_EN); + else + WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); + } } } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 28431e78ab5..0b7b486c97e 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, u32 tiling_flags = 0; int ret; int aligned_size, size; + int height = mode_cmd->height; /* need to align pitch with crtc limits */ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); - size = mode_cmd->pitch * mode_cmd->height; + if (rdev->family >= CHIP_R600) + height = ALIGN(mode_cmd->height, 8); + size = mode_cmd->pitch * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index c3f23f6ff60..5067d18d000 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -209,6 +209,7 @@ enum radeon_connector_table { CT_EMAC, CT_RN50_POWER, CT_MAC_X800, + CT_MAC_G5_9600, }; enum radeon_dvo_chip { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index df5734d0c4a..e446979e0e0 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -791,9 +791,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) radeon_mem_types_list[i].show = &radeon_mm_dump_table; radeon_mem_types_list[i].driver_features = 0; if (i == 0) - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; + radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv; else - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; + radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index b506ec1cab4..e8a1786b642 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 @@ -683,9 +683,7 @@ r300 0x4f60 0x4DF4 US_ALU_CONST_G_31 0x4DF8 US_ALU_CONST_B_31 0x4DFC US_ALU_CONST_A_31 -0x4E04 RB3D_BLENDCNTL_R3 0x4E08 RB3D_ABLENDCNTL_R3 -0x4E0C RB3D_COLOR_CHANNEL_MASK 0x4E10 RB3D_CONSTANT_COLOR 0x4E14 RB3D_COLOR_CLEAR_VALUE 0x4E18 RB3D_ROPCNTL_R3 @@ -706,13 +704,11 @@ r300 0x4f60 0x4E74 RB3D_CMASK_WRINDEX 0x4E78 RB3D_CMASK_DWORD 0x4E7C RB3D_CMASK_RDINDEX -0x4E80 RB3D_AARESOLVE_OFFSET -0x4E84 RB3D_AARESOLVE_PITCH -0x4E88 RB3D_AARESOLVE_CTL 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 0x4F04 ZB_ZSTENCILCNTL 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT +0x4F28 ZB_DEPTHCLEARVALUE 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index 8c1214c2390..722074e21e2 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 @@ -130,7 +130,6 @@ r420 0x4f60 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE -0x4028 GB_Z_PEQ_CONFIG 0x4100 TX_INVALTAGS 0x4200 GA_POINT_S0 0x4204 GA_POINT_T0 @@ -750,9 +749,7 @@ r420 0x4f60 0x4DF4 US_ALU_CONST_G_31 0x4DF8 US_ALU_CONST_B_31 
0x4DFC US_ALU_CONST_A_31 -0x4E04 RB3D_BLENDCNTL_R3 0x4E08 RB3D_ABLENDCNTL_R3 -0x4E0C RB3D_COLOR_CHANNEL_MASK 0x4E10 RB3D_CONSTANT_COLOR 0x4E14 RB3D_COLOR_CLEAR_VALUE 0x4E18 RB3D_ROPCNTL_R3 @@ -773,13 +770,11 @@ r420 0x4f60 0x4E74 RB3D_CMASK_WRINDEX 0x4E78 RB3D_CMASK_DWORD 0x4E7C RB3D_CMASK_RDINDEX -0x4E80 RB3D_AARESOLVE_OFFSET -0x4E84 RB3D_AARESOLVE_PITCH -0x4E88 RB3D_AARESOLVE_CTL 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 0x4F04 ZB_ZSTENCILCNTL 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT +0x4F28 ZB_DEPTHCLEARVALUE 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 0828d80396f..d9f62866bbc 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 @@ -749,9 +749,7 @@ rs600 0x6d40 0x4DF4 US_ALU_CONST_G_31 0x4DF8 US_ALU_CONST_B_31 0x4DFC US_ALU_CONST_A_31 -0x4E04 RB3D_BLENDCNTL_R3 0x4E08 RB3D_ABLENDCNTL_R3 -0x4E0C RB3D_COLOR_CHANNEL_MASK 0x4E10 RB3D_CONSTANT_COLOR 0x4E14 RB3D_COLOR_CLEAR_VALUE 0x4E18 RB3D_ROPCNTL_R3 @@ -772,13 +770,11 @@ rs600 0x6d40 0x4E74 RB3D_CMASK_WRINDEX 0x4E78 RB3D_CMASK_DWORD 0x4E7C RB3D_CMASK_RDINDEX -0x4E80 RB3D_AARESOLVE_OFFSET -0x4E84 RB3D_AARESOLVE_PITCH -0x4E88 RB3D_AARESOLVE_CTL 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 0x4F04 ZB_ZSTENCILCNTL 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT +0x4F28 ZB_DEPTHCLEARVALUE 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index ef422bbacfc..911a8fbd32b 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -164,7 +164,6 @@ rv515 0x6d40 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE -0x4028 GB_Z_PEQ_CONFIG 0x4100 TX_INVALTAGS 0x4114 SU_TEX_WRAP_PS3 0x4118 PS3_ENABLE @@ -461,9 +460,7 @@ rv515 0x6d40 0x4DF4 US_ALU_CONST_G_31 0x4DF8 US_ALU_CONST_B_31 0x4DFC US_ALU_CONST_A_31 -0x4E04 RB3D_BLENDCNTL_R3 0x4E08 RB3D_ABLENDCNTL_R3 -0x4E0C RB3D_COLOR_CHANNEL_MASK 0x4E10 RB3D_CONSTANT_COLOR 0x4E14 RB3D_COLOR_CLEAR_VALUE 0x4E18 RB3D_ROPCNTL_R3 @@ -484,9 +481,6 @@ rv515 0x6d40 0x4E74 RB3D_CMASK_WRINDEX 0x4E78 RB3D_CMASK_DWORD 0x4E7C RB3D_CMASK_RDINDEX -0x4E80 RB3D_AARESOLVE_OFFSET -0x4E84 RB3D_AARESOLVE_PITCH -0x4E88 RB3D_AARESOLVE_CTL 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 0x4EF8 RB3D_CONSTANT_COLOR_AR @@ -496,4 +490,5 @@ rv515 0x6d40 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT 0x4F58 ZB_ZPASS_DATA +0x4F28 ZB_DEPTHCLEARVALUE 0x4FD4 ZB_STENCILREFMASK_BF diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 0137d3e3728..6638c8e4c81 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev) switch (crev) { case 1: tmp.full = dfixed_const(100); - rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); + rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock)); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); - if (info->info.usK8MemoryClock) + if (le16_to_cpu(info->info.usK8MemoryClock)) rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); else if (rdev->clock.default_mclk) { rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); @@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev) 
break; case 2: tmp.full = dfixed_const(100); - rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock); + rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock)); rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); - if (info->info_v2.ulBootUpUMAClock) - rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock); + if (le32_to_cpu(info->info_v2.ulBootUpUMAClock)) + rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock)); else if (rdev->clock.default_mclk) rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); else rdev->pm.igp_system_mclk.full = dfixed_const(66700); rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp); - rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq); + rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq)); rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp); rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth)); break; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 3a95999d2fe..ee5541c6a62 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) return -EINVAL; r700_cp_stop(rdev); - WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); + WREG32(CP_RB_CNTL, +#ifdef __BIG_ENDIAN + BUF_SWAP_32BIT | +#endif + RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); /* Reset cp */ WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index abc8cf5a367..79fa588e9ed 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -76,10 +76,10 @@ #define ROQ_IB1_START(x) ((x) << 0) #define ROQ_IB2_START(x) ((x) << 8) #define CP_RB_CNTL 0xC104 -#define RB_BUFSZ(x) ((x)<<0) -#define RB_BLKSZ(x) ((x)<<8) -#define RB_NO_UPDATE (1<<27) -#define RB_RPTR_WR_ENA (1<<31) +#define RB_BUFSZ(x) ((x) << 0) +#define RB_BLKSZ(x) ((x) << 8) +#define RB_NO_UPDATE (1 << 27) +#define RB_RPTR_WR_ENA (1 << 31) #define BUF_SWAP_32BIT (2 << 16) #define CP_RB_RPTR 0x8700 #define CP_RB_RPTR_ADDR 0xC10C diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 773e484f164..297bc9a7d6e 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -238,13 +238,13 @@ config SENSORS_K8TEMP will be called k8temp. config SENSORS_K10TEMP - tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" + tristate "AMD Family 10h/11h/12h/14h temperature sensor" depends on X86 && PCI help If you say yes here you get support for the temperature sensor(s) inside your CPU. Supported are later revisions of - the AMD Family 10h and all revisions of the AMD Family 11h - microarchitectures. + the AMD Family 10h and all revisions of the AMD Family 11h, + 12h (Llano), and 14h (Brazos) microarchitectures. This driver can also be built as a module. If so, the module will be called k10temp. @@ -455,13 +455,14 @@ config SENSORS_JZ4740 called jz4740-hwmon. config SENSORS_JC42 - tristate "JEDEC JC42.4 compliant temperature sensors" + tristate "JEDEC JC42.4 compliant memory module temperature sensors" depends on I2C help - If you say yes here you get support for Jedec JC42.4 compliant - temperature sensors. 
Support will include, but not be limited to, - ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, - MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. + If you say yes here, you get support for JEDEC JC42.4 compliant + temperature sensors, which are used on many DDR3 memory modules for + mobile devices and servers. Support will include, but not be limited + to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, + MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. This driver can also be built as a module. If so, the module will be called jc42. @@ -574,7 +575,7 @@ config SENSORS_LM85 help If you say yes here you get support for National Semiconductor LM85 sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, - EMC6D101 and EMC6D102. + EMC6D101, EMC6D102, and EMC6D103. This driver can also be built as a module. If so, the module will be called lm85. diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c index 86d822aa9bb..d46c0c758dd 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c @@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = { { "ad7414", 0 }, {} }; +MODULE_DEVICE_TABLE(i2c, ad7414_id); static struct i2c_driver ad7414_driver = { .driver = { diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index f13c843a296..5cc3e3784b4 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = { { "adt7411", 0 }, { } }; +MODULE_DEVICE_TABLE(i2c, adt7411_id); static struct i2c_driver adt7411_driver = { .driver = { diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 5dea9faa165..cd2a6e437ae 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -344,7 +344,7 @@ static int emc1403_remove(struct i2c_client *client) } static const unsigned short emc1403_address_list[] = { - 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END + 0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END }; static const struct i2c_device_id emc1403_idtable[] = { diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 340fc78c8dd..93499123706 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = { /* Configuration register defines */ #define JC42_CFG_CRIT_ONLY (1 << 2) +#define JC42_CFG_TCRIT_LOCK (1 << 6) +#define JC42_CFG_EVENT_LOCK (1 << 7) #define JC42_CFG_SHUTDOWN (1 << 8) #define JC42_CFG_HYST_SHIFT 9 #define JC42_CFG_HYST_MASK 0x03 @@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct jc42_data *data = i2c_get_clientdata(client); - long val; + unsigned long val; int diff, hyst; int err; int ret = count; @@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev, static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL); -static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, set_temp_crit); -static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, set_temp_min); -static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, set_temp_max); -static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, set_temp_crit_hyst); static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp_max_hyst, NULL); @@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = { NULL }; +static mode_t 
jc42_attribute_mode(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct i2c_client *client = to_i2c_client(dev); + struct jc42_data *data = i2c_get_clientdata(client); + unsigned int config = data->config; + bool readonly; + + if (attr == &dev_attr_temp1_crit.attr) + readonly = config & JC42_CFG_TCRIT_LOCK; + else if (attr == &dev_attr_temp1_min.attr || + attr == &dev_attr_temp1_max.attr) + readonly = config & JC42_CFG_EVENT_LOCK; + else if (attr == &dev_attr_temp1_crit_hyst.attr) + readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK); + else + readonly = true; + + return S_IRUGO | (readonly ? 0 : S_IWUSR); +} + static const struct attribute_group jc42_group = { .attrs = jc42_attributes, + .is_visible = jc42_attribute_mode, }; /* Return 0 if detection is successful, -ENODEV otherwise */ diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index da5a2404cd3..82bf65aa296 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,5 +1,5 @@ /* - * k10temp.c - AMD Family 10h/11h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> * @@ -25,7 +25,7 @@ #include <linux/pci.h> #include <asm/processor.h> -MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); +MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL"); @@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 776aeb3019d..508cb291f71 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -98,6 +98,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; * value, it uses signed 8-bit values with LSB = 1 degree Celsius. * For remote temperature, low and high limits, it uses signed 11-bit values * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers. + * For LM64 the actual remote diode temperature is 16 degree Celsius higher + * than the register reading. Remote temperature setpoints have to be + * adapted accordingly. */ #define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \ @@ -165,6 +168,8 @@ struct lm63_data { struct mutex update_lock; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ + int kind; + int temp2_offset; /* registers values */ u8 config, config_fan; @@ -247,16 +252,34 @@ static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dum return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); } -static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr, - char *buf) +/* + * There are 8bit registers for both local(temp1) and remote(temp2) sensor. + * For remote sensor registers temp2_offset has to be considered, + * for local sensor it must not. + * So we need separate 8bit accessors for local and remote sensor. 
+ */ +static ssize_t show_local_temp8(struct device *dev, + struct device_attribute *devattr, + char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm63_data *data = lm63_update_device(dev); return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])); } -static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy, - const char *buf, size_t count) +static ssize_t show_remote_temp8(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct lm63_data *data = lm63_update_device(dev); + return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]) + + data->temp2_offset); +} + +static ssize_t set_local_temp8(struct device *dev, + struct device_attribute *dummy, + const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); @@ -274,7 +297,8 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr, { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm63_data *data = lm63_update_device(dev); - return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])); + return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]) + + data->temp2_offset); } static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, @@ -294,7 +318,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, int nr = attr->index; mutex_lock(&data->update_lock); - data->temp11[nr] = TEMP11_TO_REG(val); + data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], data->temp11[nr] >> 8); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], @@ -310,6 +334,7 @@ static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute { struct lm63_data *data = lm63_update_device(dev); return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) + + data->temp2_offset - TEMP8_FROM_REG(data->temp2_crit_hyst)); } @@ -324,7 +349,7 @@ static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute * long hyst; mutex_lock(&data->update_lock); - hyst = TEMP8_FROM_REG(data->temp8[2]) - val; + hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val; i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, HYST_TO_REG(hyst)); mutex_unlock(&data->update_lock); @@ -355,16 +380,21 @@ static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan, static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0); -static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8, - set_temp8, 1); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8, + set_local_temp8, 1); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, set_temp11, 1); static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, set_temp11, 2); -static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2); +/* + * On LM63, temp2_crit can be set only once, which should be job + * of the bootloader. 
+ */ +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8, + NULL, 2); static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, set_temp2_crit_hyst); @@ -479,7 +509,12 @@ static int lm63_probe(struct i2c_client *new_client, data->valid = 0; mutex_init(&data->update_lock); - /* Initialize the LM63 chip */ + /* Set the device type */ + data->kind = id->driver_data; + if (data->kind == lm64) + data->temp2_offset = 16000; + + /* Initialize chip */ lm63_init_client(new_client); /* Register sysfs hooks */ diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 1e229847f37..d2cc2866081 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; enum chips { any_chip, lm85b, lm85c, adm1027, adt7463, adt7468, - emc6d100, emc6d102 + emc6d100, emc6d102, emc6d103 }; /* The LM85 registers */ @@ -90,6 +90,9 @@ enum chips { #define LM85_VERSTEP_EMC6D100_A0 0x60 #define LM85_VERSTEP_EMC6D100_A1 0x61 #define LM85_VERSTEP_EMC6D102 0x65 +#define LM85_VERSTEP_EMC6D103_A0 0x68 +#define LM85_VERSTEP_EMC6D103_A1 0x69 +#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */ #define LM85_REG_CONFIG 0x40 @@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = { { "emc6d100", emc6d100 }, { "emc6d101", emc6d100 }, { "emc6d102", emc6d102 }, + { "emc6d103", emc6d103 }, { } }; MODULE_DEVICE_TABLE(i2c, lm85_id); @@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) case LM85_VERSTEP_EMC6D102: type_name = "emc6d102"; break; + case LM85_VERSTEP_EMC6D103_A0: + case LM85_VERSTEP_EMC6D103_A1: + type_name = "emc6d103"; + break; + /* + * Registers apparently missing in EMC6D103S/EMC6D103:A2 + * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 + * (according to the data sheets), but used unconditionally + * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. + * So skip EMC6D103S for now. + case LM85_VERSTEP_EMC6D103S: + type_name = "emc6d103s"; + break; + */ } } else { dev_dbg(&adapter->dev, @@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client, case adt7468: case emc6d100: case emc6d102: + case emc6d103: data->freq_map = adm1027_freq_map; break; default: @@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) /* More alarm bits */ data->alarms |= lm85_read_value(client, EMC6D100_REG_ALARM3) << 16; - } else if (data->type == emc6d102) { + } else if (data->type == emc6d102 || data->type == emc6d103) { /* Have to read LSB bits after the MSB ones because the reading of the MSB bits has frozen the LSBs (backward from the ADM1027). 
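Editorial note on the jc42 hunk above: instead of registering the limit attributes read/write and rejecting writes at runtime, the patch declares them read-only and adds an .is_visible callback to the attribute group, so the effective sysfs mode is decided per attribute when the group is registered, based on the chip's TCRIT/EVENT lock bits. The fragment below is a minimal, self-contained sketch of that pattern only — it is not the jc42 driver's code, and the example_* names and the limits_locked flag are hypothetical.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

struct example_data {
	bool limits_locked;	/* e.g. cached from a lock bit read at probe time */
};

static ssize_t example_show_limit(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);	/* placeholder reading */
}

static ssize_t example_set_limit(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	return count;			/* placeholder, no hardware access in this sketch */
}

/* Declared read-only; write permission is granted by .is_visible below. */
static DEVICE_ATTR(temp1_max, S_IRUGO, example_show_limit, example_set_limit);

static struct attribute *example_attrs[] = {
	&dev_attr_temp1_max.attr,
	NULL
};

static mode_t example_attr_mode(struct kobject *kobj,
				struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct example_data *data = dev_get_drvdata(dev);

	if (attr == &dev_attr_temp1_max.attr && data->limits_locked)
		return S_IRUGO;		/* lock bit set: keep the attribute read-only */

	return S_IRUGO | S_IWUSR;
}

static const struct attribute_group example_group = {
	.attrs		= example_attrs,
	.is_visible	= example_attr_mode,
};

A probe routine would set drvdata for the device and then call sysfs_create_group(&dev->kobj, &example_group); because the mode is evaluated when the group is created, locked limit registers are never exposed as writable, which is the effect the jc42 change achieves.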
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b605ff3a1fa..829a2a1029f 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -847,11 +847,15 @@ complete: dev_err(dev->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; } + /* + * ProDB0017052: Clear ARDY bit twice + */ if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | - OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); + OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | + OMAP_I2C_STAT_ARDY)); omap_i2c_complete_cmd(dev, err); return IRQ_HANDLED; } @@ -1137,12 +1141,41 @@ omap_i2c_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_SUSPEND +static int omap_i2c_suspend(struct device *dev) +{ + if (!pm_runtime_suspended(dev)) + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) + dev->bus->pm->runtime_suspend(dev); + + return 0; +} + +static int omap_i2c_resume(struct device *dev) +{ + if (!pm_runtime_suspended(dev)) + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) + dev->bus->pm->runtime_resume(dev); + + return 0; +} + +static struct dev_pm_ops omap_i2c_pm_ops = { + .suspend = omap_i2c_suspend, + .resume = omap_i2c_resume, +}; +#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) +#else +#define OMAP_I2C_PM_OPS NULL +#endif + static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", .owner = THIS_MODULE, + .pm = OMAP_I2C_PM_OPS, }, }; diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 495be451d32..266135ddf7f 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev) adap->owner = THIS_MODULE; /* DDC class but actually often used for more generic I2C */ adap->class = I2C_CLASS_DDC; - strncpy(adap->name, "ST Microelectronics DDC I2C adapter", + strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", sizeof(adap->name)); adap->nr = bus_nr; adap->algo = &stu300_algo; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b606fd6402..08c194861af 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 0) { - nesdev->iw_status = 1; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 1) { - nesdev->iw_status = 0; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work) netif_carrier_on(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 0) { - nesdev->iw_status = 1; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 0) { + nesdev->iw_status = 1; + nes_port_ibevent(nesvnic); 
+ } } spin_unlock(&nesvnic->port_ibevent_lock); } @@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work) netif_carrier_off(nesvnic->netdev); spin_lock(&nesvnic->port_ibevent_lock); - if (nesdev->iw_status == 1) { - nesdev->iw_status = 0; - nes_port_ibevent(nesvnic); + if (nesvnic->of_device_registered) { + if (nesdev->iw_status == 1) { + nesdev->iw_status = 0; + nes_port_ibevent(nesvnic); + } } spin_unlock(&nesvnic->port_ibevent_lock); } diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 8245237b67c..eca0c41f122 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) * there are still requests that haven't been acked. */ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && - !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) + !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && + (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { @@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, } spin_lock_irqsave(&qp->s_lock, flags); + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto ack_done; /* Ignore invalid responses. */ if (qib_cmp24(psn, qp->s_next_psn) >= 0) diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 23cf8fc933e..5b8f59d6c3e 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner, event->owner = owner; list_add_tail(&event->node, &gameport_event_list); - schedule_work(&gameport_event_work); + queue_work(system_long_wq, &gameport_event_work); out: spin_unlock_irqrestore(&gameport_event_lock, flags); diff --git a/drivers/input/input.c b/drivers/input/input.c index 7985114beac..11905b6a302 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) * dev->event_lock held and interrupts disabled. */ static void input_pass_event(struct input_dev *dev, - struct input_handler *src_handler, unsigned int type, unsigned int code, int value) { struct input_handler *handler; @@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev, continue; handler = handle->handler; - - /* - * If this is the handler that injected this - * particular event we want to skip it to avoid - * filters firing again and again. - */ - if (handler == src_handler) - continue; - if (!handler->filter) { if (filtered) break; @@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data) if (test_bit(dev->repeat_key, dev->key) && is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { - input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); + input_pass_event(dev, EV_KEY, dev->repeat_key, 2); if (dev->sync) { /* @@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data) * Otherwise assume that the driver will send * SYN_REPORT once it's done. 
*/ - input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, EV_SYN, SYN_REPORT, 1); } if (dev->rep[REP_PERIOD]) @@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev) #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) static int input_handle_abs_event(struct input_dev *dev, - struct input_handler *src_handler, unsigned int code, int *pval) { bool is_mt_event; @@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev, /* Flush pending "slot" event */ if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); - input_pass_event(dev, src_handler, - EV_ABS, ABS_MT_SLOT, dev->slot); + input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); } return INPUT_PASS_TO_HANDLERS; } static void input_handle_event(struct input_dev *dev, - struct input_handler *src_handler, unsigned int type, unsigned int code, int value) { int disposition = INPUT_IGNORE_EVENT; @@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev, case EV_ABS: if (is_event_supported(code, dev->absbit, ABS_MAX)) - disposition = input_handle_abs_event(dev, src_handler, - code, &value); + disposition = input_handle_abs_event(dev, code, &value); break; @@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev, dev->event(dev, type, code, value); if (disposition & INPUT_PASS_TO_HANDLERS) - input_pass_event(dev, src_handler, type, code, value); + input_pass_event(dev, type, code, value); } /** @@ -367,7 +353,7 @@ void input_event(struct input_dev *dev, spin_lock_irqsave(&dev->event_lock, flags); add_input_randomness(type, code, value); - input_handle_event(dev, NULL, type, code, value); + input_handle_event(dev, type, code, value); spin_unlock_irqrestore(&dev->event_lock, flags); } } @@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle, rcu_read_lock(); grab = rcu_dereference(dev->grab); if (!grab || grab == handle) - input_handle_event(dev, handle->handler, - type, code, value); + input_handle_event(dev, type, code, value); rcu_read_unlock(); spin_unlock_irqrestore(&dev->event_lock, flags); @@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev) for (code = 0; code <= KEY_MAX; code++) { if (is_event_supported(code, dev->keybit, KEY_MAX) && __test_and_clear_bit(code, dev->key)) { - input_pass_event(dev, NULL, EV_KEY, code, 0); + input_pass_event(dev, EV_KEY, code, 0); } } - input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, EV_SYN, SYN_REPORT, 1); } } @@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev, !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && __test_and_clear_bit(old_keycode, dev->key)) { - input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); + input_pass_event(dev, EV_KEY, old_keycode, 0); if (dev->sync) - input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, EV_SYN, SYN_REPORT, 1); } out: diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index ac471b77c18..99ce9032d08 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -71,8 +71,9 @@ struct tegra_kbc { spinlock_t lock; unsigned int repoll_dly; unsigned long cp_dly_jiffies; + bool use_fn_map; const struct tegra_kbc_platform_data *pdata; - unsigned short keycode[KBC_MAX_KEY]; + unsigned short keycode[KBC_MAX_KEY * 2]; unsigned short current_keys[KBC_MAX_KPENT]; unsigned int num_pressed_keys; struct timer_list timer; @@ -178,6 
+179,40 @@ static const u32 tegra_kbc_default_keymap[] = { KEY(15, 5, KEY_F2), KEY(15, 6, KEY_CAPSLOCK), KEY(15, 7, KEY_F6), + + /* Software Handled Function Keys */ + KEY(20, 0, KEY_KP7), + + KEY(21, 0, KEY_KP9), + KEY(21, 1, KEY_KP8), + KEY(21, 2, KEY_KP4), + KEY(21, 4, KEY_KP1), + + KEY(22, 1, KEY_KPSLASH), + KEY(22, 2, KEY_KP6), + KEY(22, 3, KEY_KP5), + KEY(22, 4, KEY_KP3), + KEY(22, 5, KEY_KP2), + KEY(22, 7, KEY_KP0), + + KEY(27, 1, KEY_KPASTERISK), + KEY(27, 3, KEY_KPMINUS), + KEY(27, 4, KEY_KPPLUS), + KEY(27, 5, KEY_KPDOT), + + KEY(28, 5, KEY_VOLUMEUP), + + KEY(29, 3, KEY_HOME), + KEY(29, 4, KEY_END), + KEY(29, 5, KEY_BRIGHTNESSDOWN), + KEY(29, 6, KEY_VOLUMEDOWN), + KEY(29, 7, KEY_BRIGHTNESSUP), + + KEY(30, 0, KEY_NUMLOCK), + KEY(30, 1, KEY_SCROLLLOCK), + KEY(30, 2, KEY_MUTE), + + KEY(31, 4, KEY_HELP), }; static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { @@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) unsigned int i; unsigned int num_down = 0; unsigned long flags; + bool fn_keypress = false; spin_lock_irqsave(&kbc->lock, flags); for (i = 0; i < KBC_MAX_KPENT; i++) { @@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); scancodes[num_down] = scancode; - keycodes[num_down++] = kbc->keycode[scancode]; + keycodes[num_down] = kbc->keycode[scancode]; + /* If driver uses Fn map, do not report the Fn key. */ + if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map) + fn_keypress = true; + else + num_down++; } val >>= 8; } + + /* + * If the platform uses Fn keymaps, translate keys on a Fn keypress. + * Function keycodes are KBC_MAX_KEY apart from the plain keycodes. + */ + if (fn_keypress) { + for (i = 0; i < num_down; i++) { + scancodes[i] += KBC_MAX_KEY; + keycodes[i] = kbc->keycode[scancodes[i]]; + } + } + spin_unlock_irqrestore(&kbc->lock, flags); tegra_kbc_report_released_keys(kbc->idev, @@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) input_dev->keycode = kbc->keycode; input_dev->keycodesize = sizeof(kbc->keycode[0]); - input_dev->keycodemax = ARRAY_SIZE(kbc->keycode); + input_dev->keycodemax = KBC_MAX_KEY; + if (pdata->use_fn_map) + input_dev->keycodemax *= 2; + kbc->use_fn_map = pdata->use_fn_map; keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, input_dev->keycode, input_dev->keybit); diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 1f8e0108962..7e64d01da2b 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c @@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) /* request the IRQs */ err = request_irq(encoder->irq_a, &rotary_encoder_irq, - IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, DRV_NAME, encoder); if (err) { dev_err(&pdev->dev, "unable to request IRQ %d\n", @@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) } err = request_irq(encoder->irq_b, &rotary_encoder_irq, - IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, DRV_NAME, encoder); if (err) { dev_err(&pdev->dev, "unable to request IRQ %d\n", diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 25e5d042a72..7453938bf5e 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -51,6 
+51,29 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) + +/* + * The following describes response for the 0x0c query. + * + * byte mask name meaning + * ---- ---- ------- ------------ + * 1 0x01 adjustable threshold capacitive button sensitivity + * can be adjusted + * 1 0x02 report max query 0x0d gives max coord reported + * 1 0x04 clearpad sensor is ClearPad product + * 1 0x08 advanced gesture not particularly meaningful + * 1 0x10 clickpad bit 0 1-button ClickPad + * 1 0x60 multifinger mode identifies firmware finger counting + * (not reporting!) algorithm. + * Not particularly meaningful + * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered + * 2 0x01 clickpad bit 1 2-button ClickPad + * 2 0x02 deluxe LED controls touchpad support LED commands + * ala multimedia control bar + * 2 0x04 reduced filtering firmware does less filtering on + * position data, driver should watch + * for noise. + */ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index db5b0bca1a1..ba70058e2be 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event) kfree(event); } -static void serio_remove_duplicate_events(struct serio_event *event) +static void serio_remove_duplicate_events(void *object, + enum serio_event_type type) { struct serio_event *e, *next; unsigned long flags; @@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event) spin_lock_irqsave(&serio_event_lock, flags); list_for_each_entry_safe(e, next, &serio_event_list, node) { - if (event->object == e->object) { + if (object == e->object) { /* * If this event is of different type we should not * look further - we only suppress duplicate events * that were sent back-to-back. 
*/ - if (event->type != e->type) + if (type != e->type) break; list_del_init(&e->node); @@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work) break; } - serio_remove_duplicate_events(event); + serio_remove_duplicate_events(event->object, event->type); serio_free_event(event); } @@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner, event->owner = owner; list_add_tail(&event->node, &serio_event_list); - schedule_work(&serio_event_work); + queue_work(system_long_wq, &serio_event_work); out: spin_unlock_irqrestore(&serio_event_lock, flags); @@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute * } else if (!strncmp(buf, "rescan", count)) { serio_disconnect_port(serio); serio_find_driver(serio); + serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { serio_disconnect_port(serio); error = serio_bind_driver(serio, to_serio_driver(drv)); put_driver(drv); + serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); } else { error = -EINVAL; } diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index fc381498b79..cf8fb9f5d4a 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i /* Retrieve the physical and logical size for OEM devices */ error = wacom_retrieve_hid_descriptor(intf, features); if (error) - goto fail2; + goto fail3; wacom_setup_device_quirks(features); diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 14ea54b78e4..4bf2316e328 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; - /* REVISIT when the irq can be triggered active-low, or if for some + /* + * REVISIT when the irq can be triggered active-low, or if for some * reason the touchscreen isn't hooked up, we don't need to access * the pendown state. 
*/ - if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) { - dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); - return -EINVAL; - } if (pdata->get_pendown_state) { ts->get_pendown_state = pdata->get_pendown_state; - return 0; - } + } else if (gpio_is_valid(pdata->gpio_pendown)) { - err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); - if (err) { - dev_err(&spi->dev, "failed to request pendown GPIO%d\n", - pdata->gpio_pendown); - return err; - } + err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); + if (err) { + dev_err(&spi->dev, "failed to request pendown GPIO%d\n", + pdata->gpio_pendown); + return err; + } - ts->gpio_pendown = pdata->gpio_pendown; + ts->gpio_pendown = pdata->gpio_pendown; + + } else { + dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); + return -EINVAL; + } return 0; } @@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi) err_put_regulator: regulator_put(ts->reg); err_free_gpio: - if (ts->gpio_pendown != -1) + if (!ts->get_pendown_state) gpio_free(ts->gpio_pendown); err_cleanup_filter: if (ts->filter_cleanup) @@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi) regulator_disable(ts->reg); regulator_put(ts->reg); - if (ts->gpio_pendown != -1) + if (!ts->get_pendown_state) { + /* + * If we are not using specialized pendown method we must + * have been relying on gpio we set up ourselves. + */ gpio_free(ts->gpio_pendown); + } if (ts->filter_cleanup) ts->filter_cleanup(ts->filter_data); diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 5cb8449c909..c14412ef464 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c @@ -51,6 +51,10 @@ MODULE_LICENSE("GPL"); #define W8001_PKTLEN_TPCCTL 11 /* control packet */ #define W8001_PKTLEN_TOUCH2FG 13 +/* resolution in points/mm */ +#define W8001_PEN_RESOLUTION 100 +#define W8001_TOUCH_RESOLUTION 10 + struct w8001_coord { u8 rdy; u8 tsw; @@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query) query->y = 1024; if (query->panel_res) query->x = query->y = (1 << query->panel_res); - query->panel_res = 10; + query->panel_res = W8001_TOUCH_RESOLUTION; } } @@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001) input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); + input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION); + input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION); input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); if (coord.tilt_x && coord.tilt_y) { input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); @@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001) w8001->max_touch_x = touch.x; w8001->max_touch_y = touch.y; - /* scale to pen maximum */ if (w8001->max_pen_x && w8001->max_pen_y) { + /* if pen is supported scale to pen maximum */ touch.x = w8001->max_pen_x; touch.y = w8001->max_pen_y; + touch.panel_res = W8001_PEN_RESOLUTION; } input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); + input_abs_set_res(dev, ABS_X, touch.panel_res); + input_abs_set_res(dev, ABS_Y, touch.panel_res); switch (touch.sensor_id) { case 0: diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 0858791978d..cfff0c41d29 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c @@ -1247,10 +1247,10 @@ static void 
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) { struct PStack *st = fi->userdata; - struct sk_buff *skb, *oskb; + struct sk_buff *skb; struct Layer2 *l2 = &st->l2; u_char header[MAX_HEADER_LEN]; - int i; + int i, hdr_space_needed; int unsigned p1; u_long flags; @@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) if (!skb) return; + hdr_space_needed = l2headersize(l2, 0); + if (hdr_space_needed > skb_headroom(skb)) { + struct sk_buff *orig_skb = skb; + + skb = skb_realloc_headroom(skb, hdr_space_needed); + if (!skb) { + dev_kfree_skb(orig_skb); + return; + } + } spin_lock_irqsave(&l2->lock, flags); if(test_bit(FLG_MOD128, &l2->flag)) p1 = (l2->vs - l2->va) % 128; @@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) l2->vs = (l2->vs + 1) % 8; } spin_unlock_irqrestore(&l2->lock, flags); - p1 = skb->data - skb->head; - if (p1 >= i) - memcpy(skb_push(skb, i), header, i); - else { - printk(KERN_WARNING - "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); - oskb = skb; - skb = alloc_skb(oskb->len + i, GFP_ATOMIC); - memcpy(skb_put(skb, i), header, i); - skb_copy_from_linear_data(oskb, - skb_put(skb, oskb->len), oskb->len); - dev_kfree_skb(oskb); - } + memcpy(skb_push(skb, i), header, i); st->l2.l2l1(st, PH_PULL | INDICATION, skb); test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h index 729df408938..18b801ad97a 100644 --- a/drivers/isdn/hysdn/hysdn_defs.h +++ b/drivers/isdn/hysdn/hysdn_defs.h @@ -227,7 +227,6 @@ extern hysdn_card *card_root; /* pointer to first card */ /*************************/ /* im/exported functions */ /*************************/ -extern char *hysdn_getrev(const char *); /* hysdn_procconf.c */ extern int hysdn_procconf_init(void); /* init proc config filesys */ @@ -259,7 +258,6 @@ extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *, /* hysdn_net.c */ extern unsigned int hynet_enable; -extern char *hysdn_net_revision; extern int hysdn_net_create(hysdn_card *); /* create a new net device */ extern int hysdn_net_release(hysdn_card *); /* delete the device */ extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c index b7cc5c2f08c..0ab42ace169 100644 --- a/drivers/isdn/hysdn/hysdn_init.c +++ b/drivers/isdn/hysdn/hysdn_init.c @@ -36,7 +36,6 @@ MODULE_DESCRIPTION("ISDN4Linux: Driver for HYSDN cards"); MODULE_AUTHOR("Werner Cornelius"); MODULE_LICENSE("GPL"); -static char *hysdn_init_revision = "$Revision: 1.6.6.6 $"; static int cardmax; /* number of found cards */ hysdn_card *card_root = NULL; /* pointer to first card */ static hysdn_card *card_last = NULL; /* pointer to first card */ @@ -49,25 +48,6 @@ static hysdn_card *card_last = NULL; /* pointer to first card */ /* Additionally newer versions may be activated without rebooting. 
*/ /****************************************************************************/ -/******************************************************/ -/* extract revision number from string for log output */ -/******************************************************/ -char * -hysdn_getrev(const char *revision) -{ - char *rev; - char *p; - - if ((p = strchr(revision, ':'))) { - rev = p + 2; - p = strchr(rev, '$'); - *--p = 0; - } else - rev = "???"; - return rev; -} - - /****************************************************************************/ /* init_module is called once when the module is loaded to do all necessary */ /* things like autodetect... */ @@ -175,13 +155,9 @@ static int hysdn_have_procfs; static int __init hysdn_init(void) { - char tmp[50]; int rc; - strcpy(tmp, hysdn_init_revision); - printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp)); - strcpy(tmp, hysdn_net_revision); - printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp)); + printk(KERN_NOTICE "HYSDN: module loaded\n"); rc = pci_register_driver(&hysdn_pci_driver); if (rc) diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c index feec8d89d71..11f2cce2600 100644 --- a/drivers/isdn/hysdn/hysdn_net.c +++ b/drivers/isdn/hysdn/hysdn_net.c @@ -26,9 +26,6 @@ unsigned int hynet_enable = 0xffffffff; module_param(hynet_enable, uint, 0); -/* store the actual version for log reporting */ -char *hysdn_net_revision = "$Revision: 1.8.6.4 $"; - #define MAX_SKB_BUFFERS 20 /* number of buffers for keeping TX-data */ /****************************************************************************/ diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 96b3e39c335..5fe83bd4206 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -23,7 +23,6 @@ #include "hysdn_defs.h" static DEFINE_MUTEX(hysdn_conf_mutex); -static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $"; #define INFO_OUT_LEN 80 /* length of info line including lf */ @@ -404,7 +403,7 @@ hysdn_procconf_init(void) card = card->next; /* next entry */ } - printk(KERN_NOTICE "HYSDN: procfs Rev. 
%s initialised\n", hysdn_getrev(hysdn_procconf_revision)); + printk(KERN_NOTICE "HYSDN: procfs initialised\n"); return (0); } /* hysdn_procconf_init */ diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 8a2f767f26d..0ed7f6bc2a7 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev) if (md_check_no_bitmap(mddev)) return -EINVAL; - mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = linear_conf(mddev, mddev->raid_disks); if (!conf) diff --git a/drivers/md/md.c b/drivers/md/md.c index b76cfc89e1b..818313e277e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -287,6 +287,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio) mddev_t *mddev = q->queuedata; int rv; int cpu; + unsigned int sectors; if (mddev == NULL || mddev->pers == NULL || !mddev->ready) { @@ -311,12 +312,16 @@ static int md_make_request(struct request_queue *q, struct bio *bio) atomic_inc(&mddev->active_io); rcu_read_unlock(); + /* + * save the sectors now since our bio can + * go away inside make_request + */ + sectors = bio_sectors(bio); rv = mddev->pers->make_request(mddev, bio); cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); - part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], - bio_sectors(bio)); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); part_stat_unlock(); if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) @@ -548,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit) { mddev_t *mddev, *new = NULL; + if (unit && MAJOR(unit) != MD_MAJOR) + unit &= ~((1<<MdpMinorShift)-1); + retry: spin_lock(&all_mddevs_lock); @@ -1947,8 +1955,6 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) __bdevname(dev, b)); return PTR_ERR(bdev); } - if (!shared) - set_bit(AllReserved, &rdev->flags); rdev->bdev = bdev; return err; } @@ -2465,6 +2471,9 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) if (rdev->raid_disk != -1) return -EBUSY; + if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) + return -EBUSY; + if (rdev->mddev->pers->hot_add_disk == NULL) return -EINVAL; @@ -2610,12 +2619,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) mddev_lock(mddev); list_for_each_entry(rdev2, &mddev->disks, same_set) - if (test_bit(AllReserved, &rdev2->flags) || - (rdev->bdev == rdev2->bdev && - rdev != rdev2 && - overlaps(rdev->data_offset, rdev->sectors, - rdev2->data_offset, - rdev2->sectors))) { + if (rdev->bdev == rdev2->bdev && + rdev != rdev2 && + overlaps(rdev->data_offset, rdev->sectors, + rdev2->data_offset, + rdev2->sectors)) { overlap = 1; break; } @@ -4133,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) } mddev->array_sectors = sectors; - set_capacity(mddev->gendisk, mddev->array_sectors); - if (mddev->pers) + if (mddev->pers) { + set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); - + } return len; } @@ -4619,6 +4627,7 @@ static int do_md_run(mddev_t *mddev) } set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); out: return err; @@ -4707,6 +4716,7 @@ static void md_clean(mddev_t *mddev) mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; mddev->in_sync = 0; + mddev->changed = 0; mddev->degraded = 0; mddev->safemode = 0; mddev->bitmap_info.offset = 0; @@ -4822,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, 
int is_open) set_capacity(disk, 0); mutex_unlock(&mddev->open_mutex); + mddev->changed = 1; revalidate_disk(disk); if (mddev->ro) @@ -5578,6 +5589,8 @@ static int update_raid_disks(mddev_t *mddev, int raid_disks) mddev->delta_disks = raid_disks - mddev->raid_disks; rv = mddev->pers->check_reshape(mddev); + if (rv < 0) + mddev->delta_disks = 0; return rv; } @@ -6004,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) atomic_inc(&mddev->openers); mutex_unlock(&mddev->open_mutex); - check_disk_size_change(mddev->gendisk, bdev); + check_disk_change(bdev); out: return err; } @@ -6019,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) return 0; } + +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} static const struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -6029,6 +6057,8 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) @@ -6985,9 +7015,6 @@ void md_do_sync(mddev_t *mddev) } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev->resync_min = mddev->curr_resync_completed; mddev->curr_resync = 0; - if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) - mddev->curr_resync_completed = 0; - sysfs_notify(&mddev->kobj, NULL, "sync_completed"); wake_up(&resync_wait); set_bit(MD_RECOVERY_DONE, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -7028,7 +7055,7 @@ static int remove_and_add_spares(mddev_t *mddev) } } - if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { + if (mddev->degraded && !mddev->recovery_disabled) { list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && @@ -7151,7 +7178,20 @@ void md_check_recovery(mddev_t *mddev) /* Only thing we do on a ro array is remove * failed devices. 
*/ - remove_and_add_spares(mddev); + mdk_rdev_t *rdev; + list_for_each_entry(rdev, &mddev->disks, same_set) + if (rdev->raid_disk >= 0 && + !test_bit(Blocked, &rdev->flags) && + test_bit(Faulty, &rdev->flags) && + atomic_read(&rdev->nr_pending)==0) { + if (mddev->pers->hot_remove_disk( + mddev, rdev->raid_disk)==0) { + char nm[20]; + sprintf(nm,"rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + rdev->raid_disk = -1; + } + } clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; } diff --git a/drivers/md/md.h b/drivers/md/md.h index eec517ced31..12215d437fc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -93,8 +93,6 @@ struct mdk_rdev_s #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ -#define AllReserved 6 /* If whole device is reserved for - * one array */ #define AutoDetected 7 /* added by auto-detect */ #define Blocked 8 /* An error occured on an externally * managed array, don't allow writes @@ -276,6 +274,8 @@ struct mddev_s atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ + int changed; /* True if we might need to + * reread partition info */ int degraded; /* whether md should consider * adding a spare */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 6d7ddf32ef2..3a62d440e27 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev) * bookkeeping area. [whatever we allocate in multipath_run(), * should be freed in multipath_stop()] */ - mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); mddev->private = conf; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a39f4c355e5..c0ac457f121 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -179,6 +179,14 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) rdev1->new_raid_disk = j; } + if (mddev->level == 1) { + /* taiking over a raid1 array- + * we have only one active disk + */ + j = 0; + rdev1->new_raid_disk = j; + } + if (j < 0 || j >= mddev->raid_disks) { printk(KERN_ERR "md/raid0:%s: bad disk number %d - " "aborting!\n", mdname(mddev), j); @@ -353,7 +361,6 @@ static int raid0_run(mddev_t *mddev) if (md_check_no_bitmap(mddev)) return -EINVAL; blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); - mddev->queue->queue_lock = &mddev->queue->__queue_lock; /* if private is not null, we are here after takeover */ if (mddev->private == NULL) { @@ -644,12 +651,39 @@ static void *raid0_takeover_raid10(mddev_t *mddev) return priv_conf; } +static void *raid0_takeover_raid1(mddev_t *mddev) +{ + raid0_conf_t *priv_conf; + + /* Check layout: + * - (N - 1) mirror drives must be already faulty + */ + if ((mddev->raid_disks - 1) != mddev->degraded) { + printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", + mdname(mddev)); + return ERR_PTR(-EINVAL); + } + + /* Set new parameters */ + mddev->new_level = 0; + mddev->new_layout = 0; + mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ + mddev->delta_disks = 1 - mddev->raid_disks; + mddev->raid_disks = 1; + /* make sure it will be not marked as dirty */ + mddev->recovery_cp = MaxSector; + + create_strip_zones(mddev, &priv_conf); + return priv_conf; +} + static void *raid0_takeover(mddev_t *mddev) { /* raid0 can take over: * raid4 - if all data disks are active. 
* raid5 - providing it is Raid4 layout and one disk is faulty * raid10 - assuming we have all necessary active disks + * raid1 - with (N -1) mirror drives faulty */ if (mddev->level == 4) return raid0_takeover_raid45(mddev); @@ -665,6 +699,12 @@ static void *raid0_takeover(mddev_t *mddev) if (mddev->level == 10) return raid0_takeover_raid10(mddev); + if (mddev->level == 1) + return raid0_takeover_raid1(mddev); + + printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n", + mddev->level); + return ERR_PTR(-EINVAL); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a23ffa397ba..06cd712807d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf) if (conf->pending_bio_list.head) { struct bio *bio; bio = bio_list_get(&conf->pending_bio_list); + /* Only take the spinlock to quiet a warning */ + spin_lock(conf->mddev->queue->queue_lock); blk_remove_plug(conf->mddev->queue); + spin_unlock(conf->mddev->queue->queue_lock); spin_unlock_irq(&conf->device_lock); /* flush any pending bitmap writes to * disk before proceeding w/ I/O */ @@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_inc(&r1_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); - blk_plug_device(mddev->queue); + blk_plug_device_unlocked(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); } r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); @@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev) if (IS_ERR(conf)) return PTR_ERR(conf); - mddev->queue->queue_lock = &conf->device_lock; list_for_each_entry(rdev, &mddev->disks, same_set) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 69b65954439..747d061d8e0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf) if (conf->pending_bio_list.head) { struct bio *bio; bio = bio_list_get(&conf->pending_bio_list); + /* Spinlock only taken to quiet a warning */ + spin_lock(conf->mddev->queue->queue_lock); blk_remove_plug(conf->mddev->queue); + spin_unlock(conf->mddev->queue->queue_lock); spin_unlock_irq(&conf->device_lock); /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ @@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_inc(&r10_bio->remaining); spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); - blk_plug_device(mddev->queue); + blk_plug_device_unlocked(mddev->queue); spin_unlock_irqrestore(&conf->device_lock, flags); } @@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev) if (!conf) goto out; - mddev->queue->queue_lock = &conf->device_lock; - mddev->thread = conf->thread; conf->thread = NULL; @@ -2463,11 +2464,13 @@ static void *raid10_takeover_raid0(mddev_t *mddev) mddev->recovery_cp = MaxSector; conf = setup_conf(mddev); - if (!IS_ERR(conf)) + if (!IS_ERR(conf)) { list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0) rdev->new_raid_disk = rdev->raid_disk * 2; - + conf->barrier = 1; + } + return conf; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5044babfcda..78536fdbd87 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev) mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_fn = raid5_congested; - 
mddev->queue->queue_lock = &conf->device_lock; mddev->queue->unplug_fn = raid5_unplug_queue; chunk_size = mddev->chunk_sectors << 9; @@ -5517,7 +5516,6 @@ static int raid5_start_reshape(mddev_t *mddev) raid5_conf_t *conf = mddev->private; mdk_rdev_t *rdev; int spares = 0; - int added_devices = 0; unsigned long flags; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) @@ -5527,8 +5525,8 @@ static int raid5_start_reshape(mddev_t *mddev) return -ENOSPC; list_for_each_entry(rdev, &mddev->disks, same_set) - if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks) - && !test_bit(Faulty, &rdev->flags)) + if (!test_bit(In_sync, &rdev->flags) + && !test_bit(Faulty, &rdev->flags)) spares++; if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) @@ -5571,34 +5569,35 @@ static int raid5_start_reshape(mddev_t *mddev) * to correctly record the "partially reconstructed" state of * such devices during the reshape and confusion could result. */ - if (mddev->delta_disks >= 0) - list_for_each_entry(rdev, &mddev->disks, same_set) - if (rdev->raid_disk < 0 && - !test_bit(Faulty, &rdev->flags)) { - if (raid5_add_disk(mddev, rdev) == 0) { - char nm[20]; - if (rdev->raid_disk >= conf->previous_raid_disks) { - set_bit(In_sync, &rdev->flags); - added_devices++; - } else - rdev->recovery_offset = 0; - sprintf(nm, "rd%d", rdev->raid_disk); - if (sysfs_create_link(&mddev->kobj, - &rdev->kobj, nm)) - /* Failure here is OK */; - } else - break; - } else if (rdev->raid_disk >= conf->previous_raid_disks - && !test_bit(Faulty, &rdev->flags)) { - /* This is a spare that was manually added */ - set_bit(In_sync, &rdev->flags); - added_devices++; - } + if (mddev->delta_disks >= 0) { + int added_devices = 0; + list_for_each_entry(rdev, &mddev->disks, same_set) + if (rdev->raid_disk < 0 && + !test_bit(Faulty, &rdev->flags)) { + if (raid5_add_disk(mddev, rdev) == 0) { + char nm[20]; + if (rdev->raid_disk + >= conf->previous_raid_disks) { + set_bit(In_sync, &rdev->flags); + added_devices++; + } else + rdev->recovery_offset = 0; + sprintf(nm, "rd%d", rdev->raid_disk); + if (sysfs_create_link(&mddev->kobj, + &rdev->kobj, nm)) + /* Failure here is OK */; + } + } else if (rdev->raid_disk >= conf->previous_raid_disks + && !test_bit(Faulty, &rdev->flags)) { + /* This is a spare that was manually added */ + set_bit(In_sync, &rdev->flags); + added_devices++; + } - /* When a reshape changes the number of devices, ->degraded - * is measured against the larger of the pre and post number of - * devices.*/ - if (mddev->delta_disks > 0) { + /* When a reshape changes the number of devices, + * ->degraded is measured against the larger of the + * pre and post number of devices. 
+ */ spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) - added_devices; diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index e9a3eab7b0c..8c1d85e27be 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -621,7 +621,7 @@ static int __init memstick_init(void) { int rc; - workqueue = create_freezeable_workqueue("kmemstick"); + workqueue = create_freezable_workqueue("kmemstick"); if (!workqueue) return -ENOMEM; diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index f71f2294847..1735c84ff75 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.17" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" +#define MPT_LINUX_VERSION_COMMON "3.04.18" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index a3856ed90ae..e8deb8ed049 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) } static int +mptctl_release(struct inode *inode, struct file *filep) +{ + fasync_helper(-1, filep, 0, &async_queue); + return 0; +} + +static int mptctl_fasync(int fd, struct file *filep, int mode) { MPT_ADAPTER *ioc; @@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = { .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, + .release = mptctl_release, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, #endif diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 59b8f53d1ec..0d9b82a4454 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) } out: - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", + ioc->name, ((retval == SUCCESS) ? 
"SUCCESS" : "FAILED"), retval, + SCpnt, SCpnt->serial_number); return retval; } @@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = SUCCESS; + retval = 0; goto out; } diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 5f6852dff40..44d4475a09d 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c @@ -329,7 +329,7 @@ static int __init tifm_init(void) { int rc; - workqueue = create_freezeable_workqueue("tifm"); + workqueue = create_freezable_workqueue("tifm"); if (!workqueue) return -ENOMEM; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 4d2ea8e8014..6df5a55da11 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -785,7 +785,7 @@ static int __init vmballoon_init(void) if (x86_hyper != &x86_hyper_vmware) return -ENODEV; - vmballoon_wq = create_freezeable_workqueue("vmmemctl"); + vmballoon_wq = create_freezable_workqueue("vmmemctl"); if (!vmballoon_wq) { pr_err("failed to create workqueue\n"); return -ENOMEM; diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index d9d7efbc77c..6322d1fb5d6 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) init_completion(&dev->dma_done); - dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); + dev->card_workqueue = create_freezable_workqueue(DRV_NAME); if (!dev->card_workqueue) goto error9; diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 67822cf6c02..ac0d6a8613b 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = { static __init int sm_module_init(void) { int error = 0; - cache_flush_workqueue = create_freezeable_workqueue("smflush"); + cache_flush_workqueue = create_freezable_workqueue("smflush"); if (IS_ERR(cache_flush_workqueue)) return PTR_ERR(cache_flush_workqueue); diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 0c7811faf72..a179cc6d79f 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } req = nonemb_cmd->va; sge = nonembedded_sgl(wrb); @@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); +err: spin_unlock_bh(&adapter->mcc_lock); return status; } diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index f40740e68ea..d584d32c747 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -4276,9 +4276,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | BNX2X_ACCEPT_MULTICAST; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, + BNX2X_ACCEPT_UNICAST | + BNX2X_ACCEPT_MULTICAST); + } #endif break; @@ -4286,18 +4289,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | BNX2X_ACCEPT_ALL_MULTICAST; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - 
BNX2X_ACCEPT_MULTICAST); + /* + * Prevent duplication of multicast packets by configuring FCoE + * L2 Client to receive only matched unicast frames. + */ + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, + BNX2X_ACCEPT_UNICAST); + } #endif break; case BNX2X_RX_MODE_PROMISC: def_q_filters |= BNX2X_PROMISCUOUS_MODE; #ifdef BCM_CNIC - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); + /* + * Prevent packets duplication by configuring DROP_ALL for FCoE + * L2 Client. + */ + if (!NO_FCOE(bp)) { + cl_id = bnx2x_fcoe(bp, cl_id); + bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); + } #endif /* pass management unicast packets as well */ llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee45..7513c4523ac 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) goto open_unlock; } - priv->wq = create_freezeable_workqueue("mcp251x_wq"); + priv->wq = create_freezable_workqueue("mcp251x_wq"); INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 27d1d398e25..d38706958af 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig @@ -1,5 +1,5 @@ config CAN_MSCAN - depends on CAN_DEV && (PPC || M68K || M68KNOMMU) + depends on CAN_DEV && (PPC || M68K) tristate "Support for Freescale MSCAN based chips" ---help--- The Motorola Scalable Controller Area Network (MSCAN) definition diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c42e9726824..e54712b22c2 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -185,7 +185,7 @@ struct pch_can_priv { static struct can_bittiming_const pch_can_bittiming_const = { .name = KBUILD_MODNAME, - .tseg1_min = 1, + .tseg1_min = 2, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, @@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev) struct pch_can_priv *priv = netdev_priv(ndev); unregister_candev(priv->ndev); - pci_iounmap(pdev, priv->regs); if (priv->use_msi) pci_disable_msi(priv->dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); pch_can_reset(priv); + pci_iounmap(pdev, priv->regs); free_candev(priv->ndev); } @@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev, priv->use_msi = 0; } else { netdev_err(ndev, "PCH CAN opened with MSI\n"); + pci_set_master(pdev); priv->use_msi = 1; } diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd9..5de46a9a77b 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig @@ -18,7 +18,7 @@ config CAN_SOFTING config CAN_SOFTING_CS tristate "Softing Gmbh CAN pcmcia cards" depends on PCMCIA - select CAN_SOFTING + depends on CAN_SOFTING ---help--- Support for PCMCIA cards from Softing Gmbh & some cards from Vector Gmbh. 
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 300fe75dd1a..c11bb4de863 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059..6aad64df4dc 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) { int i; - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Debugfs support is best effort. @@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) */ static void cleanup_debugfs(struct adapter *adapter) { - BUG_ON(adapter->debugfs_root == NULL); + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); /* * Unlike our sister routine cleanup_proc(), we don't need to remove @@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, struct net_device *netdev; /* - * Vet our module parameters. - */ - if (msi != MSI_MSIX && msi != MSI_MSI) { - dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" - " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, - MSI_MSI); - err = -EINVAL; - goto err_out; - } - - /* * Print our driver banner the first time we're called to initialize a * device. */ @@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, /* * Set up our debugfs entries. */ - if (cxgb4vf_debugfs_root) { + if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), cxgb4vf_debugfs_root); - if (adapter->debugfs_root == NULL) + if (IS_ERR_OR_NULL(adapter->debugfs_root)) dev_warn(&pdev->dev, "could not create debugfs" " directory"); else @@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, */ err_free_debugfs: - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2802,7 +2791,6 @@ err_release_regions: err_disable_device: pci_disable_device(pdev); -err_out: return err; } @@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) /* * Tear down our debugfs entries. */ - if (adapter->debugfs_root) { + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { cleanup_debugfs(adapter); debugfs_remove_recursive(adapter->debugfs_root); } @@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) } /* + * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt + * delivery. + */ +static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) +{ + struct adapter *adapter; + int pidx; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + /* + * Disable all Virtual Interfaces. This will shut down the + * delivery of all ingress packets into the chip for these + * Virtual Interfaces. + */ + for_each_port(adapter, pidx) { + struct net_device *netdev; + struct port_info *pi; + + if (!test_bit(pidx, &adapter->registered_device_map)) + continue; + + netdev = adapter->port[pidx]; + if (!netdev) + continue; + + pi = netdev_priv(netdev); + t4vf_enable_vi(adapter, pi->viid, false, false); + } + + /* + * Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. 
+ */ + t4vf_free_sge_resources(adapter); +} + +/* * PCI Device registration data structures. */ #define CH_DEVICE(devid, idx) \ @@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { .id_table = cxgb4vf_pci_tbl, .probe = cxgb4vf_pci_probe, .remove = __devexit_p(cxgb4vf_pci_remove), + .shutdown = __devexit_p(cxgb4vf_pci_shutdown), }; /* @@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) { int ret; + /* + * Vet our module parameters. + */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + printk(KERN_WARNING KBUILD_MODNAME + ": bad module parameter msi=%d; must be %d" + " (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); + return -EINVAL; + } + /* Debugfs support is optional, just warn if this fails */ cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!cxgb4vf_debugfs_root) + if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) printk(KERN_WARNING KBUILD_MODNAME ": could not create" " debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4vf_driver); - if (ret < 0) + if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) debugfs_remove(cxgb4vf_debugfs_root); return ret; } diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475c..192db226ec7 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, delay_idx = 0; ms = delay[0]; - for (i = 0; i < 500; i += ms) { + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { if (sleep_ok) { ms = delay[delay_idx]; if (delay_idx < ARRAY_SIZE(delay) - 1) diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d90..461dd6f905f 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Checksum mode */ dm9000_set_rx_csum_unlocked(dev, db->rx_csum); - /* GPIO0 on pre-activate PHY */ - iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ - iow(db, DM9000_GPR, 0); /* Enable PHY */ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? 
NCR_EXT_PHY : 0; @@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) unsigned long flags; /* Save previous register address */ - reg_save = readb(db->io_addr); spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); netif_stop_queue(dev); dm9000_reset(db); @@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) return -EAGAIN; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ + iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ + mdelay(1); /* delay needs by DM9000B */ + /* Initialize DM9000 board */ dm9000_reset(db); dm9000_init_dm9000(dev); diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index aed223b1b89..7501d977d99 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -124,6 +124,7 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) case M88E1000_I_PHY_ID: case M88E1011_I_PHY_ID: case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: hw->phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: @@ -3222,7 +3223,8 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) break; case e1000_ce4100: if ((hw->phy_id == RTL8211B_PHY_ID) || - (hw->phy_id == RTL8201N_PHY_ID)) + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) match = true; break; case e1000_82541: diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 196eeda2dd6..c70b23d5228 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -2917,6 +2917,7 @@ struct e1000_host_command_info { #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID #define M88E1011_I_REV_4 0x04 #define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 #define L1LXT971A_PHY_ID 0x001378E0 #define RTL8211B_PHY_ID 0x001CC910 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 1c18f26b081..3fa110ddb04 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) u16 phy_status, phy_1000t_status, phy_ext_status; u16 pci_status; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, downshift_task); + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); } @@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) return 0; } +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + void e1000e_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); @@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter 
*adapter = container_of(work, struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + e1000_get_phy_info(&adapter->hw); } @@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) static void e1000_update_phy_info(unsigned long data) { struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + schedule_work(&adapter->update_phy_task); } @@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) u32 link, tctl; int tx_pending = 0; + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + link = e1000e_has_link(adapter); if ((netif_carrier_ok(netdev)) && link) { /* Cancel scheduled suspend requests. */ @@ -4309,7 +4344,6 @@ link_up: * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). */ - adapter->tx_timeout_count++; schedule_work(&adapter->reset_task); /* return immediately since reset is imminent */ return; @@ -4338,19 +4372,12 @@ link_up: else ew32(ICS, E1000_ICS_RXDMT0); + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = 1; - /* flush partial descriptors to memory before detecting Tx hang */ - if (adapter->flags2 & FLAG2_DMA_BURST) { - ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); - ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); - /* - * no need to flush the writes because the timeout code does - * an er32 first thing - */ - } - /* * With 82571 controllers, LAA may be overwritten due to controller * reset from the other port. Set the appropriate LAA in RAR[0] @@ -4888,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && (adapter->flags & FLAG_RX_RESTART_NOW))) { e1000e_dump(adapter); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0d..9c0b1bac6af 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } + netif_carrier_off(dev); + dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index d5ede2df3e4..ebbda7d1525 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } hw->addr_ctrl.overflow_promisc = 0; diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 6342d485979..c54a88274d5 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; - static const unsigned int bufflen = 4096; + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; unsigned int firstoff = 
0; unsigned int lastsize; unsigned int thisoff = 0; unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw; - dma_addr_t addr; + dma_addr_t addr = 0; if (!netdev || !sgl) return 0; @@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " + "not enough user buffers. We need an extra " + "buffer because lastsize is bufflen.\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); @@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) e_err(drv, "failed to allocated FCoE DDP pool\n"); spin_lock_init(&fcoe->lock); + + /* Extra buffer to be shared by all DDPs for HW work around */ + fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (fcoe->extra_ddp_buffer == NULL) { + e_err(drv, "failed to allocated extra DDP buffer\n"); + goto out_extra_ddp_buffer_alloc; + } + + fcoe->extra_ddp_buffer_dma = + dma_map_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + goto out_extra_ddp_buffer_dma; + } } /* Enable L2 eth type filter for FCoE */ @@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } } #endif + + return; + +out_extra_ddp_buffer_dma: + kfree(fcoe->extra_ddp_buffer); +out_extra_ddp_buffer_alloc: + pci_pool_destroy(fcoe->pool); + fcoe->pool = NULL; } /** @@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) if (fcoe->pool) { for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); pci_pool_destroy(fcoe->pool); fcoe->pool = NULL; } diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8d..65cc8fb14fe 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -70,6 +70,8 @@ struct ixgbe_fcoe { spinlock_t lock; struct pci_pool *pool; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + unsigned char *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; }; #endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 602078b8489..30f9ccfb4f8 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "3.0.12-k2" +#define DRV_VERSION "3.2.9-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; @@ -3176,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) u32 mhadd, hlreg0; /* Decide whether to use packet split mode or not */ + /* On by default */ + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + /* Do not use packet split if we're in 
SR-IOV Mode */ - if (!adapter->num_vfs) - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + if (adapter->num_vfs) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { @@ -3721,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) * We need to try and force an autonegotiation * session, then bring up link. */ - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) schedule_work(&adapter->multispeed_fiber_task); } else { @@ -4863,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { int q_idx, num_q_vectors; struct ixgbe_q_vector *q_vector; - int napi_vectors; int (*poll)(struct napi_struct *, int); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - napi_vectors = adapter->num_rx_queues; poll = &ixgbe_clean_rxtx_many; } else { num_q_vectors = 1; - napi_vectors = 1; poll = &ixgbe_poll; } @@ -5964,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) unregister_netdev(adapter->netdev); return; } - hw->mac.ops.setup_sfp(hw); + if (hw->mac.ops.setup_sfp) + hw->mac.ops.setup_sfp(hw); if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) /* This will also work for DA Twinax connections */ diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 47b15738b00..187b3a16ec1 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } - static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); vmolr |= (IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_ROPE | IXGBE_VMOLR_BAM); if (aupe) vmolr |= IXGBE_VMOLR_AUPE; diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 3a8923993ce..f2518b01067 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) } ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); IXGBE_WRITE_FLUSH(hw); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { udelay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); - if (!(ctrl & IXGBE_CTRL_RST)) + if (!(ctrl & reset_bit)) break; } - if (ctrl & IXGBE_CTRL_RST) { + if (ctrl & reset_bit) { status = IXGBE_ERR_RESET_FAILED; hw_dbg(hw, "Reset polling failed to complete.\n"); } diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520..e1e33c80fb2 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h @@ -73,7 +73,7 @@ struct pch_gbe_regs { struct pch_gbe_regs_mac_adr mac_adr[16]; u32 ADDR_MASK; u32 MIIM; - u32 reserve2; + u32 MAC_ADDR_LOAD; u32 RGMII_ST; u32 RGMII_CTRL; u32 reserve3[3]; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 1bf12339441..b99e90aca37 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -29,6 +29,7 @@ const char 
pch_driver_version[] = DRV_VERSION; #define PCH_GBE_SHORT_PKT 64 #define DSC_INIT16 0xC000 #define PCH_GBE_DMA_ALIGN 0 +#define PCH_GBE_DMA_PADDING 2 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 @@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data); + +inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +{ + iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); +} + /** * pch_gbe_mac_read_mac_addr - Read MAC address * @hw: Pointer to the HW structure @@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work) struct pch_gbe_adapter *adapter; adapter = container_of(work, struct pch_gbe_adapter, reset_task); + rtnl_lock(); pch_gbe_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work) */ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - if (netif_running(netdev)) { - pch_gbe_down(adapter); - pch_gbe_up(adapter); - } - rtnl_unlock(); + pch_gbe_down(adapter); + pch_gbe_up(adapter); } /** @@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info; struct pch_gbe_rx_desc *rx_desc; u32 length; - unsigned char tmp_packet[ETH_HLEN]; unsigned int i; unsigned int cleaned_count = 0; bool cleaned = false; - struct sk_buff *skb; + struct sk_buff *skb, *new_skb; u8 dma_status; u16 gbec_status; u32 tcp_ip_status; - u8 skb_copy_flag = 0; - u8 skb_padding_flag = 0; i = rx_ring->next_to_clean; @@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, pr_err("Receive CRC Error\n"); } else { /* get receive length */ - /* length convert[-3], padding[-2] */ - length = (rx_desc->rx_words_eob) - 3 - 2; + /* length convert[-3] */ + length = (rx_desc->rx_words_eob) - 3; /* Decide the data conversion method */ if (!adapter->rx_csum) { /* [Header:14][payload] */ - skb_padding_flag = 0; - skb_copy_flag = 1; + if (NET_IP_ALIGN) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; + } + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(new_skb->data, skb->data, + length); + skb = new_skb; + } else { + /* DMA buffer is used as SKB as it is.*/ + buffer_info->skb = NULL; + } } else { /* [Header:14][padding:2][payload] */ - skb_padding_flag = 1; - if (length < copybreak) - skb_copy_flag = 1; - else - skb_copy_flag = 0; - } - - /* Data conversion */ - if (skb_copy_flag) { /* recycle skb */ - struct sk_buff *new_skb; - new_skb = - netdev_alloc_skb(netdev, - length + NET_IP_ALIGN); - if (new_skb) { - if (!skb_padding_flag) { - skb_reserve(new_skb, - NET_IP_ALIGN); + /* The length includes padding length */ + length = length - PCH_GBE_DMA_PADDING; + if ((length < copybreak) || + (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { + /* Because alignment differs, + * the new_skb is newly allocated, + * and data is copied to new_skb. 
+ * Padding data is deleted + * at the time of a copy.*/ + new_skb = netdev_alloc_skb(netdev, + length + NET_IP_ALIGN); + if (!new_skb) { + /* dorrop error */ + pr_err("New skb allocation " + "Error\n"); + goto dorrop; } + skb_reserve(new_skb, NET_IP_ALIGN); memcpy(new_skb->data, skb->data, - length); - /* save the skb - * in buffer_info as good */ + ETH_HLEN); + memcpy(&new_skb->data[ETH_HLEN], + &skb->data[ETH_HLEN + + PCH_GBE_DMA_PADDING], + length - ETH_HLEN); skb = new_skb; - } else if (!skb_padding_flag) { - /* dorrop error */ - pr_err("New skb allocation Error\n"); - goto dorrop; + } else { + /* Padding data is deleted + * by moving header data.*/ + memmove(&skb->data[PCH_GBE_DMA_PADDING], + &skb->data[0], ETH_HLEN); + skb_reserve(skb, NET_IP_ALIGN); + buffer_info->skb = NULL; } - } else { - buffer_info->skb = NULL; } - if (skb_padding_flag) { - memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); - memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], - ETH_HLEN); - skb_reserve(skb, NET_IP_ALIGN); - - } - + /* The length includes FCS length */ + length = length - ETH_FCS_LEN; /* update status of driver */ adapter->stats.rx_bytes += length; adapter->stats.rx_packets++; @@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; pch_gbe_set_ethtool_ops(netdev); + pch_gbe_mac_load_mac_addr(&adapter->hw); pch_gbe_mac_reset_hw(&adapter->hw); /* setup the private structure */ diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c61..ef2133b16f8 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -617,8 +617,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) } } -static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) { + void __iomem *ioaddr = tp->mmio_addr; int i; RTL_W8(ERIDR, cmd); @@ -630,7 +631,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) break; } - ocp_write(ioaddr, 0x1, 0x30, 0x00000001); + ocp_write(tp, 0x1, 0x30, 0x00000001); } #define OOB_CMD_RESET 0x00 @@ -2868,8 +2869,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || (tp->mac_version == RTL_GIGA_MAC_VER_24)) && @@ -2891,6 +2895,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; } @@ -2900,12 +2906,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; - if (tp->mac_version == RTL_GIGA_MAC_VER_27) + if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || + (tp->mac_version == RTL_GIGA_MAC_VER_28)) && + (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { return; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25: case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; } @@ -3042,7 +3053,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = PCIMulRW | RxChkSum; + tp->cp_cmd = RxChkSum; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, 
DMA_BIT_MASK(64)) && use_dac) { @@ -3190,6 +3201,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); + netif_carrier_off(dev); + out: return rc; @@ -3316,7 +3329,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) /* Disable interrupts */ rtl8169_irq_mask_and_ack(ioaddr); - if (tp->mac_version == RTL_GIGA_MAC_VER_28) { + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28) { while (RTL_R8(TxPoll) & NPQ) udelay(20); @@ -3845,8 +3859,7 @@ static void rtl_hw_start_8168(struct net_device *dev) Cxpl_dbg_sel | \ ASF | \ PktCntrDisable | \ - PCIDAC | \ - PCIMulRW) + Mac_dbgo_sel) static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) { @@ -3876,8 +3889,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) RTL_W8(Config1, cfg1 & ~LEDS0); - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); - rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); } @@ -3889,8 +3900,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); - - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); } static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) @@ -3916,6 +3925,8 @@ static void rtl_hw_start_8101(struct net_device *dev) } } + RTL_W8(Cfg9346, Cfg9346_Unlock); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: rtl_hw_start_8102e_1(ioaddr, pdev); @@ -3930,14 +3941,13 @@ static void rtl_hw_start_8101(struct net_device *dev) break; } - RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Cfg9346, Cfg9346_Lock); RTL_W8(MaxTxPacketSize, TxPacketMax); rtl_set_rx_max_size(ioaddr, rx_buf_sz); - tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; - + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; RTL_W16(CPlusCmd, tp->cp_cmd); RTL_W16(IntrMitigate, 0x0000); @@ -3947,14 +3957,10 @@ static void rtl_hw_start_8101(struct net_device *dev) RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); rtl_set_rx_tx_config_registers(tp); - RTL_W8(Cfg9346, Cfg9346_Lock); - RTL_R8(IntrMask); rtl_set_rx_mode(dev); - RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); - RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); RTL_W16(IntrMask, tp->intr_event); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60..ca886d98bdc 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_self_tests efx_tests; + struct efx_self_tests *efx_tests; int already_up; - int rc; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + ASSERT_RTNL(); if (efx->state != STATE_RUNNING) { @@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); - goto fail2; + goto fail1; } } - memset(&efx_tests, 0, sizeof(efx_tests)); - - rc = efx_selftest(efx, &efx_tests, test->flags); + rc = efx_selftest(efx, efx_tests, test->flags); if (!already_up) dev_close(efx->net_dev); @@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, rc == 0 ? "passed" : "failed", (test->flags & ETH_TEST_FL_OFFLINE) ? 
"off" : "on"); - fail2: - fail1: +fail1: /* Fill ethtool results structures */ - efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: if (rc) test->flags |= ETH_TEST_FL_FAILED; } diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 5976d1d51df..640e368ebee 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev) "cur_rx:%4.4d, dirty_rx:%4.4d\n", net_dev->name, sis_priv->cur_rx, sis_priv->dirty_rx); + dev_kfree_skb(skb); break; } diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f..0e5f03135b5 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) priv->hw = device; - if (device_can_wakeup(priv->device)) + if (device_can_wakeup(priv->device)) { priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + enable_irq_wake(dev->irq); + } return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d36661..06c0e503365 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); @@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) break; /* We have no PHY */ - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || + ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !netif_running(dev))) return -EAGAIN; spin_lock_bh(&tp->lock); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 04e8ce14a1d..7113168473c 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1,7 +1,7 @@ /* * cdc_ncm.c * - * Copyright (C) ST-Ericsson 2010 + * Copyright (C) ST-Ericsson 2010-2011 * Contact: Alexey Orishko <alexey.orishko@stericsson.com> * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> * @@ -54,7 +54,7 @@ #include <linux/usb/usbnet.h> #include <linux/usb/cdc.h> -#define DRIVER_VERSION "17-Jan-2011" +#define DRIVER_VERSION "7-Feb-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 @@ -77,6 +77,9 @@ */ #define CDC_NCM_DPT_DATAGRAMS_MAX 32 +/* Maximum amount of IN datagrams in NTB */ +#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */ + /* Restart the timer, if amount of datagrams is less than given value */ #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 @@ -85,11 +88,6 @@ (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) -struct connection_speed_change { - __le32 USBitRate; /* holds 3GPP downlink value, bits per second */ - __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */ -} __attribute__ ((packed)); - struct cdc_ncm_data { struct usb_cdc_ncm_nth16 nth16; struct usb_cdc_ncm_ndp16 ndp16; @@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) { struct usb_cdc_notification req; u32 val; - __le16 max_datagram_size; u8 flags; u8 iface_no; int err; + u16 ntb_fmt_supported; iface_no = 
ctx->control->cur_altsetting->desc.bInterfaceNumber; @@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); + /* devices prior to NCM Errata shall set this field to zero */ + ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); + ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); if (ctx->func_desc != NULL) flags = ctx->func_desc->bmNetworkCapabilities; @@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " - "wNdpOutAlignment=%u flags=0x%x\n", + "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, - ctx->tx_ndp_modulus, flags); + ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); - /* max count of tx datagrams without terminating NULL entry */ - ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; + /* max count of tx datagrams */ + if ((ctx->tx_max_datagrams == 0) || + (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) + ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; /* verify maximum size of received NTB in bytes */ - if ((ctx->rx_max < - (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || - (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { + if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { + pr_debug("Using min receive length=%d\n", + USB_CDC_NCM_NTB_MIN_IN_SIZE); + ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; + } + + if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { pr_debug("Using default maximum receive length=%d\n", CDC_NCM_NTB_MAX_SIZE_RX); ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; } + /* inform device about NTB input size changes */ + if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + + if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { + struct usb_cdc_ncm_ndp_input_size ndp_in_sz; + + req.wLength = 8; + ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + ndp_in_sz.wNtbInMaxDatagrams = + cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); + ndp_in_sz.wReserved = 0; + err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, + 1000); + } else { + __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + + req.wLength = 4; + err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, + NULL, 1000); + } + + if (err) + pr_debug("Setting NTB Input Size failed\n"); + } + /* verify maximum size of transmitted NTB in bytes */ if ((ctx->tx_max < (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || @@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) /* additional configuration */ /* set CRC Mode */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_CRC_MODE; - req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; - - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) - pr_debug("Setting CRC mode off failed\n"); + if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_CRC_MODE; + req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); + 
req.wIndex = cpu_to_le16(iface_no); + req.wLength = 0; + + err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); + if (err) + pr_debug("Setting CRC mode off failed\n"); + } - /* set NTB format */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_NTB_FORMAT; - req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; + /* set NTB format, if both formats are supported */ + if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_NTB_FORMAT; + req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); + req.wIndex = cpu_to_le16(iface_no); + req.wLength = 0; + + err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); + if (err) + pr_debug("Setting NTB format to 16-bit failed\n"); + } - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) - pr_debug("Setting NTB format to 16-bit failed\n"); + ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; /* set Max Datagram Size (MTU) */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); - req.wLength = cpu_to_le16(2); + if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { + __le16 max_datagram_size; + u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); + + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + req.wLength = cpu_to_le16(2); + + err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, + 1000); + if (err) { + pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", + CDC_NCM_MIN_DATAGRAM_SIZE); + } else { + ctx->max_datagram_size = le16_to_cpu(max_datagram_size); + /* Check Eth descriptor value */ + if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { + if (ctx->max_datagram_size > eth_max_sz) + ctx->max_datagram_size = eth_max_sz; + } else { + if (ctx->max_datagram_size > + CDC_NCM_MAX_DATAGRAM_SIZE) + ctx->max_datagram_size = + CDC_NCM_MAX_DATAGRAM_SIZE; + } - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); - if (err) { - pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", - CDC_NCM_MIN_DATAGRAM_SIZE); - /* use default */ - ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; - } else { - ctx->max_datagram_size = le16_to_cpu(max_datagram_size); + if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) + ctx->max_datagram_size = + CDC_NCM_MIN_DATAGRAM_SIZE; + + /* if value changed, update device */ + req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | + USB_RECIP_INTERFACE; + req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; + req.wValue = 0; + req.wIndex = cpu_to_le16(iface_no); + req.wLength = 2; + max_datagram_size = cpu_to_le16(ctx->max_datagram_size); + + err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, + 0, NULL, 1000); + if (err) + pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); + } - if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; - else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; } if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) @@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) ctx->ether_desc = (const struct 
usb_cdc_ether_desc *)buf; - dev->hard_mtu = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); - if (dev->hard_mtu < - (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) - dev->hard_mtu = - CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; - - else if (dev->hard_mtu > - (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN)) - dev->hard_mtu = - CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN; + if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE) + dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE; + else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE) + dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE; break; case USB_CDC_NCM_TYPE: @@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) u32 offset; u32 last_offset; u16 n = 0; - u8 timeout = 0; + u8 ready2send = 0; /* if there is a remaining skb, it gets priority */ if (skb != NULL) swap(skb, ctx->tx_rem_skb); else - timeout = 1; + ready2send = 1; /* * +----------------+ @@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) for (; n < ctx->tx_max_datagrams; n++) { /* check if end of transmit buffer is reached */ - if (offset >= ctx->tx_max) + if (offset >= ctx->tx_max) { + ready2send = 1; break; - + } /* compute maximum buffer size */ rem = ctx->tx_max - offset; @@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) } ctx->tx_rem_skb = skb; skb = NULL; - - /* loop one more time */ - timeout = 1; + ready2send = 1; } break; } @@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) ctx->tx_curr_last_offset = last_offset; goto exit_no_skb; - } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { + } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { /* wait for more frames */ /* push variables */ ctx->tx_curr_skb = skb_out; @@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); - ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), + ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus); memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); @@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * sizeof(struct usb_cdc_ncm_dpe16)); ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); - ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ + ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, + memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, &(ctx->tx_ncm.ndp16), sizeof(ctx->tx_ncm.ndp16)); - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + + memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + sizeof(ctx->tx_ncm.ndp16), &(ctx->tx_ncm.dpe16), (ctx->tx_curr_frame_num + 1) * @@ -961,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) goto error; } - temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); + temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex); if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { pr_debug("invalid DPT16 index\n"); goto error; @@ -1048,10 +1115,10 @@ error: static void cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, - struct connection_speed_change *data) + struct usb_cdc_speed_change *data) { - uint32_t rx_speed = le32_to_cpu(data->USBitRate); - uint32_t tx_speed = le32_to_cpu(data->DSBitRate); + 
uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); + uint32_t tx_speed = le32_to_cpu(data->ULBitRate); /* * Currently the USB-NET API does not support reporting the actual @@ -1092,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) /* test for split data in 8-byte chunks */ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { cdc_ncm_speed_change(ctx, - (struct connection_speed_change *)urb->transfer_buffer); + (struct usb_cdc_speed_change *)urb->transfer_buffer); return; } @@ -1120,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) break; case USB_CDC_NOTIFY_SPEED_CHANGE: - if (urb->actual_length < - (sizeof(*event) + sizeof(struct connection_speed_change))) + if (urb->actual_length < (sizeof(*event) + + sizeof(struct usb_cdc_speed_change))) set_bit(EVENT_STS_SPLIT, &dev->flags); else cdc_ncm_speed_change(ctx, - (struct connection_speed_change *) &event[1]); + (struct usb_cdc_speed_change *) &event[1]); break; default: diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9f..5002f5be47b 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { .driver_info = (unsigned long)&dm9601_info, }, { + USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, + { USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ .driver_info = (unsigned long)&dm9601_info, }, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff4..6d83812603b 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2628,15 +2628,15 @@ exit: static void hso_free_tiomget(struct hso_serial *serial) { - struct hso_tiocmget *tiocmget = serial->tiocmget; + struct hso_tiocmget *tiocmget; + if (!serial) + return; + tiocmget = serial->tiocmget; if (tiocmget) { - if (tiocmget->urb) { - usb_free_urb(tiocmget->urb); - tiocmget->urb = NULL; - } + usb_free_urb(tiocmget->urb); + tiocmget->urb = NULL; serial->tiocmget = NULL; kfree(tiocmget); - } } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff..95c41d56631 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -931,8 +931,10 @@ fail_halt: if (urb != NULL) { clear_bit (EVENT_RX_MEMORY, &dev->flags); status = usb_autopm_get_interface(dev->intf); - if (status < 0) + if (status < 0) { + usb_free_urb(urb); goto fail_lowmem; + } if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) resched = 0; usb_autopm_put_interface(dev->intf); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 90a23e410d1..82dba5aaf42 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq) } } +static void virtnet_napi_enable(struct virtnet_info *vi) +{ + napi_enable(&vi->napi); + + /* If all buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets + * now. virtnet_poll wants re-enable the queue, so we disable here. 
+ * We synchronize against interrupts via NAPI_STATE_SCHED */ + if (napi_schedule_prep(&vi->napi)) { + virtqueue_disable_cb(vi->rvq); + __napi_schedule(&vi->napi); + } +} + static void refill_work(struct work_struct *work) { struct virtnet_info *vi; @@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work) vi = container_of(work, struct virtnet_info, refill.work); napi_disable(&vi->napi); still_empty = !try_fill_recv(vi, GFP_KERNEL); - napi_enable(&vi->napi); + virtnet_napi_enable(vi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ @@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - napi_enable(&vi->napi); - - /* If all buffers were filled by other side before we napi_enabled, we - * won't get another interrupt, so process any outstanding packets - * now. virtnet_poll wants re-enable the queue, so we disable here. - * We synchronize against interrupts via NAPI_STATE_SCHED */ - if (napi_schedule_prep(&vi->napi)) { - virtqueue_disable_cb(vi->rvq); - __napi_schedule(&vi->napi); - } + virtnet_napi_enable(vi); return 0; } diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad..62ce2f4e860 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) return 0; } +/* + * Wait for synth to settle + */ +static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, + struct ieee80211_channel *channel) +{ + /* + * On 5211+ read activation -> rx delay + * and use it (100ns steps). + */ + if (ah->ah_version != AR5K_AR5210) { + u32 delay; + delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & + AR5K_PHY_RX_DELAY_M; + delay = (channel->hw_value & CHANNEL_CCK) ? + ((delay << 2) / 22) : (delay / 10); + if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) + delay = delay << 1; + if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) + delay = delay << 2; + /* XXX: /2 on turbo ? Let's be safe + * for now */ + udelay(100 + delay); + } else { + mdelay(1); + } +} + /**********************\ * RF Gain optimization * @@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, case AR5K_RF5111: ret = ath5k_hw_rf5111_channel(ah, channel); break; + case AR5K_RF2317: case AR5K_RF2425: ret = ath5k_hw_rf2425_channel(ah, channel); break; @@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, /* Failed */ if (i >= 100) return -EIO; + + /* Set channel and wait for synth */ + ret = ath5k_hw_channel(ah, channel); + if (ret) + return ret; + + ath5k_hw_wait_for_synth(ah, channel); } /* @@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; + /* Write OFDM timings on 5212*/ + if (ah->ah_version == AR5K_AR5212 && + channel->hw_value & CHANNEL_OFDM) { + + ret = ath5k_hw_write_ofdm_timings(ah, channel); + if (ret) + return ret; + + /* Spur info is available only from EEPROM versions + * greater than 5.3, but the EEPROM routines will use + * static values for older versions */ + if (ah->ah_mac_srev >= AR5K_SREV_AR5424) + ath5k_hw_set_spur_mitigation_filter(ah, + channel); + } + + /* If we used fast channel switching + * we are done, release RF bus and + * fire up NF calibration. + * + * Note: Only NF calibration due to + * channel change, not AGC calibration + * since AGC is still running ! 
+ */ + if (fast) { + /* + * Release RF Bus grant + */ + AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, + AR5K_PHY_RFBUS_REQ_REQUEST); + + /* + * Start NF calibration + */ + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, + AR5K_PHY_AGCCTL_NF); + + return ret; + } + /* * For 5210 we do all initialization using * initvals, so we don't have to modify * any settings (5210 also only supports * a/aturbo modes) */ - if ((ah->ah_version != AR5K_AR5210) && !fast) { + if (ah->ah_version != AR5K_AR5210) { /* * Write initial RF gain settings @@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (ret) return ret; - /* Write OFDM timings on 5212*/ - if (ah->ah_version == AR5K_AR5212 && - channel->hw_value & CHANNEL_OFDM) { - - ret = ath5k_hw_write_ofdm_timings(ah, channel); - if (ret) - return ret; - - /* Spur info is available only from EEPROM versions - * greater than 5.3, but the EEPROM routines will use - * static values for older versions */ - if (ah->ah_mac_srev >= AR5K_SREV_AR5424) - ath5k_hw_set_spur_mitigation_filter(ah, - channel); - } - /*Enable/disable 802.11b mode on 5111 (enable 2111 frequency converter + CCK)*/ if (ah->ah_radio == AR5K_RF5111) { @@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, */ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); + ath5k_hw_wait_for_synth(ah, channel); + /* - * On 5211+ read activation -> rx delay - * and use it. + * Perform ADC test to see if baseband is ready + * Set tx hold and check adc test register */ - if (ah->ah_version != AR5K_AR5210) { - u32 delay; - delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & - AR5K_PHY_RX_DELAY_M; - delay = (channel->hw_value & CHANNEL_CCK) ? - ((delay << 2) / 22) : (delay / 10); - if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) - delay = delay << 1; - if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) - delay = delay << 2; - /* XXX: /2 on turbo ? Let's be safe - * for now */ - udelay(100 + delay); - } else { - mdelay(1); - } - - if (fast) - /* - * Release RF Bus grant - */ - AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, - AR5K_PHY_RFBUS_REQ_REQUEST); - else { - /* - * Perform ADC test to see if baseband is ready - * Set tx hold and check adc test register - */ - phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); - ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); - for (i = 0; i <= 20; i++) { - if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) - break; - udelay(200); - } - ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); + phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); + ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); + for (i = 0; i <= 20; i++) { + if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) + break; + udelay(200); } + ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); /* * Start automatic gain control calibration diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 3681caf5428..1a7fa6ea4cf 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -21,7 +21,6 @@ #include <linux/device.h> #include <linux/leds.h> #include <linux/completion.h> -#include <linux/pm_qos_params.h> #include "debug.h" #include "common.h" @@ -57,8 +56,6 @@ struct ath_node; #define A_MAX(a, b) ((a) > (b) ? 
(a) : (b)) -#define ATH9K_PM_QOS_DEFAULT_VALUE 55 - #define TSF_TO_TU(_h,_l) \ ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) @@ -218,6 +215,7 @@ struct ath_frame_info { struct ath_buf_state { u8 bf_type; u8 bfs_paprd; + unsigned long bfs_paprd_timestamp; enum ath9k_internal_frame_type bfs_ftype; }; @@ -593,7 +591,6 @@ struct ath_softc { struct work_struct paprd_work; struct work_struct hw_check_work; struct completion paprd_complete; - bool paprd_pending; u32 intrstatus; u32 sc_flags; /* SC_OP_* */ @@ -633,8 +630,6 @@ struct ath_softc { struct ath_descdma txsdma; struct ath_ant_comb ant_comb; - - struct pm_qos_request_list pm_qos_req; }; struct ath_wiphy { @@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) extern struct ieee80211_ops ath9k_ops; extern int ath9k_modparam_nohwcrypt; extern int led_blink; -extern int ath9k_pm_qos_value; extern bool is_ath9k_unloaded; irqreturn_t ath_isr(int irq, void *dev); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 087a6a95edd..a033d01bf8a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); -int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; -module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); -MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); - bool is_ath9k_unloaded; /* We use the hw_value as an index into our private channel structure */ @@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, ath_init_leds(sc); ath_start_rfkill_poll(sc); - pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); - return 0; error_world: @@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) } ieee80211_unregister_hw(hw); - pm_qos_remove_request(&sc->pm_qos_req); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9040c2ff190..a09d15f7aa6 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -342,7 +342,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int tx_info->control.rates[1].idx = -1; init_completion(&sc->paprd_complete); - sc->paprd_pending = true; txctl.paprd = BIT(chain); if (ath_tx_start(hw, skb, &txctl) != 0) { @@ -353,7 +352,6 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int time_left = wait_for_completion_timeout(&sc->paprd_complete, msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); - sc->paprd_pending = false; if (!time_left) ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, @@ -1175,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) ath9k_btcoex_timer_resume(sc); } - /* User has the option to provide pm-qos value as a module - * parameter rather than using the default value of - * 'ATH9K_PM_QOS_DEFAULT_VALUE'. 
- */ - pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); - if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) common->bus_ops->extn_synch_en(common); @@ -1347,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) sc->sc_flags |= SC_OP_INVALID; - pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); - mutex_unlock(&sc->mutex); ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 33a37edbaf7..07b7804aec5 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, bf->bf_state.bfs_paprd); + if (txctl->paprd) + bf->bf_state.bfs_paprd_timestamp = jiffies; + ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); } @@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, bf->bf_buf_addr = 0; if (bf->bf_state.bfs_paprd) { - if (!sc->paprd_pending) + if (time_after(jiffies, + bf->bf_state.bfs_paprd_timestamp + + msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) dev_kfree_skb_any(skb); else complete(&sc->paprd_complete); diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 939a0e96ed1..84866a4b835 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); /* 2. Maybe the AP wants to send multicast/broadcast data? */ - cam = !!(tim_ie->bitmap_ctrl & 0x01); + cam |= !!(tim_ie->bitmap_ctrl & 0x01); if (!cam) { /* back to low-power land. */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be450..39b6f16c87f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, } #endif -/** - * iwl3945_good_plcp_health - checks for plcp error. - * - * When the plcp error is exceeding the thresholds, reset the radio - * to improve the throughput. - */ -static bool iwl3945_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - bool rc = true; - struct iwl3945_notif_statistics current_stat; - int combined_plcp_delta; - unsigned int plcp_msec; - unsigned long plcp_received_jiffies; - - if (priv->cfg->base_params->plcp_delta_threshold == - IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { - IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); - return rc; - } - memcpy(&current_stat, pkt->u.raw, sizeof(struct - iwl3945_notif_statistics)); - /* - * check for plcp_err and trigger radio reset if it exceeds - * the plcp error threshold plcp_delta. - */ - plcp_received_jiffies = jiffies; - plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - - (long) priv->plcp_jiffies); - priv->plcp_jiffies = plcp_received_jiffies; - /* - * check to make sure plcp_msec is not 0 to prevent division - * by zero.
- */ - if (plcp_msec) { - combined_plcp_delta = - (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - - le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); - - if ((combined_plcp_delta > 0) && - ((combined_plcp_delta * 100) / plcp_msec) > - priv->cfg->base_params->plcp_delta_threshold) { - /* - * if plcp_err exceed the threshold, the following - * data is printed in csv format: - * Text: plcp_err exceeded %d, - * Received ofdm.plcp_err, - * Current ofdm.plcp_err, - * combined_plcp_delta, - * plcp_msec - */ - IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " - "%u, %d, %u mSecs\n", - priv->cfg->base_params->plcp_delta_threshold, - le32_to_cpu(current_stat.rx.ofdm.plcp_err), - combined_plcp_delta, plcp_msec); - /* - * Reset the RF radio due to the high plcp - * error rate - */ - rc = false; - } - } - return rc; -} - void iwl3945_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { @@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { .isr_ops = { .isr = iwl_isr_legacy, }, - .check_plcp_health = iwl3945_good_plcp_health, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af505bcd7ae..ef36aff1bb4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ + .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ + .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ .ops = &iwl6050_ops, \ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 36335b1b54d..c1cfd9952e5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1157,6 +1157,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { @@ -1371,6 +1374,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); } /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5..0494d7b102d 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, while (i != idx) { u16 len; struct sk_buff *skb; + dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } + dma_addr = le32_to_cpu(desc->host_addr); + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, - le32_to_cpu(desc->host_addr), - priv->common.rx_mtu + 32, - 
PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; - desc->host_addr = 0; + desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); + pci_dma_sync_single_for_device(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38a..3b3f1e45ab3 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. + */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) @@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, #endif #ifdef CONFIG_RT2800PCI_RT35XX + { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, + { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4..197a36c05fd 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; + /* + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. 
+ */ + rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 012e1a4016f..40372bac948 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c @@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) { beacon = ieee80211_beacon_get(hw, vif); + if (!beacon) + goto out_sleep; + ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, beacon->len); diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index ffedfd49275..ea158008534 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -3,7 +3,7 @@ # menuconfig NFC_DEVICES - bool "NFC devices" + bool "Near Field Communication (NFC) devices" default n ---help--- You'll have to say Y if your computer contains an NFC device that diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c index bae647264dd..724f65d8f9e 100644 --- a/drivers/nfc/pn544.c +++ b/drivers/nfc/pn544.c @@ -60,7 +60,7 @@ enum pn544_irq { struct pn544_info { struct miscdevice miscdev; struct i2c_client *i2c_dev; - struct regulator_bulk_data regs[2]; + struct regulator_bulk_data regs[3]; enum pn544_state state; wait_queue_head_t read_wait; @@ -74,6 +74,7 @@ struct pn544_info { static const char reg_vdd_io[] = "Vdd_IO"; static const char reg_vbat[] = "VBat"; +static const char reg_vsim[] = "VSim"; /* sysfs interface */ static ssize_t pn544_test(struct device *dev, @@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client, info->regs[0].supply = reg_vdd_io; info->regs[1].supply = reg_vbat; + info->regs[2].supply = reg_vsim; r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), info->regs); if (r < 0) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 8ecaac98392..ea25e5bfcf2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -23,6 +23,7 @@ #include <linux/mm.h> #include <linux/fs.h> #include <linux/capability.h> +#include <linux/security.h> #include <linux/pci-aspm.h> #include <linux/slab.h> #include "pci.h" @@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj, u8 *data = (u8*) buf; /* Several chips lock up trying to read undefined config space */ - if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) { + if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) { size = dev->cfg_size; } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { size = 128; diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 0bdda5b3ed5..42fbf1a7557 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev) flags |= CONF_ENABLE_IOCARD; if (flags & CONF_ENABLE_IOCARD) s->socket.flags |= SS_IOCARD; + if (flags & CONF_ENABLE_ZVCARD) + s->socket.flags |= SS_ZVCARD | SS_IOCARD; if (flags & CONF_ENABLE_SPKR) { s->socket.flags |= SS_SPKR_ENA; status = CCSR_AUDIO_ENA; diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 3755e7c8c71..2c540542b5a 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c @@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, } #endif -static void pxa2xx_configure_sockets(struct device *dev) +void pxa2xx_configure_sockets(struct device *dev) { struct pcmcia_low_level *ops 
= dev->platform_data; /* diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index bb62ea87b8f..b609b45469e 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h @@ -1,3 +1,4 @@ int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); +void pxa2xx_configure_sockets(struct device *dev); diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c index b9f8c8fb42b..25afe637c65 100644 --- a/drivers/pcmcia/pxa2xx_lubbock.c +++ b/drivers/pcmcia/pxa2xx_lubbock.c @@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev) lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); + pxa2xx_configure_sockets(&sadev->dev); ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, pxa2xx_drv_pcmcia_add_one); } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d163bc2e2b9..a59af5b24f0 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -227,7 +227,7 @@ config SONYPI_COMPAT config IDEAPAD_LAPTOP tristate "Lenovo IdeaPad Laptop Extras" depends on ACPI - depends on RFKILL + depends on RFKILL && INPUT select INPUT_SPARSEKMAP help This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c5c4b8c32eb..38b34a73866 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -84,7 +84,7 @@ MODULE_LICENSE("GPL"); */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" -#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" +#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" @@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev, return -EINVAL; return count; } -static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, +static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 4633fd8532c..fe495939c30 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device) struct proc_dir_entry *proc; mode_t mode; - /* - * If parameter uid or gid is not changed, keep the default setting for - * our proc entries (-rw-rw-rw-) else, it means we care about security, - * and then set to -rw-rw---- - */ - if ((asus_uid == 0) && (asus_gid == 0)) { - mode = S_IFREG | S_IRUGO | S_IWUGO; + mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; } else { mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; printk(KERN_WARNING " asus_uid and asus_gid parameters are " diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 34657f96b5a..ad24ef36f9f 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked) dell_send_request(buffer, 17, 11); /* If the hardware switch controls this radio, and the hardware - switch is disabled, don't allow changing the software state */ + switch is disabled, don't allow changing the software state. 
+ If the hardware switch is reported as not supported, always + fire the SMI to toggle the killswitch. */ if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) { + !(buffer->output[1] & BIT(16)) && + (buffer->output[1] & BIT(0))) { ret = -EINVAL; goto out; } @@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = { static void dell_update_rfkill(struct work_struct *ignored) { + int status; + + get_buffer(); + dell_send_request(buffer, 17, 11); + status = buffer->output[1]; + release_buffer(); + + /* if hardware rfkill is not supported, set it explicitly */ + if (!(status & BIT(0))) { + if (wifi_rfkill) + dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17)); + if (bluetooth_rfkill) + dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18)); + if (wwan_rfkill) + dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19)); + } + if (wifi_rfkill) dell_rfkill_query(wifi_rfkill, (void *)1); if (bluetooth_rfkill) diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 930e6276236..61433d49286 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c @@ -60,69 +60,20 @@ enum pmic_gpio_register { #define GPOSW_DOU 0x08 #define GPOSW_RDRV 0x30 +#define GPIO_UPDATE_TYPE 0x80000000 #define NUM_GPIO 24 -struct pmic_gpio_irq { - spinlock_t lock; - u32 trigger[NUM_GPIO]; - u32 dirty; - struct work_struct work; -}; - - struct pmic_gpio { + struct mutex buslock; struct gpio_chip chip; - struct pmic_gpio_irq irqtypes; void *gpiointr; int irq; unsigned irq_base; + unsigned int update_type; + u32 trigger_type; }; -static void pmic_program_irqtype(int gpio, int type) -{ - if (type & IRQ_TYPE_EDGE_RISING) - intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); - else - intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); - - if (type & IRQ_TYPE_EDGE_FALLING) - intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); - else - intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); -}; - -static void pmic_irqtype_work(struct work_struct *work) -{ - struct pmic_gpio_irq *t = - container_of(work, struct pmic_gpio_irq, work); - unsigned long flags; - int i; - u16 type; - - spin_lock_irqsave(&t->lock, flags); - /* As we drop the lock, we may need multiple scans if we race the - pmic_irq_type function */ - while (t->dirty) { - /* - * For each pin that has the dirty bit set send an IPC - * message to configure the hardware via the PMIC - */ - for (i = 0; i < NUM_GPIO; i++) { - if (!(t->dirty & (1 << i))) - continue; - t->dirty &= ~(1 << i); - /* We can't trust the array entry or dirty - once the lock is dropped */ - type = t->trigger[i]; - spin_unlock_irqrestore(&t->lock, flags); - pmic_program_irqtype(i, type); - spin_lock_irqsave(&t->lock, flags); - } - } - spin_unlock_irqrestore(&t->lock, flags); -} - static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { if (offset > 8) { @@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 1 << (offset - 16)); } -static int pmic_irq_type(unsigned irq, unsigned type) +/* + * This is called from genirq with pg->buslock locked and + * irq_desc->lock held. 
We can not access the scu bus here, so we + * store the change and update in the bus_sync_unlock() function below + */ +static int pmic_irq_type(struct irq_data *data, unsigned type) { - struct pmic_gpio *pg = get_irq_chip_data(irq); - u32 gpio = irq - pg->irq_base; - unsigned long flags; + struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); + u32 gpio = data->irq - pg->irq_base; if (gpio >= pg->chip.ngpio) return -EINVAL; - spin_lock_irqsave(&pg->irqtypes.lock, flags); - pg->irqtypes.trigger[gpio] = type; - pg->irqtypes.dirty |= (1 << gpio); - spin_unlock_irqrestore(&pg->irqtypes.lock, flags); - schedule_work(&pg->irqtypes.work); + pg->trigger_type = type; + pg->update_type = gpio | GPIO_UPDATE_TYPE; return 0; } - - static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); @@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) } /* the gpiointr register is read-clear, so just do nothing. */ -static void pmic_irq_unmask(unsigned irq) -{ -}; +static void pmic_irq_unmask(struct irq_data *data) { } -static void pmic_irq_mask(unsigned irq) -{ -}; +static void pmic_irq_mask(struct irq_data *data) { } static struct irq_chip pmic_irqchip = { .name = "PMIC-GPIO", - .mask = pmic_irq_mask, - .unmask = pmic_irq_unmask, - .set_type = pmic_irq_type, + .irq_mask = pmic_irq_mask, + .irq_unmask = pmic_irq_unmask, + .irq_set_type = pmic_irq_type, }; -static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) +static irqreturn_t pmic_irq_handler(int irq, void *data) { - struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); + struct pmic_gpio *pg = data; u8 intsts = *((u8 *)pg->gpiointr + 4); int gpio; + irqreturn_t ret = IRQ_NONE; for (gpio = 0; gpio < 8; gpio++) { if (intsts & (1 << gpio)) { pr_debug("pmic pin %d triggered\n", gpio); generic_handle_irq(pg->irq_base + gpio); + ret = IRQ_HANDLED; } } - - if (desc->chip->irq_eoi) - desc->chip->irq_eoi(irq_get_irq_data(irq)); - else - dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq); + return ret; } static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) @@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) pg->chip.can_sleep = 1; pg->chip.dev = dev; - INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); - spin_lock_init(&pg->irqtypes.lock); + mutex_init(&pg->buslock); pg->chip.dev = dev; retval = gpiochip_add(&pg->chip); @@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); goto err; } - set_irq_data(pg->irq, pg); - set_irq_chained_handler(pg->irq, pmic_irq_handler); + + retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg); + if (retval) { + printk(KERN_WARNING "pmic: Interrupt request failed\n"); + goto err; + } + for (i = 0; i < 8; i++) { set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, handle_simple_irq, "demux"); diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1feff7..865ef78d6f1 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c @@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ return -EINVAL; \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ show_bool_##value, set_bool_##value); show_set_bool(wireless, 
TC1100_INSTANCE_WIRELESS); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index dd599585c6a..eb9922385ef 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode) if (keycode != KEY_RESERVED) { mutex_lock(&tpacpi_inputdev_send_mutex); + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); input_report_key(tpacpi_inputdev, keycode, 1); - if (keycode == KEY_UNKNOWN) - input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, - scancode); input_sync(tpacpi_inputdev); + input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); input_report_key(tpacpi_inputdev, keycode, 0); - if (keycode == KEY_UNKNOWN) - input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, - scancode); input_sync(tpacpi_inputdev); mutex_unlock(&tpacpi_inputdev_send_mutex); diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index cba1b43f751..a4e8eb9fece 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, { unsigned long flags; int captured = 0; - struct pps_ktime ts_real; + struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 }; /* check event type */ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 76b41853a87..1269fbd2dec 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj, /* Several chips lock up trying to read undefined config space */ if (capable(CAP_SYS_ADMIN)) - size = 0x200000; + size = RIO_MAINT_SPACE_SZ; - if (off > size) + if (off >= size) return 0; if (off + count > size) { size -= off; @@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj, loff_t init_off = off; u8 *data = (u8 *) buf; - if (off > 0x200000) + if (off >= RIO_MAINT_SPACE_SZ) return 0; - if (off + count > 0x200000) { - size = 0x200000 - off; + if (off + count > RIO_MAINT_SPACE_SZ) { + size = RIO_MAINT_SPACE_SZ - off; count = size; } @@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 0x200000, + .size = RIO_MAINT_SPACE_SZ, .read = rio_read_config, .write = rio_write_config, }; diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index f53d31b950d..2bb5de1f242 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c @@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); - BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); + BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); return mc13xxx_regulators[id].voltages[val]; } diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 8b0d2c4bde9..06df898842c 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_IDLE; default: BUG(); + return -EINVAL; } } diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cdd97192dc6..4941cade319 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -97,6 +97,18 @@ config RTC_INTF_DEV If unsure, say Y. 
+config RTC_INTF_DEV_UIE_EMUL + bool "RTC UIE emulation on dev interface" + depends on RTC_INTF_DEV + help + Provides an emulation for RTC_UIE if the underlying rtc chip + driver does not expose RTC_UIE ioctls. Those requests generate + once-per-second update interrupts, used for synchronization. + + The emulation code will read the time from the hardware + clock several times per second, please enable this option + only if you know that you really need it. + config RTC_DRV_TEST tristate "Test driver/device" help diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a0c01967244..cb2f0728fd7 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) } if (err) - return err; - - if (!rtc->ops) + /* nothing */; + else if (!rtc->ops) err = -ENODEV; else if (!rtc->ops->alarm_irq_enable) err = -EINVAL; @@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) if (err) return err; +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + if (enabled == 0 && rtc->uie_irq_active) { + mutex_unlock(&rtc->ops_lock); + return rtc_dev_update_irq_enable_emul(rtc, 0); + } +#endif /* make sure we're changing state */ if (rtc->uie_rtctimer.enabled == enabled) goto out; @@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) out: mutex_unlock(&rtc->ops_lock); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + /* + * Enable emulation if the driver did not provide + * the update_irq_enable function pointer or if returned + * -EINVAL to signal that it has been configured without + * interrupts or that are not available at the moment. + */ + if (err == -EINVAL) + err = rtc_dev_update_irq_enable_emul(rtc, enabled); +#endif return err; } @@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable); * * Triggers the registered irq_task function callback. 
*/ -static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) { unsigned long flags; diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index b2752b6e7a2..e725d51e773 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c @@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) return ret; } -static int at32_rtc_ioctl(struct device *dev, unsigned int cmd, - unsigned long arg) +static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); int ret = 0; spin_lock_irq(&rtc->lock); - switch (cmd) { - case RTC_AIE_ON: + if(enabled) { if (rtc_readl(rtc, VAL) > rtc->alarm_time) { ret = -EINVAL; - break; + goto out; } rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); - break; - case RTC_AIE_OFF: + } else { rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); - break; - default: - ret = -ENOIOCTLCMD; - break; } - +out: spin_unlock_irq(&rtc->lock); return ret; @@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) } static struct rtc_class_ops at32_rtc_ops = { - .ioctl = at32_rtc_ioctl, .read_time = at32_rtc_readtime, .set_time = at32_rtc_settime, .read_alarm = at32_rtc_readalarm, .set_alarm = at32_rtc_setalarm, + .alarm_irq_enable = at32_rtc_alarm_irq_enable, }; static int __init at32_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index bc8bbca9a2e..26d1cf5d19a 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, /* important: scrub old status before enabling IRQs */ switch (cmd) { - case RTC_AIE_OFF: /* alarm off */ - at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); - break; - case RTC_AIE_ON: /* alarm on */ - at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); - at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); - break; case RTC_UIE_OFF: /* update off */ at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); break; @@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, return ret; } +static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + pr_debug("%s(): cmd=%08x\n", __func__, enabled); + + if (enabled) { + at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); + at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); + } else + at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); + + return 0; +} /* * Provide additional RTC information in /proc/driver/rtc */ @@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = { .read_alarm = at91_rtc_readalarm, .set_alarm = at91_rtc_setalarm, .proc = at91_rtc_proc, + .alarm_irq_enable = at91_rtc_alarm_irq_enable, }; /* diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index f677e0710ca..5469c52cba3 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c @@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); switch (cmd) { - case RTC_AIE_OFF: /* alarm off */ - rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); - break; - case RTC_AIE_ON: /* alarm on */ - rtt_writel(rtc, MR, mr | 
AT91_RTT_ALMIEN); - break; case RTC_UIE_OFF: /* update off */ rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); break; @@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, return ret; } +static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct sam9_rtc *rtc = dev_get_drvdata(dev); + u32 mr = rtt_readl(rtc, MR); + + dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr); + if (enabled) + rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); + else + rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); + return 0; +} + /* * Provide additional RTC information in /proc/driver/rtc */ @@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = { .read_alarm = at91_rtc_readalarm, .set_alarm = at91_rtc_setalarm, .proc = at91_rtc_proc, + .alarm_irq_enable = at91_rtc_alarm_irq_enable, }; /* diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index b4b6087f223..17971d93354 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar bfin_rtc_int_clear(~RTC_ISTAT_SEC); break; - case RTC_AIE_ON: - dev_dbg_stamp(dev); - bfin_rtc_int_set_alarm(rtc); - break; - case RTC_AIE_OFF: - dev_dbg_stamp(dev); - bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); - break; - default: dev_dbg_stamp(dev); ret = -ENOIOCTLCMD; @@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar return ret; } +static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct bfin_rtc *rtc = dev_get_drvdata(dev); + + dev_dbg_stamp(dev); + if (enabled) + bfin_rtc_int_set_alarm(rtc); + else + bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); +} + static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct bfin_rtc *rtc = dev_get_drvdata(dev); @@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = { .read_alarm = bfin_rtc_read_alarm, .set_alarm = bfin_rtc_set_alarm, .proc = bfin_rtc_proc, + .alarm_irq_enable = bfin_rtc_alarm_irq_enable, }; static int __devinit bfin_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 212b16edafc..d0e06edb14c 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file) return err; } +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL +/* + * Routine to poll RTC seconds field for change as often as possible, + * after first RTC_UIE use timer to reduce polling + */ +static void rtc_uie_task(struct work_struct *work) +{ + struct rtc_device *rtc = + container_of(work, struct rtc_device, uie_task); + struct rtc_time tm; + int num = 0; + int err; + + err = rtc_read_time(rtc, &tm); + + spin_lock_irq(&rtc->irq_lock); + if (rtc->stop_uie_polling || err) { + rtc->uie_task_active = 0; + } else if (rtc->oldsecs != tm.tm_sec) { + num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; + rtc->oldsecs = tm.tm_sec; + rtc->uie_timer.expires = jiffies + HZ - (HZ/10); + rtc->uie_timer_active = 1; + rtc->uie_task_active = 0; + add_timer(&rtc->uie_timer); + } else if (schedule_work(&rtc->uie_task) == 0) { + rtc->uie_task_active = 0; + } + spin_unlock_irq(&rtc->irq_lock); + if (num) + rtc_handle_legacy_irq(rtc, num, RTC_UF); +} +static void rtc_uie_timer(unsigned long data) +{ + struct rtc_device *rtc = (struct rtc_device *)data; + unsigned long flags; + + spin_lock_irqsave(&rtc->irq_lock, flags); + 
rtc->uie_timer_active = 0; + rtc->uie_task_active = 1; + if ((schedule_work(&rtc->uie_task) == 0)) + rtc->uie_task_active = 0; + spin_unlock_irqrestore(&rtc->irq_lock, flags); +} + +static int clear_uie(struct rtc_device *rtc) +{ + spin_lock_irq(&rtc->irq_lock); + if (rtc->uie_irq_active) { + rtc->stop_uie_polling = 1; + if (rtc->uie_timer_active) { + spin_unlock_irq(&rtc->irq_lock); + del_timer_sync(&rtc->uie_timer); + spin_lock_irq(&rtc->irq_lock); + rtc->uie_timer_active = 0; + } + if (rtc->uie_task_active) { + spin_unlock_irq(&rtc->irq_lock); + flush_scheduled_work(); + spin_lock_irq(&rtc->irq_lock); + } + rtc->uie_irq_active = 0; + } + spin_unlock_irq(&rtc->irq_lock); + return 0; +} + +static int set_uie(struct rtc_device *rtc) +{ + struct rtc_time tm; + int err; + + err = rtc_read_time(rtc, &tm); + if (err) + return err; + spin_lock_irq(&rtc->irq_lock); + if (!rtc->uie_irq_active) { + rtc->uie_irq_active = 1; + rtc->stop_uie_polling = 0; + rtc->oldsecs = tm.tm_sec; + rtc->uie_task_active = 1; + if (schedule_work(&rtc->uie_task) == 0) + rtc->uie_task_active = 0; + } + rtc->irq_data = 0; + spin_unlock_irq(&rtc->irq_lock); + return 0; +} + +int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) +{ + if (enabled) + return set_uie(rtc); + else + return clear_uie(rtc); +} +EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); + +#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ static ssize_t rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file, if (err) goto done; - /* try the driver's ioctl interface */ - if (ops->ioctl) { - err = ops->ioctl(rtc->dev.parent, cmd, arg); - if (err != -ENOIOCTLCMD) { - mutex_unlock(&rtc->ops_lock); - return err; - } - } - - /* if the driver does not provide the ioctl interface - * or if that particular ioctl was not implemented - * (-ENOIOCTLCMD), we will try to emulate here. - * + /* * Drivers *SHOULD NOT* provide ioctl implementations * for these requests. 
Instead, provide methods to * support the following code, so that the RTC's main @@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file, return err; default: - err = -ENOTTY; + /* Finally try the driver's ioctl interface */ + if (ops->ioctl) { + err = ops->ioctl(rtc->dev.parent, cmd, arg); + if (err == -ENOIOCTLCMD) + err = -ENOTTY; + } break; } @@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc) rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + INIT_WORK(&rtc->uie_task, rtc_uie_task); + setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); +#endif + cdev_init(&rtc->char_dev, &rtc_dev_fops); rtc->char_dev.owner = rtc->owner; } diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index bf430f9091e..60ce6960082 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c @@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg) __raw_writel(data, &priv->rtcregs[reg]); } + +static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ds1286_priv *priv = dev_get_drvdata(dev); + unsigned long flags; + unsigned char val; + + /* Allow or mask alarm interrupts */ + spin_lock_irqsave(&priv->lock, flags); + val = ds1286_rtc_read(priv, RTC_CMD); + if (enabled) + val &= ~RTC_TDM; + else + val |= RTC_TDM; + ds1286_rtc_write(priv, val, RTC_CMD); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + #ifdef CONFIG_RTC_INTF_DEV static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) @@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) unsigned char val; switch (cmd) { - case RTC_AIE_OFF: - /* Mask alarm int. enab. bit */ - spin_lock_irqsave(&priv->lock, flags); - val = ds1286_rtc_read(priv, RTC_CMD); - val |= RTC_TDM; - ds1286_rtc_write(priv, val, RTC_CMD); - spin_unlock_irqrestore(&priv->lock, flags); - break; - case RTC_AIE_ON: - /* Allow alarm interrupts. */ - spin_lock_irqsave(&priv->lock, flags); - val = ds1286_rtc_read(priv, RTC_CMD); - val &= ~RTC_TDM; - ds1286_rtc_write(priv, val, RTC_CMD); - spin_unlock_irqrestore(&priv->lock, flags); - break; case RTC_WIE_OFF: /* Mask watchdog int. enab. 
bit */ spin_lock_irqsave(&priv->lock, flags); @@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm) } static const struct rtc_class_ops ds1286_ops = { - .ioctl = ds1286_ioctl, - .proc = ds1286_proc, + .ioctl = ds1286_ioctl, + .proc = ds1286_proc, .read_time = ds1286_read_time, .set_time = ds1286_set_time, .read_alarm = ds1286_read_alarm, .set_alarm = ds1286_set_alarm, + .alarm_irq_enable = ds1286_alarm_irq_enable, }; static int __devinit ds1286_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 077af1d7b9e..57fbcc149ba 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour) * Interface to RTC framework */ -#ifdef CONFIG_RTC_INTF_DEV - -/* - * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) - */ -static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg) +static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct ds1305 *ds1305 = dev_get_drvdata(dev); u8 buf[2]; - int status = -ENOIOCTLCMD; + long err = -EINVAL; buf[0] = DS1305_WRITE | DS1305_CONTROL; buf[1] = ds1305->ctrl[0]; - switch (cmd) { - case RTC_AIE_OFF: - status = 0; - if (!(buf[1] & DS1305_AEI0)) - goto done; - buf[1] &= ~DS1305_AEI0; - break; - - case RTC_AIE_ON: - status = 0; + if (enabled) { if (ds1305->ctrl[0] & DS1305_AEI0) goto done; buf[1] |= DS1305_AEI0; - break; - } - if (status == 0) { - status = spi_write_then_read(ds1305->spi, buf, sizeof buf, - NULL, 0); - if (status >= 0) - ds1305->ctrl[0] = buf[1]; + } else { + if (!(buf[1] & DS1305_AEI0)) + goto done; + buf[1] &= ~DS1305_AEI0; } - + err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); + if (err >= 0) + ds1305->ctrl[0] = buf[1]; done: - return status; + return err; + } -#else -#define ds1305_ioctl NULL -#endif /* * Get/set of date and time is pretty normal. 
@@ -460,12 +443,12 @@ done: #endif static const struct rtc_class_ops ds1305_ops = { - .ioctl = ds1305_ioctl, .read_time = ds1305_get_time, .set_time = ds1305_set_time, .read_alarm = ds1305_get_alarm, .set_alarm = ds1305_set_alarm, .proc = ds1305_proc, + .alarm_irq_enable = ds1305_alarm_irq_enable, }; static void ds1305_work(struct work_struct *work) diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 0d559b6416d..4724ba3acf1 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } -static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); struct ds1307 *ds1307 = i2c_get_clientdata(client); int ret; - switch (cmd) { - case RTC_AIE_OFF: - if (!test_bit(HAS_ALARM, &ds1307->flags)) - return -ENOTTY; - - ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); - if (ret < 0) - return ret; - - ret &= ~DS1337_BIT_A1IE; - - ret = i2c_smbus_write_byte_data(client, - DS1337_REG_CONTROL, ret); - if (ret < 0) - return ret; - - break; - - case RTC_AIE_ON: - if (!test_bit(HAS_ALARM, &ds1307->flags)) - return -ENOTTY; + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -ENOTTY; - ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); - if (ret < 0) - return ret; + ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); + if (ret < 0) + return ret; + if (enabled) ret |= DS1337_BIT_A1IE; + else + ret &= ~DS1337_BIT_A1IE; - ret = i2c_smbus_write_byte_data(client, - DS1337_REG_CONTROL, ret); - if (ret < 0) - return ret; - - break; - - default: - return -ENOIOCTLCMD; - } + ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret); + if (ret < 0) + return ret; return 0; } @@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { .set_time = ds1307_set_time, .read_alarm = ds1337_read_alarm, .set_alarm = ds1337_set_alarm, - .ioctl = ds1307_ioctl, + .alarm_irq_enable = ds1307_alarm_irq_enable, }; /*----------------------------------------------------------------------*/ diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 47fb6357c34..d834a63ec4b 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -307,42 +307,25 @@ unlock: mutex_unlock(&ds1374->mutex); } -static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); struct ds1374 *ds1374 = i2c_get_clientdata(client); - int ret = -ENOIOCTLCMD; + int ret; mutex_lock(&ds1374->mutex); - switch (cmd) { - case RTC_AIE_OFF: - ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); - if (ret < 0) - goto out; - - ret &= ~DS1374_REG_CR_WACE; - - ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); - if (ret < 0) - goto out; - - break; - - case RTC_AIE_ON: - ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); - if (ret < 0) - goto out; + ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); + if (ret < 0) + goto out; + if (enabled) { ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; ret &= ~DS1374_REG_CR_WDALM; - - ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); - if (ret < 0) - goto out; - - break; + } else { + ret &= ~DS1374_REG_CR_WACE; } + ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); out: mutex_unlock(&ds1374->mutex); @@ -354,7 +337,7 
@@ static const struct rtc_class_ops ds1374_rtc_ops = { .set_time = ds1374_set_time, .read_alarm = ds1374_read_alarm, .set_alarm = ds1374_set_alarm, - .ioctl = ds1374_ioctl, + .alarm_irq_enable = ds1374_alarm_irq_enable, }; static int ds1374_probe(struct i2c_client *client, diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 23a9ee19764..950735415a7 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -1,7 +1,7 @@ /* * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C * - * Copyright (C) 2009-2010 Freescale Semiconductor. + * Copyright (C) 2009-2011 Freescale Semiconductor. * Author: Jack Lan <jack.lan@freescale.com> * * This program is free software; you can redistribute it and/or modify it @@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time) time->tm_hour = bcd2bin(hour); } - time->tm_wday = bcd2bin(week); + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ + time->tm_wday = bcd2bin(week) - 1; time->tm_mday = bcd2bin(day); - time->tm_mon = bcd2bin(month & 0x7F); + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ + time->tm_mon = bcd2bin(month & 0x7F) - 1; if (century) add_century = 100; @@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time) buf[0] = bin2bcd(time->tm_sec); buf[1] = bin2bcd(time->tm_min); buf[2] = bin2bcd(time->tm_hour); - buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ + /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ + buf[3] = bin2bcd(time->tm_wday + 1); buf[4] = bin2bcd(time->tm_mday); /* Date */ - buf[5] = bin2bcd(time->tm_mon); + /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ + buf[5] = bin2bcd(time->tm_mon + 1); if (time->tm_year >= 100) { buf[5] |= 0x80; buf[6] = bin2bcd(time->tm_year - 100); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 5a8daa35806..69fe664a222 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm) return m41t80_set_datetime(to_i2c_client(dev), tm); } -#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) -static int -m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct i2c_client *client = to_i2c_client(dev); int rc; - switch (cmd) { - case RTC_AIE_OFF: - case RTC_AIE_ON: - break; - default: - return -ENOIOCTLCMD; - } - rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); if (rc < 0) goto err; - switch (cmd) { - case RTC_AIE_OFF: - rc &= ~M41T80_ALMON_AFE; - break; - case RTC_AIE_ON: + + if (enabled) rc |= M41T80_ALMON_AFE; - break; - } + else + rc &= ~M41T80_ALMON_AFE; + if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) goto err; + return 0; err: return -EIO; } -#else -#define m41t80_rtc_ioctl NULL -#endif static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) { @@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = { .read_alarm = m41t80_rtc_read_alarm, .set_alarm = m41t80_rtc_set_alarm, .proc = m41t80_rtc_proc, - .ioctl = m41t80_rtc_ioctl, + .alarm_irq_enable = m41t80_rtc_alarm_irq_enable, }; #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index a99a0b554eb..3978f4caf72 100644 --- a/drivers/rtc/rtc-m48t59.c +++ 
b/drivers/rtc/rtc-m48t59.c @@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) /* * Handle commands from user-space */ -static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd, - unsigned long arg) +static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct platform_device *pdev = to_platform_device(dev); struct m48t59_plat_data *pdata = pdev->dev.platform_data; struct m48t59_private *m48t59 = platform_get_drvdata(pdev); unsigned long flags; - int ret = 0; spin_lock_irqsave(&m48t59->lock, flags); - switch (cmd) { - case RTC_AIE_OFF: /* alarm interrupt off */ - M48T59_WRITE(0x00, M48T59_INTR); - break; - case RTC_AIE_ON: /* alarm interrupt on */ + if (enabled) M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); - break; - default: - ret = -ENOIOCTLCMD; - break; - } + else + M48T59_WRITE(0x00, M48T59_INTR); spin_unlock_irqrestore(&m48t59->lock, flags); - return ret; + return 0; } static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) @@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id) } static const struct rtc_class_ops m48t59_rtc_ops = { - .ioctl = m48t59_rtc_ioctl, .read_time = m48t59_rtc_read_time, .set_time = m48t59_rtc_set_time, .read_alarm = m48t59_rtc_readalarm, .set_alarm = m48t59_rtc_setalarm, .proc = m48t59_rtc_proc, + .alarm_irq_enable = m48t59_rtc_alarm_irq_enable, }; static const struct rtc_class_ops m48t02_rtc_ops = { diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index bcd0cf63eb1..1db62db8469 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c @@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled) return 0; } -#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) - /* Currently, the vRTC doesn't support UIE ON/OFF */ -static int -mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct mrst_rtc *mrst = dev_get_drvdata(dev); unsigned long flags; - switch (cmd) { - case RTC_AIE_OFF: - case RTC_AIE_ON: - if (!mrst->irq) - return -EINVAL; - break; - default: - /* PIE ON/OFF is handled by mrst_irq_set_state() */ - return -ENOIOCTLCMD; - } - spin_lock_irqsave(&rtc_lock, flags); - switch (cmd) { - case RTC_AIE_OFF: /* alarm off */ - mrst_irq_disable(mrst, RTC_AIE); - break; - case RTC_AIE_ON: /* alarm on */ + if (enabled) mrst_irq_enable(mrst, RTC_AIE); - break; - } + else + mrst_irq_disable(mrst, RTC_AIE); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } -#else -#define mrst_rtc_ioctl NULL -#endif #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) @@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq) #endif static const struct rtc_class_ops mrst_rtc_ops = { - .ioctl = mrst_rtc_ioctl, .read_time = mrst_read_time, .set_time = mrst_set_time, .read_alarm = mrst_read_alarm, .set_alarm = mrst_set_alarm, .proc = mrst_procfs, .irq_set_state = mrst_irq_set_state, + .alarm_irq_enable = mrst_rtc_alarm_irq_enable, }; static struct mrst_rtc mrst_rtc; diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index b2fff0ca49f..67820626e18 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c @@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv, static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, unsigned int reg) { - return __raw_writel(val, 
&priv->regs[reg]); + __raw_writel(val, &priv->regs[reg]); } static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index bcca4729855..60627a76451 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c @@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) return 0; } -static int mv_rtc_ioctl(struct device *dev, unsigned int cmd, - unsigned long arg) +static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; if (pdata->irq < 0) - return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ - switch (cmd) { - case RTC_AIE_OFF: - writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); - break; - case RTC_AIE_ON: + return -EINVAL; /* fall back into rtc-dev's emulation */ + + if (enabled) writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); - break; - default: - return -ENOIOCTLCMD; - } + else + writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); return 0; } @@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = { .set_time = mv_rtc_set_time, .read_alarm = mv_rtc_read_alarm, .set_alarm = mv_rtc_set_alarm, - .ioctl = mv_rtc_ioctl, + .alarm_irq_enable = mv_rtc_alarm_irq_enable, }; static int __devinit mv_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index e72b523c79a..b4dbf3a319b 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) u8 reg; switch (cmd) { - case RTC_AIE_OFF: - case RTC_AIE_ON: case RTC_UIE_OFF: case RTC_UIE_ON: break; @@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) rtc_wait_not_busy(); reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); switch (cmd) { - /* AIE = Alarm Interrupt Enable */ - case RTC_AIE_OFF: - reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; - break; - case RTC_AIE_ON: - reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; - break; /* UIE = Update Interrupt Enable (1/second) */ case RTC_UIE_OFF: reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; @@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) #define omap_rtc_ioctl NULL #endif +static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + u8 reg; + + local_irq_disable(); + rtc_wait_not_busy(); + reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); + if (enabled) + reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; + else + reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; + rtc_wait_not_busy(); + rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); + local_irq_enable(); + + return 0; +} + /* this hardware doesn't support "don't care" alarm fields */ static int tm2bcd(struct rtc_time *tm) { @@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = { .set_time = omap_rtc_set_time, .read_alarm = omap_rtc_read_alarm, .set_alarm = omap_rtc_set_alarm, + .alarm_irq_enable = omap_rtc_alarm_irq_enable, }; static int omap_rtc_alarm; diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index c086fc30a84..242bbf86c74 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c @@ -81,12 +81,16 @@ static int rtc_proc_show(struct seq_file *seq, void *offset) static int rtc_proc_open(struct inode *inode, struct file *file) { + int ret; struct rtc_device *rtc = PDE(inode)->data; if (!try_module_get(THIS_MODULE)) return -ENODEV; - 
return single_open(file, rtc_proc_show, rtc); + ret = single_open(file, rtc_proc_show, rtc); + if (ret) + module_put(THIS_MODULE); + return ret; } static int rtc_proc_release(struct inode *inode, struct file *file) diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 36eb6618446..694da39b6dd 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c @@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv, static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, unsigned int reg) { - return __raw_writel(val, &priv->regs[reg]); + __raw_writel(val, &priv->regs[reg]); } static void rp5c01_lock(struct rp5c01_priv *priv) diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index dd14e202c2c..6aaa1550e3b 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) if (rs5c->type == rtc_rs5c372a && (buf & RS5C372A_CTRL1_SL1)) return -ENOIOCTLCMD; - case RTC_AIE_OFF: - case RTC_AIE_ON: - /* these irq management calls only make sense for chips - * which are wired up to an IRQ. - */ - if (!rs5c->has_irq) - return -ENOIOCTLCMD; - break; default: return -ENOIOCTLCMD; } @@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) addr = RS5C_ADDR(RS5C_REG_CTRL1); switch (cmd) { - case RTC_AIE_OFF: /* alarm off */ - buf &= ~RS5C_CTRL1_AALE; - break; - case RTC_AIE_ON: /* alarm on */ - buf |= RS5C_CTRL1_AALE; - break; case RTC_UIE_OFF: /* update off */ buf &= ~RS5C_CTRL1_CT_MASK; break; @@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) #endif +static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct i2c_client *client = to_i2c_client(dev); + struct rs5c372 *rs5c = i2c_get_clientdata(client); + unsigned char buf; + int status, addr; + + buf = rs5c->regs[RS5C_REG_CTRL1]; + + if (!rs5c->has_irq) + return -EINVAL; + + status = rs5c_get_regs(rs5c); + if (status < 0) + return status; + + addr = RS5C_ADDR(RS5C_REG_CTRL1); + if (enabled) + buf |= RS5C_CTRL1_AALE; + else + buf &= ~RS5C_CTRL1_AALE; + + if (i2c_smbus_write_byte_data(client, addr, buf) < 0) { + printk(KERN_WARNING "%s: can't update alarm\n", + rs5c->rtc->name); + status = -EIO; + } else + rs5c->regs[RS5C_REG_CTRL1] = buf; + + return status; +} + + /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, * which only exposes a polled programming interface; and since * these calls map directly to those EFI requests; we don't demand @@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = { .set_time = rs5c372_rtc_set_time, .read_alarm = rs5c_read_alarm, .set_alarm = rs5c_set_alarm, + .alarm_irq_enable = rs5c_rtc_alarm_irq_enable, }; #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 88ea52b8647..5dfe5ffcb0d 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { switch (cmd) { - case RTC_AIE_OFF: - spin_lock_irq(&sa1100_rtc_lock); - RTSR &= ~RTSR_ALE; - spin_unlock_irq(&sa1100_rtc_lock); - return 0; - case RTC_AIE_ON: - spin_lock_irq(&sa1100_rtc_lock); - RTSR |= RTSR_ALE; - spin_unlock_irq(&sa1100_rtc_lock); - return 0; case RTC_UIE_OFF: spin_lock_irq(&sa1100_rtc_lock); RTSR &= ~RTSR_HZE; @@ -338,6 +328,17 @@ static int 
sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, return -ENOIOCTLCMD; } +static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + spin_lock_irq(&sa1100_rtc_lock); + if (enabled) + RTSR |= RTSR_ALE; + else + RTSR &= ~RTSR_ALE; + spin_unlock_irq(&sa1100_rtc_lock); + return 0; +} + static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) { rtc_time_to_tm(RCNR, tm); @@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = { .proc = sa1100_rtc_proc, .irq_set_freq = sa1100_irq_set_freq, .irq_set_state = sa1100_irq_set_state, + .alarm_irq_enable = sa1100_rtc_alarm_irq_enable, }; static int sa1100_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 06e41ed9323..93314a9e7fa 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) unsigned int ret = 0; switch (cmd) { - case RTC_AIE_OFF: - case RTC_AIE_ON: - sh_rtc_setaie(dev, cmd == RTC_AIE_ON); - break; case RTC_UIE_OFF: rtc->periodic_freq &= ~PF_OXS; sh_rtc_setcie(dev, 0); @@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) return ret; } +static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + sh_rtc_setaie(dev, enabled); + return 0; +} + static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); @@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = { .irq_set_state = sh_rtc_irq_set_state, .irq_set_freq = sh_rtc_irq_set_freq, .proc = sh_rtc_proc, + .alarm_irq_enable = sh_rtc_alarm_irq_enable, }; static int __init sh_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index 51725f7755b..a82d6fe9707 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c @@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq) return 0; } -static int test_rtc_ioctl(struct device *dev, unsigned int cmd, - unsigned long arg) +static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) { - /* We do support interrupts, they're generated - * using the sysfs interface. 
- */ - switch (cmd) { - case RTC_PIE_ON: - case RTC_PIE_OFF: - case RTC_UIE_ON: - case RTC_UIE_OFF: - case RTC_AIE_ON: - case RTC_AIE_OFF: - return 0; - - default: - return -ENOIOCTLCMD; - } + return 0; } static const struct rtc_class_ops test_rtc_ops = { @@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = { .read_alarm = test_rtc_read_alarm, .set_alarm = test_rtc_set_alarm, .set_mmss = test_rtc_set_mmss, - .ioctl = test_rtc_ioctl, + .alarm_irq_enable = test_rtc_alarm_irq_enable, }; static ssize_t test_irq_show(struct device *dev, diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index c3244244e8c..769190ac6d1 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c @@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled) static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { switch (cmd) { - case RTC_AIE_ON: - spin_lock_irq(&rtc_lock); - - if (!alarm_enabled) { - enable_irq(aie_irq); - alarm_enabled = 1; - } - - spin_unlock_irq(&rtc_lock); - break; - case RTC_AIE_OFF: - spin_lock_irq(&rtc_lock); - - if (alarm_enabled) { - disable_irq(aie_irq); - alarm_enabled = 0; - } - - spin_unlock_irq(&rtc_lock); - break; case RTC_EPOCH_READ: return put_user(epoch, (unsigned long __user *)arg); case RTC_EPOCH_SET: @@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long return 0; } +static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + spin_lock_irq(&rtc_lock); + if (enabled) { + if (!alarm_enabled) { + enable_irq(aie_irq); + alarm_enabled = 1; + } + } else { + if (alarm_enabled) { + disable_irq(aie_irq); + alarm_enabled = 0; + } + } + spin_unlock_irq(&rtc_lock); + return 0; +} + static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) { struct platform_device *pdev = (struct platform_device *)dev_id; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 318672d0556..a9fe23d5bd0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline; static struct ccw_device_id dasd_eckd_ids[] = { { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, - { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, + { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 44578b56ad0..d3e58d763b4 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) { struct Scsi_Host *host = rport_to_shost(rport); fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + unsigned long flags; if (!fcport) return; @@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) * Transport has effectively 'deleted' the rport, clear * all local references. 
*/ - spin_lock_irq(host->host_lock); + spin_lock_irqsave(host->host_lock, flags); fcport->rport = fcport->drport = NULL; *((fc_port_t **)rport->dd_data) = NULL; - spin_unlock_irq(host->host_lock); + spin_unlock_irqrestore(host->host_lock, flags); if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f948e1a73ae..d9479c3fe5f 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; + unsigned long flags; - spin_lock_irq(fcport->vha->host->host_lock); + spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; - spin_unlock_irq(fcport->vha->host->host_lock); + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); } @@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) struct fc_rport_identifiers rport_ids; struct fc_rport *rport; struct qla_hw_data *ha = vha->hw; + unsigned long flags; qla2x00_rport_del(fcport); @@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "Unable to allocate fc remote port!\n"); return; } - spin_lock_irq(fcport->vha->host->host_lock); + spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; - spin_unlock_irq(fcport->vha->host->host_lock); + spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); rport->supported_classes = fcport->supported_classes; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1f..f27724d76cf 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) } if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || - atomic_read(&fcport->state) == FCS_DEVICE_LOST || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; @@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, { struct fc_rport *rport; scsi_qla_host_t *base_vha; + unsigned long flags; if (!fcport->rport) return; @@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, rport = fcport->rport; if (defer) { base_vha = pci_get_drvdata(vha->hw->pdev); - spin_lock_irq(vha->host->host_lock); + spin_lock_irqsave(vha->host->host_lock, flags); fcport->drport = rport; - spin_unlock_irq(vha->host->host_lock); + spin_unlock_irqrestore(vha->host->host_lock, flags); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } else @@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data) set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { DEBUG3(printk("qla2x00: DPC handler sleeping\n")); - set_current_state(TASK_INTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); @@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data) qla2x00_do_dpc_all_vps(base_vha); ha->dpc_active = 0; + set_current_state(TASK_INTERRUPTIBLE); } /* End of while(1) */ + __set_current_state(TASK_RUNNING); DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b310934efe..a6b2d72022f 100644 
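The qla2xxx hunks above replace spin_lock_irq()/spin_unlock_irq() with spin_lock_irqsave()/spin_unlock_irqrestore(), so the caller's saved interrupt state is restored on unlock rather than interrupts being unconditionally re-enabled. A minimal standalone sketch of that locking pattern (illustration only, not part of the patches; the lock and counter below are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration */
static unsigned long example_count;	/* hypothetical shared state */

static void example_update(void)
{
	unsigned long flags;

	/* Save the caller's IRQ state, disable IRQs and take the lock. */
	spin_lock_irqsave(&example_lock, flags);
	example_count++;
	/* Restore the saved IRQ state instead of forcing interrupts back on. */
	spin_unlock_irqrestore(&example_lock, flags);
}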
--- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd, unsigned long long lba, unsigned int num, int write) { int ret; - unsigned int block, rest = 0; + unsigned long long block, rest = 0; int (*func)(struct scsi_cmnd *, unsigned char *, int); func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c index 351d8a375b5..19752b09e15 100644 --- a/drivers/spi/pxa2xx_spi_pci.c +++ b/drivers/spi/pxa2xx_spi_pci.c @@ -7,10 +7,9 @@ #include <linux/of_device.h> #include <linux/spi/pxa2xx_spi.h> -struct awesome_struct { +struct ce4100_info { struct ssp_device ssp; - struct platform_device spi_pdev; - struct pxa2xx_spi_master spi_pdata; + struct platform_device *spi_pdev; }; static DEFINE_MUTEX(ssp_lock); @@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp) } EXPORT_SYMBOL_GPL(pxa_ssp_free); -static void plat_dev_release(struct device *dev) -{ - struct awesome_struct *as = container_of(dev, - struct awesome_struct, spi_pdev.dev); - - of_device_node_put(&as->spi_pdev.dev); -} - static int __devinit ce4100_spi_probe(struct pci_dev *dev, const struct pci_device_id *ent) { int ret; resource_size_t phys_beg; resource_size_t phys_len; - struct awesome_struct *spi_info; + struct ce4100_info *spi_info; struct platform_device *pdev; - struct pxa2xx_spi_master *spi_pdata; + struct pxa2xx_spi_master spi_pdata; struct ssp_device *ssp; ret = pci_enable_device(dev); @@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, return ret; } + pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); - if (!spi_info) { + if (!pdev || !spi_info ) { ret = -ENOMEM; - goto err_kz; + goto err_nomem; } - ssp = &spi_info->ssp; - pdev = &spi_info->spi_pdev; - spi_pdata = &spi_info->spi_pdata; + memset(&spi_pdata, 0, sizeof(spi_pdata)); + spi_pdata.num_chipselect = dev->devfn; - pdev->name = "pxa2xx-spi"; - pdev->id = dev->devfn; - pdev->dev.parent = &dev->dev; - pdev->dev.platform_data = &spi_info->spi_pdata; + ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); + if (ret) + goto err_nomem; + pdev->dev.parent = &dev->dev; #ifdef CONFIG_OF pdev->dev.of_node = dev->dev.of_node; #endif - pdev->dev.release = plat_dev_release; - - spi_pdata->num_chipselect = dev->devfn; - + ssp = &spi_info->ssp; ssp->phys_base = pci_resource_start(dev, 0); ssp->mmio_base = ioremap(phys_beg, phys_len); if (!ssp->mmio_base) { dev_err(&pdev->dev, "failed to ioremap() registers\n"); ret = -EIO; - goto err_remap; + goto err_nomem; } ssp->irq = dev->irq; ssp->port_id = pdev->id; @@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, pci_set_drvdata(dev, spi_info); - ret = platform_device_register(pdev); + ret = platform_device_add(pdev); if (ret) goto err_dev_add; @@ -135,27 +123,21 @@ err_dev_add: mutex_unlock(&ssp_lock); iounmap(ssp->mmio_base); -err_remap: - kfree(spi_info); - -err_kz: +err_nomem: release_mem_region(phys_beg, phys_len); - + platform_device_put(pdev); + kfree(spi_info); return ret; } static void __devexit ce4100_spi_remove(struct pci_dev *dev) { - struct awesome_struct *spi_info; - struct platform_device *pdev; + struct ce4100_info *spi_info; struct ssp_device *ssp; spi_info = pci_get_drvdata(dev); - ssp = &spi_info->ssp; - pdev = &spi_info->spi_pdev; - - platform_device_unregister(pdev); + platform_device_unregister(spi_info->spi_pdev); iounmap(ssp->mmio_base); 
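The ce4100_spi_probe() rework above drops the embedded, statically initialised struct platform_device in favour of the dynamic platform_device_alloc()/platform_device_add() API, which copies the platform data and manages the device's release for the caller. A minimal standalone sketch of that allocation pattern (illustration only, not part of the patch; the device name and data structure are hypothetical):

#include <linux/platform_device.h>
#include <linux/errno.h>

struct example_pdata {			/* hypothetical platform data */
	int num_chipselect;
};

static int example_register(void)
{
	struct example_pdata pdata = { .num_chipselect = 1 };
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", 0);	/* hypothetical name/id */
	if (!pdev)
		return -ENOMEM;

	/* The core copies the data, so a stack-local structure is fine. */
	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err_put;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;
	return 0;	/* teardown later via platform_device_unregister(pdev) */

err_put:
	platform_device_put(pdev);	/* drops the reference taken by _alloc() */
	return ret;
}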
release_mem_region(pci_resource_start(dev, 0), @@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev) } static struct pci_device_id ce4100_spi_devices[] __devinitdata = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, { }, }; diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 56f60c8ea0a..2c665fceaac 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c @@ -509,9 +509,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) bytes_done = 0; while (bytes_done < t->len) { + void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL; + const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL; n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, - t->tx_buf + bytes_done, - t->rx_buf + bytes_done, + tx_buf, + rx_buf, words, bits); if (n < 0) break; diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index c7345dbf43f..f8533795ee7 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -733,7 +733,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus, /* Fetch the vendor specific tuples. */ res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS, - ssb_pcmcia_do_get_invariants, sprom); + ssb_pcmcia_do_get_invariants, iv); if ((res == 0) || (res == -ENOSPC)) return 0; diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c index f1235884cc5..cd8392badff 100644 --- a/drivers/staging/brcm80211/sys/wl_mac80211.c +++ b/drivers/staging/brcm80211/sys/wl_mac80211.c @@ -263,9 +263,7 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan, switch (type) { case NL80211_CHAN_HT20: case NL80211_CHAN_NO_HT: - WL_LOCK(wl); err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value); - WL_UNLOCK(wl); break; case NL80211_CHAN_HT40MINUS: case NL80211_CHAN_HT40PLUS: @@ -285,6 +283,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed) int err = 0; int new_int; + WL_LOCK(wl); if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { WL_NONE("%s: Setting listen interval to %d\n", __func__, conf->listen_interval); @@ -341,6 +340,7 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed) } config_out: + WL_UNLOCK(wl); return err; } @@ -459,13 +459,21 @@ wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) static void wl_ops_sw_scan_start(struct ieee80211_hw *hw) { + struct wl_info *wl = hw->priv; WL_NONE("Scan Start\n"); + WL_LOCK(wl); + wlc_scan_start(wl->wlc); + WL_UNLOCK(wl); return; } static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw) { + struct wl_info *wl = hw->priv; WL_NONE("Scan Complete\n"); + WL_LOCK(wl); + wlc_scan_stop(wl->wlc); + WL_UNLOCK(wl); return; } diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c index a1303863686..e37e8058e2b 100644 --- a/drivers/staging/brcm80211/sys/wlc_mac80211.c +++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c @@ -8461,3 +8461,16 @@ static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh, kfree(qi); } + +/* + * Flag 'scan in progress' to withold dynamic phy calibration + */ +void wlc_scan_start(struct wlc_info *wlc) +{ + wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true); +} + +void wlc_scan_stop(struct wlc_info *wlc) +{ + wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false); +} diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h index 146a6904a39..aff413001b7 100644 --- a/drivers/staging/brcm80211/sys/wlc_pub.h +++ 
b/drivers/staging/brcm80211/sys/wlc_pub.h @@ -570,6 +570,8 @@ extern void wlc_enable_mac(struct wlc_info *wlc); extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate); extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg); extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg); +extern void wlc_scan_start(struct wlc_info *wlc); +extern void wlc_scan_stop(struct wlc_info *wlc); static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name, uint *arg) diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index aad47326d6d..1502d80f6f7 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -439,6 +439,7 @@ config COMEDI_NI_AT_AO config COMEDI_NI_ATMIO tristate "NI AT-MIO E series ISA-PNP card support" depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON + select COMEDI_8255 default N ---help--- Enable support for National Instruments AT-MIO E series cards @@ -1040,6 +1041,8 @@ config COMEDI_NI_PCIDIO config COMEDI_NI_PCIMIO tristate "NI PCI-MIO-E series and M series support" depends on COMEDI_NI_TIO && COMEDI_NI_COMMON + select COMEDI_8255 + select COMEDI_FC default N ---help--- Enable support for National Instruments PCI-MIO-E series and M series @@ -1164,6 +1167,7 @@ config COMEDI_NI_LABPC_CS config COMEDI_NI_MIO_CS tristate "NI DAQCard E series PCMCIA support" depends on COMEDI_NI_TIO && COMEDI_NI_COMMON + select COMEDI_8255 select COMEDI_FC default N ---help--- @@ -1268,7 +1272,6 @@ config COMEDI_MITE config COMEDI_NI_TIO tristate "NI general purpose counter support" depends on COMEDI_MITE - select COMEDI_8255 default N ---help--- Enable support for National Instruments general purpose counters. diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index cd25b241cc1..fd274e9c7b7 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c @@ -61,8 +61,6 @@ #define PCI_DAQ_SIZE 4096 #define PCI_DAQ_SIZE_660X 8192 -MODULE_LICENSE("GPL"); - struct mite_struct *mite_devices; EXPORT_SYMBOL(mite_devices); diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 14e716e99a5..54741c9e1af 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -527,3 +527,7 @@ static void __exit driver_ni6527_cleanup_module(void) module_init(driver_ni6527_init_module); module_exit(driver_ni6527_cleanup_module); + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index 8b8e2aaf77f..403fc0997d3 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c @@ -871,3 +871,7 @@ static void __exit driver_ni_65xx_cleanup_module(void) module_init(driver_ni_65xx_init_module); module_exit(driver_ni_65xx_cleanup_module); + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index 6612b085c4e..ca2aeaa9449 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -1421,3 +1421,7 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev, }; return 0; } + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c index e9f034efdc6..d8d91f90060 100644 --- a/drivers/staging/comedi/drivers/ni_670x.c +++ b/drivers/staging/comedi/drivers/ni_670x.c @@ -384,3 +384,7 @@ static int ni_670x_find_device(struct comedi_device *dev, int bus, int slot) mite_list_devices(); return -EIO; } + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index 84a15c34e48..005d2fe86ee 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -1354,3 +1354,7 @@ static void __exit driver_pcidio_cleanup_module(void) module_init(driver_pcidio_init_module); module_exit(driver_pcidio_cleanup_module); + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 23a38124728..9148abdad07 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c @@ -1853,3 +1853,7 @@ static int pcimio_dio_change(struct comedi_device *dev, return 0; } + +MODULE_AUTHOR("Comedi http://www.comedi.org"); +MODULE_DESCRIPTION("Comedi low-level driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 54706a16dc0..b41c9640b72 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -236,6 +236,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, if (status == 1) { netif_carrier_on(net); netif_wake_queue(net); + netif_notify_peers(net); } else { netif_carrier_off(net); netif_stop_queue(net); diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c index e38e89df6e8..e2f6d6a3c85 100644 --- a/drivers/staging/intel_sst/intelmid_v2_control.c +++ b/drivers/staging/intel_sst/intelmid_v2_control.c @@ -874,7 +874,10 @@ static int nc_set_selected_input_dev(u8 value) sc_access[3].reg_addr = 0x109; sc_access[3].mask = MASK6; sc_access[3].value = 0x00; - num_val = 4; + sc_access[4].reg_addr = 0x104; + sc_access[4].value = 0x3C; + sc_access[4].mask = 0xff; + num_val = 5; break; default: return -EINVAL; diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 5415712f01f..4bd8cbdaee7 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -227,6 +227,7 @@ static int zram_read(struct zram *zram, struct bio *bio) if (zram_test_flag(zram, index, ZRAM_ZERO)) { handle_zero_page(page); + index++; continue; } @@ -235,12 +236,14 @@ static int zram_read(struct zram *zram, struct bio *bio) pr_debug("Read before write: sector=%lu, size=%u", (ulong)(bio->bi_sector), bio->bi_size); /* Do nothing */ + index++; continue; } /* Page is stored uncompressed since it's incompressible */ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { handle_uncompressed_page(zram, page, index); + index++; continue; } @@ -320,6 +323,7 @@ static int zram_write(struct zram *zram, struct bio *bio) mutex_unlock(&zram->lock); zram_stat_inc(&zram->stats.pages_zero); zram_set_flag(zram, index, ZRAM_ZERO); + index++; continue; } diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 5cfd70819f0..973bb190ef5 100644 --- 
a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \ target_core_transport.o \ target_core_cdb.o \ target_core_ua.o \ - target_core_rd.o \ - target_core_mib.o + target_core_rd.o obj-$(CONFIG_TARGET_CORE) += target_core_mod.o diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2764510798b..caf8dc18ee0 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -37,7 +37,6 @@ #include <linux/parser.h> #include <linux/syscalls.h> #include <linux/configfs.h> -#include <linux/proc_fs.h> #include <target/target_core_base.h> #include <target/target_core_device.h> @@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item) { struct se_subsystem_dev *se_dev = container_of(to_config_group(item), struct se_subsystem_dev, se_dev_group); - struct config_group *dev_cg; - - if (!(se_dev)) - return; + struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); + struct se_subsystem_api *t = hba->transport; + struct config_group *dev_cg = &se_dev->se_dev_group; - dev_cg = &se_dev->se_dev_group; kfree(dev_cg->default_groups); + /* + * This pointer will set when the storage is enabled with: + *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` + */ + if (se_dev->se_dev_ptr) { + printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" + "virtual_device() for se_dev_ptr: %p\n", + se_dev->se_dev_ptr); + + se_free_virtual_device(se_dev->se_dev_ptr, hba); + } else { + /* + * Release struct se_subsystem_dev->se_dev_su_ptr.. + */ + printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" + "device() for se_dev_su_ptr: %p\n", + se_dev->se_dev_su_ptr); + + t->free_device(se_dev->se_dev_su_ptr); + } + + printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" + "_dev_t: %p\n", se_dev); + kfree(se_dev); } static ssize_t target_core_dev_show(struct config_item *item, @@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { NULL, }; +static void target_core_alua_lu_gp_release(struct config_item *item) +{ + struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), + struct t10_alua_lu_gp, lu_gp_group); + + core_alua_free_lu_gp(lu_gp); +} + static struct configfs_item_operations target_core_alua_lu_gp_ops = { + .release = target_core_alua_lu_gp_release, .show_attribute = target_core_alua_lu_gp_attr_show, .store_attribute = target_core_alua_lu_gp_attr_store, }; @@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp( printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" " Group: core/alua/lu_gps/%s, ID: %hu\n", config_item_name(item), lu_gp->lu_gp_id); - + /* + * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() + * -> target_core_alua_lu_gp_release() + */ config_item_put(item); - core_alua_free_lu_gp(lu_gp); } static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { @@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { NULL, }; +static void target_core_alua_tg_pt_gp_release(struct config_item *item) +{ + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), + struct t10_alua_tg_pt_gp, tg_pt_gp_group); + + core_alua_free_tg_pt_gp(tg_pt_gp); +} + static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { + .release = target_core_alua_tg_pt_gp_release, .show_attribute = target_core_alua_tg_pt_gp_attr_show, .store_attribute = 
target_core_alua_tg_pt_gp_attr_store, }; @@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp( printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" " Group: alua/tg_pt_gps/%s, ID: %hu\n", config_item_name(item), tg_pt_gp->tg_pt_gp_id); - + /* + * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() + * -> target_core_alua_tg_pt_gp_release(). + */ config_item_put(item); - core_alua_free_tg_pt_gp(tg_pt_gp); } static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { @@ -2771,13 +2814,11 @@ static void target_core_drop_subdev( struct se_subsystem_api *t; struct config_item *df_item; struct config_group *dev_cg, *tg_pt_gp_cg; - int i, ret; + int i; hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); - if (mutex_lock_interruptible(&hba->hba_access_mutex)) - goto out; - + mutex_lock(&hba->hba_access_mutex); t = hba->transport; spin_lock(&se_global->g_device_lock); @@ -2791,7 +2832,10 @@ static void target_core_drop_subdev( config_item_put(df_item); } kfree(tg_pt_gp_cg->default_groups); - core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); + /* + * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp + * directly from target_core_alua_tg_pt_gp_release(). + */ T10_ALUA(se_dev)->default_tg_pt_gp = NULL; dev_cg = &se_dev->se_dev_group; @@ -2800,38 +2844,12 @@ static void target_core_drop_subdev( dev_cg->default_groups[i] = NULL; config_item_put(df_item); } - - config_item_put(item); /* - * This pointer will set when the storage is enabled with: - * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` + * The releasing of se_dev and associated se_dev->se_dev_ptr is done + * from target_core_dev_item_ops->release() ->target_core_dev_release(). */ - if (se_dev->se_dev_ptr) { - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" - "virtual_device() for se_dev_ptr: %p\n", - se_dev->se_dev_ptr); - - ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); - if (ret < 0) - goto hba_out; - } else { - /* - * Release struct se_subsystem_dev->se_dev_su_ptr.. 
- */ - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" - "device() for se_dev_su_ptr: %p\n", - se_dev->se_dev_su_ptr); - - t->free_device(se_dev->se_dev_su_ptr); - } - - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" - "_dev_t: %p\n", se_dev); - -hba_out: + config_item_put(item); mutex_unlock(&hba->hba_access_mutex); -out: - kfree(se_dev); } static struct configfs_group_operations target_core_hba_group_ops = { @@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); +static void target_core_hba_release(struct config_item *item) +{ + struct se_hba *hba = container_of(to_config_group(item), + struct se_hba, hba_group); + core_delete_hba(hba); +} + static struct configfs_attribute *target_core_hba_attrs[] = { &target_core_hba_hba_info.attr, &target_core_hba_hba_mode.attr, @@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = { }; static struct configfs_item_operations target_core_hba_item_ops = { + .release = target_core_hba_release, .show_attribute = target_core_hba_attr_show, .store_attribute = target_core_hba_attr_store, }; @@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget( struct config_group *group, struct config_item *item) { - struct se_hba *hba = item_to_hba(item); - + /* + * core_delete_hba() is called from target_core_hba_item_ops->release() + * -> target_core_hba_release() + */ config_item_put(item); - core_delete_hba(hba); } static struct configfs_group_operations target_core_group_ops = { @@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void) struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; struct configfs_subsystem *subsys; - struct proc_dir_entry *scsi_target_proc = NULL; struct t10_alua_lu_gp *lu_gp; int ret; @@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void) if (core_dev_setup_virtual_lun0() < 0) goto out; - scsi_target_proc = proc_mkdir("scsi_target", 0); - if (!(scsi_target_proc)) { - printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); - goto out; - } - ret = init_scsi_target_mib(); - if (ret < 0) - goto out; - return 0; out: configfs_unregister_subsystem(subsys); - if (scsi_target_proc) - remove_proc_entry("scsi_target", 0); core_dev_release_virtual_lun0(); rd_module_exit(); out_global: @@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void) config_item_put(item); } kfree(lu_gp_cg->default_groups); - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + lu_gp_cg->default_groups = NULL; alua_cg = &se_global->alua_group; for (i = 0; alua_cg->default_groups[i]; i++) { @@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void) config_item_put(item); } kfree(alua_cg->default_groups); + alua_cg->default_groups = NULL; hba_cg = &se_global->target_core_hbagroup; for (i = 0; hba_cg->default_groups[i]; i++) { @@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void) config_item_put(item); } kfree(hba_cg->default_groups); - - for (i = 0; subsys->su_group.default_groups[i]; i++) { - item = &subsys->su_group.default_groups[i]->cg_item; - subsys->su_group.default_groups[i] = NULL; - config_item_put(item); - } + hba_cg->default_groups = NULL; + /* + * We expect subsys->su_group.default_groups to be released + * by configfs subsystem provider logic.. 
+ */ + configfs_unregister_subsystem(subsys); kfree(subsys->su_group.default_groups); - configfs_unregister_subsystem(subsys); + core_alua_free_lu_gp(se_global->default_lu_gp); + se_global->default_lu_gp = NULL; + printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); - remove_scsi_target_mib(); - remove_proc_entry("scsi_target", 0); core_dev_release_virtual_lun0(); rd_module_exit(); release_se_global(); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 317ce58d426..5da051a07fa 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -373,11 +373,11 @@ int core_update_device_list_for_node( /* * deve->se_lun_acl will be NULL for demo-mode created LUNs * that have not been explictly concerted to MappedLUNs -> - * struct se_lun_acl. + * struct se_lun_acl, but we remove deve->alua_port_list from + * port->sep_alua_list. This also means that active UAs and + * NodeACL context specific PR metadata for demo-mode + * MappedLUN *deve will be released below.. */ - if (!(deve->se_lun_acl)) - return 0; - spin_lock_bh(&port->sep_alua_lock); list_del(&deve->alua_port_list); spin_unlock_bh(&port->sep_alua_lock); @@ -395,12 +395,14 @@ int core_update_device_list_for_node( printk(KERN_ERR "struct se_dev_entry->se_lun_acl" " already set for demo mode -> explict" " LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); return -1; } if (deve->se_lun != lun) { printk(KERN_ERR "struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" " -> explict LUN ACL transition\n"); + spin_unlock_irq(&nacl->device_list_lock); return -1; } deve->se_lun_acl = lun_acl; @@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev) } } spin_unlock(&hba->device_lock); - - while (atomic_read(&hba->dev_mib_access_count)) - cpu_relax(); } int se_dev_check_online(struct se_device *dev) diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 32b148d7e26..b65d1c8e774 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); +static void target_fabric_mappedlun_release(struct config_item *item) +{ + struct se_lun_acl *lacl = container_of(to_config_group(item), + struct se_lun_acl, se_lun_group); + struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; + + core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { &target_fabric_mappedlun_write_protect.attr, NULL, }; static struct configfs_item_operations target_fabric_mappedlun_item_ops = { + .release = target_fabric_mappedlun_release, .show_attribute = target_fabric_mappedlun_attr_show, .store_attribute = target_fabric_mappedlun_attr_store, .allow_link = target_fabric_mappedlun_link, @@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun( struct config_group *group, struct config_item *item) { - struct se_lun_acl *lacl = container_of(to_config_group(item), - struct se_lun_acl, se_lun_group); - struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; - config_item_put(item); - core_dev_free_initiator_node_lun_acl(se_tpg, lacl); +} + +static void target_fabric_nacl_base_release(struct config_item *item) +{ + struct se_node_acl *se_nacl = container_of(to_config_group(item), + struct se_node_acl, acl_group); + 
struct se_portal_group *se_tpg = se_nacl->se_tpg; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + tf->tf_ops.fabric_drop_nodeacl(se_nacl); } static struct configfs_item_operations target_fabric_nacl_base_item_ops = { + .release = target_fabric_nacl_base_release, .show_attribute = target_fabric_nacl_base_attr_show, .store_attribute = target_fabric_nacl_base_attr_store, }; @@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl( struct config_group *group, struct config_item *item) { - struct se_portal_group *se_tpg = container_of(group, - struct se_portal_group, tpg_acl_group); - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_node_acl *se_nacl = container_of(to_config_group(item), struct se_node_acl, acl_group); struct config_item *df_item; @@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl( nacl_cg->default_groups[i] = NULL; config_item_put(df_item); } - + /* + * struct se_node_acl free is done in target_fabric_nacl_base_release() + */ config_item_put(item); - tf->tf_ops.fabric_drop_nodeacl(se_nacl); } static struct configfs_group_operations target_fabric_nacl_group_ops = { @@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); +static void target_fabric_np_base_release(struct config_item *item) +{ + struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), + struct se_tpg_np, tpg_np_group); + struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; + struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; + + tf->tf_ops.fabric_drop_np(se_tpg_np); +} + static struct configfs_item_operations target_fabric_np_base_item_ops = { + .release = target_fabric_np_base_release, .show_attribute = target_fabric_np_base_attr_show, .store_attribute = target_fabric_np_base_attr_store, }; @@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np( if (!(se_tpg_np) || IS_ERR(se_tpg_np)) return ERR_PTR(-EINVAL); + se_tpg_np->tpg_np_parent = se_tpg; config_group_init_type_name(&se_tpg_np->tpg_np_group, name, &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); @@ -476,14 +502,10 @@ static void target_fabric_drop_np( struct config_group *group, struct config_item *item) { - struct se_portal_group *se_tpg = container_of(group, - struct se_portal_group, tpg_np_group); - struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; - struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), - struct se_tpg_np, tpg_np_group); - + /* + * struct se_tpg_np is released via target_fabric_np_base_release() + */ config_item_put(item); - tf->tf_ops.fabric_drop_np(se_tpg_np); } static struct configfs_group_operations target_fabric_np_group_ops = { @@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); */ CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); +static void target_fabric_tpg_release(struct config_item *item) +{ + struct se_portal_group *se_tpg = container_of(to_config_group(item), + struct se_portal_group, tpg_group); + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct target_fabric_configfs *tf = wwn->wwn_tf; + + tf->tf_ops.fabric_drop_tpg(se_tpg); +} + static struct configfs_item_operations target_fabric_tpg_base_item_ops = { + .release = target_fabric_tpg_release, .show_attribute = target_fabric_tpg_attr_show, .store_attribute = target_fabric_tpg_attr_store, }; @@ -872,8 +905,6 @@ static void target_fabric_drop_tpg( struct config_group *group, struct config_item *item) { 
- struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); - struct target_fabric_configfs *tf = wwn->wwn_tf; struct se_portal_group *se_tpg = container_of(to_config_group(item), struct se_portal_group, tpg_group); struct config_group *tpg_cg = &se_tpg->tpg_group; @@ -890,15 +921,28 @@ static void target_fabric_drop_tpg( } config_item_put(item); - tf->tf_ops.fabric_drop_tpg(se_tpg); } +static void target_fabric_release_wwn(struct config_item *item) +{ + struct se_wwn *wwn = container_of(to_config_group(item), + struct se_wwn, wwn_group); + struct target_fabric_configfs *tf = wwn->wwn_tf; + + tf->tf_ops.fabric_drop_wwn(wwn); +} + +static struct configfs_item_operations target_fabric_tpg_item_ops = { + .release = target_fabric_release_wwn, +}; + static struct configfs_group_operations target_fabric_tpg_group_ops = { .make_group = target_fabric_make_tpg, .drop_item = target_fabric_drop_tpg, }; -TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); +TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, + NULL); /* End of tfc_tpg_cit */ @@ -932,13 +976,7 @@ static void target_fabric_drop_wwn( struct config_group *group, struct config_item *item) { - struct target_fabric_configfs *tf = container_of(group, - struct target_fabric_configfs, tf_group); - struct se_wwn *wwn = container_of(to_config_group(item), - struct se_wwn, wwn_group); - config_item_put(item); - tf->tf_ops.fabric_drop_wwn(wwn); } static struct configfs_group_operations target_fabric_wwn_group_ops = { diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c6e0d757e76..67f0c09983c 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice( bd = blkdev_get_by_path(ib_dev->ibd_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); - if (!(bd)) + if (IS_ERR(bd)) goto failed; /* * Setup the local scope queue_limits from struct request_queue->limits @@ -220,8 +220,10 @@ static void iblock_free_device(void *p) { struct iblock_dev *ib_dev = p; - blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); - bioset_free(ib_dev->ibd_bio_set); + if (ib_dev->ibd_bd != NULL) + blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); + if (ib_dev->ibd_bio_set != NULL) + bioset_free(ib_dev->ibd_bio_set); kfree(ib_dev); } diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c deleted file mode 100644 index d5a48aa0d2d..00000000000 --- a/drivers/target/target_core_mib.c +++ /dev/null @@ -1,1078 +0,0 @@ -/******************************************************************************* - * Filename: target_core_mib.c - * - * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger <nab@linux-iscsi.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ******************************************************************************/ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/timer.h> -#include <linux/string.h> -#include <linux/version.h> -#include <generated/utsrelease.h> -#include <linux/utsname.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/blkdev.h> -#include <scsi/scsi.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_host.h> - -#include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> -#include <target/target_core_configfs.h> - -#include "target_core_hba.h" -#include "target_core_mib.h" - -/* SCSI mib table index */ -static struct scsi_index_table scsi_index_table; - -#ifndef INITIAL_JIFFIES -#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) -#endif - -/* SCSI Instance Table */ -#define SCSI_INST_SW_INDEX 1 -#define SCSI_TRANSPORT_INDEX 1 - -#define NONE "None" -#define ISPRINT(a) ((a >= ' ') && (a <= '~')) - -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return list->prev == head; -} - -static void *locate_hba_start( - struct seq_file *seq, - loff_t *pos) -{ - spin_lock(&se_global->g_device_lock); - return seq_list_start(&se_global->g_se_dev_list, *pos); -} - -static void *locate_hba_next( - struct seq_file *seq, - void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_dev_list, pos); -} - -static void locate_hba_stop(struct seq_file *seq, void *v) -{ - spin_unlock(&se_global->g_device_lock); -} - -/**************************************************************************** - * SCSI MIB Tables - ****************************************************************************/ - -/* - * SCSI Instance Table - */ -static void *scsi_inst_seq_start( - struct seq_file *seq, - loff_t *pos) -{ - spin_lock(&se_global->hba_lock); - return seq_list_start(&se_global->g_hba_list, *pos); -} - -static void *scsi_inst_seq_next( - struct seq_file *seq, - void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_hba_list, pos); -} - -static void scsi_inst_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock(&se_global->hba_lock); -} - -static int scsi_inst_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba = list_entry(v, struct se_hba, hba_list); - - if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) - seq_puts(seq, "inst sw_indx\n"); - - seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); - seq_printf(seq, "plugin: %s version: %s\n", - hba->transport->name, TARGET_CORE_VERSION); - - return 0; -} - -static const struct seq_operations scsi_inst_seq_ops = { - .start = scsi_inst_seq_start, - .next = scsi_inst_seq_next, - .stop = scsi_inst_seq_stop, - .show = scsi_inst_seq_show -}; - -static int scsi_inst_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_inst_seq_ops); -} - -static const struct file_operations scsi_inst_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_inst_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Device Table - */ -static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void 
*scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_dev_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_dev_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - char str[28]; - int k; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst indx role ports\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - seq_printf(seq, "%u %u %s %u\n", hba->hba_index, - dev->dev_index, "Target", dev->dev_port_count); - - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - - /* vendor */ - for (k = 0; k < 8; k++) - str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? - DEV_T10_WWN(dev)->vendor[k] : 0x20; - str[k] = 0x20; - - /* model */ - for (k = 0; k < 16; k++) - str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? - DEV_T10_WWN(dev)->model[k] : 0x20; - str[k + 9] = 0; - - seq_printf(seq, "dev_alias: %s\n", str); - - return 0; -} - -static const struct seq_operations scsi_dev_seq_ops = { - .start = scsi_dev_seq_start, - .next = scsi_dev_seq_next, - .stop = scsi_dev_seq_stop, - .show = scsi_dev_seq_show -}; - -static int scsi_dev_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_dev_seq_ops); -} - -static const struct file_operations scsi_dev_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_dev_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Port Table - */ -static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_port_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *sep, *sep_tmp; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx role busy_count\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? 
*/ - return 0; - } - - /* FIXME: scsiPortBusyStatuses count */ - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { - seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, - dev->dev_index, sep->sep_index, "Device", - dev->dev_index, 0); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_port_seq_ops = { - .start = scsi_port_seq_start, - .next = scsi_port_seq_next, - .stop = scsi_port_seq_stop, - .show = scsi_port_seq_show -}; - -static int scsi_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_port_seq_ops); -} - -static const struct file_operations scsi_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Transport Table - */ -static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_transport_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_transport_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *se, *se_tmp; - struct se_portal_group *tpg; - struct t10_wwn *wwn; - char buf[64]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx dev_name\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - wwn = DEV_T10_WWN(dev); - - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { - tpg = se->sep_tpg; - sprintf(buf, "scsiTransport%s", - TPG_TFO(tpg)->get_fabric_name()); - - seq_printf(seq, "%u %s %u %s+%s\n", - hba->hba_index, /* scsiTransportIndex */ - buf, /* scsiTransportType */ - (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? - TPG_TFO(tpg)->tpg_get_inst_index(tpg) : - 0, - TPG_TFO(tpg)->tpg_get_wwn(tpg), - (strlen(wwn->unit_serial)) ? 
- /* scsiTransportDevName */ - wwn->unit_serial : wwn->vendor); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_transport_seq_ops = { - .start = scsi_transport_seq_start, - .next = scsi_transport_seq_next, - .stop = scsi_transport_seq_stop, - .show = scsi_transport_seq_show -}; - -static int scsi_transport_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_transport_seq_ops); -} - -static const struct file_operations scsi_transport_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_transport_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Target Device Table - */ -static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - - -#define LU_COUNT 1 /* for now */ -static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - int non_accessible_lus = 0; - char status[16]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst indx num_LUs status non_access_LUs" - " resets\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - switch (dev->dev_status) { - case TRANSPORT_DEVICE_ACTIVATED: - strcpy(status, "activated"); - break; - case TRANSPORT_DEVICE_DEACTIVATED: - strcpy(status, "deactivated"); - non_accessible_lus = 1; - break; - case TRANSPORT_DEVICE_SHUTDOWN: - strcpy(status, "shutdown"); - non_accessible_lus = 1; - break; - case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: - case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: - strcpy(status, "offline"); - non_accessible_lus = 1; - break; - default: - sprintf(status, "unknown(%d)", dev->dev_status); - non_accessible_lus = 1; - } - - seq_printf(seq, "%u %u %u %s %u %u\n", - hba->hba_index, dev->dev_index, LU_COUNT, - status, non_accessible_lus, dev->num_resets); - - return 0; -} - -static const struct seq_operations scsi_tgt_dev_seq_ops = { - .start = scsi_tgt_dev_seq_start, - .next = scsi_tgt_dev_seq_next, - .stop = scsi_tgt_dev_seq_stop, - .show = scsi_tgt_dev_seq_show -}; - -static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_tgt_dev_seq_ops); -} - -static const struct file_operations scsi_tgt_dev_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_tgt_dev_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Target Port Table - */ -static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - struct se_port *sep, *sep_tmp; - struct se_portal_group *tpg; - u32 rx_mbytes, tx_mbytes; - 
unsigned long long num_cmds; - char buf[64]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst device indx name port_index in_cmds" - " write_mbytes read_mbytes hs_in_cmds\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - spin_lock(&dev->se_port_lock); - list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { - tpg = sep->sep_tpg; - sprintf(buf, "%sPort#", - TPG_TFO(tpg)->get_fabric_name()); - - seq_printf(seq, "%u %u %u %s%d %s%s%d ", - hba->hba_index, - dev->dev_index, - sep->sep_index, - buf, sep->sep_index, - TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", - TPG_TFO(tpg)->tpg_get_tag(tpg)); - - spin_lock(&sep->sep_lun->lun_sep_lock); - num_cmds = sep->sep_stats.cmd_pdus; - rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); - tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); - spin_unlock(&sep->sep_lun->lun_sep_lock); - - seq_printf(seq, "%llu %u %u %u\n", num_cmds, - rx_mbytes, tx_mbytes, 0); - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - -static const struct seq_operations scsi_tgt_port_seq_ops = { - .start = scsi_tgt_port_seq_start, - .next = scsi_tgt_port_seq_next, - .stop = scsi_tgt_port_seq_stop, - .show = scsi_tgt_port_seq_show -}; - -static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_tgt_port_seq_ops); -} - -static const struct file_operations scsi_tgt_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_tgt_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Authorized Initiator Table: - * It contains the SCSI Initiators authorized to be attached to one of the - * local Target ports. - * Iterates through all active TPGs and extracts the info from the ACLs - */ -static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) -{ - spin_lock_bh(&se_global->se_tpg_lock); - return seq_list_start(&se_global->g_se_tpg_list, *pos); -} - -static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_tpg_list, pos); -} - -static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock_bh(&se_global->se_tpg_lock); -} - -static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) -{ - struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, - se_tpg_list); - struct se_dev_entry *deve; - struct se_lun *lun; - struct se_node_acl *se_nacl; - int j; - - if (list_is_first(&se_tpg->se_tpg_list, - &se_global->g_se_tpg_list)) - seq_puts(seq, "inst dev port indx dev_or_port intr_name " - "map_indx att_count num_cmds read_mbytes " - "write_mbytes hs_num_cmds creation_time row_status\n"); - - if (!(se_tpg)) - return 0; - - spin_lock(&se_tpg->acl_node_lock); - list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { - - atomic_inc(&se_nacl->mib_ref_count); - smp_mb__after_atomic_inc(); - spin_unlock(&se_tpg->acl_node_lock); - - spin_lock_irq(&se_nacl->device_list_lock); - for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { - deve = &se_nacl->device_list[j]; - if (!(deve->lun_flags & - TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || - (!deve->se_lun)) - continue; - lun = deve->se_lun; - if (!lun->lun_se_dev) - continue; - - seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" - " %u %s\n", - /* scsiInstIndex */ - (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? 
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : - 0, - /* scsiDeviceIndex */ - lun->lun_se_dev->dev_index, - /* scsiAuthIntrTgtPortIndex */ - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), - /* scsiAuthIntrIndex */ - se_nacl->acl_index, - /* scsiAuthIntrDevOrPort */ - 1, - /* scsiAuthIntrName */ - se_nacl->initiatorname[0] ? - se_nacl->initiatorname : NONE, - /* FIXME: scsiAuthIntrLunMapIndex */ - 0, - /* scsiAuthIntrAttachedTimes */ - deve->attach_count, - /* scsiAuthIntrOutCommands */ - deve->total_cmds, - /* scsiAuthIntrReadMegaBytes */ - (u32)(deve->read_bytes >> 20), - /* scsiAuthIntrWrittenMegaBytes */ - (u32)(deve->write_bytes >> 20), - /* FIXME: scsiAuthIntrHSOutCommands */ - 0, - /* scsiAuthIntrLastCreation */ - (u32)(((u32)deve->creation_time - - INITIAL_JIFFIES) * 100 / HZ), - /* FIXME: scsiAuthIntrRowStatus */ - "Ready"); - } - spin_unlock_irq(&se_nacl->device_list_lock); - - spin_lock(&se_tpg->acl_node_lock); - atomic_dec(&se_nacl->mib_ref_count); - smp_mb__after_atomic_dec(); - } - spin_unlock(&se_tpg->acl_node_lock); - - return 0; -} - -static const struct seq_operations scsi_auth_intr_seq_ops = { - .start = scsi_auth_intr_seq_start, - .next = scsi_auth_intr_seq_next, - .stop = scsi_auth_intr_seq_stop, - .show = scsi_auth_intr_seq_show -}; - -static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_auth_intr_seq_ops); -} - -static const struct file_operations scsi_auth_intr_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_auth_intr_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Attached Initiator Port Table: - * It lists the SCSI Initiators attached to one of the local Target ports. - * Iterates through all active TPGs and use active sessions from each TPG - * to list the info fo this table. 
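[Editor's note] The /proc tables deleted in this file all follow the standard seq_file idiom: start/next/stop walk a global list under a lock, and show prints one row, emitting the column header only for the first element. A stripped-down sketch of that idiom, with hypothetical example_* names rather than the target-core structures:

    #include <linux/list.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/spinlock.h>

    struct example_entry {
            struct list_head list;
            unsigned int index;
    };

    static LIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_lock);

    static void *example_seq_start(struct seq_file *seq, loff_t *pos)
    {
            spin_lock(&example_lock);       /* held until ->stop(); no sleeping */
            return seq_list_start(&example_list, *pos);
    }

    static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            return seq_list_next(v, &example_list, pos);
    }

    static void example_seq_stop(struct seq_file *seq, void *v)
    {
            spin_unlock(&example_lock);
    }

    static int example_seq_show(struct seq_file *seq, void *v)
    {
            struct example_entry *e = list_entry(v, struct example_entry, list);

            /* Print the header before the first row only. */
            if (e->list.prev == &example_list)
                    seq_puts(seq, "indx\n");
            seq_printf(seq, "%u\n", e->index);
            return 0;
    }

    static const struct seq_operations example_seq_ops = {
            .start  = example_seq_start,
            .next   = example_seq_next,
            .stop   = example_seq_stop,
            .show   = example_seq_show,
    };

    static int example_seq_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &example_seq_ops);
    }

    static const struct file_operations example_seq_fops = {
            .owner   = THIS_MODULE,
            .open    = example_seq_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,
    };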
- */ -static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) -{ - spin_lock_bh(&se_global->se_tpg_lock); - return seq_list_start(&se_global->g_se_tpg_list, *pos); -} - -static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, - loff_t *pos) -{ - return seq_list_next(v, &se_global->g_se_tpg_list, pos); -} - -static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) -{ - spin_unlock_bh(&se_global->se_tpg_lock); -} - -static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) -{ - struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, - se_tpg_list); - struct se_dev_entry *deve; - struct se_lun *lun; - struct se_node_acl *se_nacl; - struct se_session *se_sess; - unsigned char buf[64]; - int j; - - if (list_is_first(&se_tpg->se_tpg_list, - &se_global->g_se_tpg_list)) - seq_puts(seq, "inst dev port indx port_auth_indx port_name" - " port_ident\n"); - - if (!(se_tpg)) - return 0; - - spin_lock(&se_tpg->session_lock); - list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { - if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || - (!se_sess->se_node_acl) || - (!se_sess->se_node_acl->device_list)) - continue; - - atomic_inc(&se_sess->mib_ref_count); - smp_mb__after_atomic_inc(); - se_nacl = se_sess->se_node_acl; - atomic_inc(&se_nacl->mib_ref_count); - smp_mb__after_atomic_inc(); - spin_unlock(&se_tpg->session_lock); - - spin_lock_irq(&se_nacl->device_list_lock); - for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { - deve = &se_nacl->device_list[j]; - if (!(deve->lun_flags & - TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || - (!deve->se_lun)) - continue; - - lun = deve->se_lun; - if (!lun->lun_se_dev) - continue; - - memset(buf, 0, 64); - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) - TPG_TFO(se_tpg)->sess_get_initiator_sid( - se_sess, (unsigned char *)&buf[0], 64); - - seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", - /* scsiInstIndex */ - (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? - TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : - 0, - /* scsiDeviceIndex */ - lun->lun_se_dev->dev_index, - /* scsiPortIndex */ - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), - /* scsiAttIntrPortIndex */ - (TPG_TFO(se_tpg)->sess_get_index != NULL) ? - TPG_TFO(se_tpg)->sess_get_index(se_sess) : - 0, - /* scsiAttIntrPortAuthIntrIdx */ - se_nacl->acl_index, - /* scsiAttIntrPortName */ - se_nacl->initiatorname[0] ? 
- se_nacl->initiatorname : NONE, - /* scsiAttIntrPortIdentifier */ - buf); - } - spin_unlock_irq(&se_nacl->device_list_lock); - - spin_lock(&se_tpg->session_lock); - atomic_dec(&se_nacl->mib_ref_count); - smp_mb__after_atomic_dec(); - atomic_dec(&se_sess->mib_ref_count); - smp_mb__after_atomic_dec(); - } - spin_unlock(&se_tpg->session_lock); - - return 0; -} - -static const struct seq_operations scsi_att_intr_port_seq_ops = { - .start = scsi_att_intr_port_seq_start, - .next = scsi_att_intr_port_seq_next, - .stop = scsi_att_intr_port_seq_stop, - .show = scsi_att_intr_port_seq_show -}; - -static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_att_intr_port_seq_ops); -} - -static const struct file_operations scsi_att_intr_port_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_att_intr_port_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/* - * SCSI Logical Unit Table - */ -static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) -{ - return locate_hba_start(seq, pos); -} - -static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - return locate_hba_next(seq, v, pos); -} - -static void scsi_lu_seq_stop(struct seq_file *seq, void *v) -{ - locate_hba_stop(seq, v); -} - -#define SCSI_LU_INDEX 1 -static int scsi_lu_seq_show(struct seq_file *seq, void *v) -{ - struct se_hba *hba; - struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, - g_se_dev_list); - struct se_device *dev = se_dev->se_dev_ptr; - int j; - char str[28]; - - if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) - seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" - " dev_type status state-bit num_cmds read_mbytes" - " write_mbytes resets full_stat hs_num_cmds creation_time\n"); - - if (!(dev)) - return 0; - - hba = dev->se_hba; - if (!(hba)) { - /* Log error ? */ - return 0; - } - - /* Fix LU state, if we can read it from the device */ - seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, - dev->dev_index, SCSI_LU_INDEX, - (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ - (strlen(DEV_T10_WWN(dev)->unit_serial)) ? - /* scsiLuWwnName */ - (char *)&DEV_T10_WWN(dev)->unit_serial[0] : - "None"); - - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - /* scsiLuVendorId */ - for (j = 0; j < 8; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? - DEV_T10_WWN(dev)->vendor[j] : 0x20; - str[8] = 0; - seq_printf(seq, " %s", str); - - /* scsiLuProductId */ - for (j = 0; j < 16; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? - DEV_T10_WWN(dev)->model[j] : 0x20; - str[16] = 0; - seq_printf(seq, " %s", str); - - /* scsiLuRevisionId */ - for (j = 0; j < 4; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? - DEV_T10_WWN(dev)->revision[j] : 0x20; - str[4] = 0; - seq_printf(seq, " %s", str); - - seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", - /* scsiLuPeripheralType */ - TRANSPORT(dev)->get_device_type(dev), - (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 
- "available" : "notavailable", /* scsiLuStatus */ - "exposed", /* scsiLuState */ - (unsigned long long)dev->num_cmds, - /* scsiLuReadMegaBytes */ - (u32)(dev->read_bytes >> 20), - /* scsiLuWrittenMegaBytes */ - (u32)(dev->write_bytes >> 20), - dev->num_resets, /* scsiLuInResets */ - 0, /* scsiLuOutTaskSetFullStatus */ - 0, /* scsiLuHSInCommands */ - (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * - 100 / HZ)); - - return 0; -} - -static const struct seq_operations scsi_lu_seq_ops = { - .start = scsi_lu_seq_start, - .next = scsi_lu_seq_next, - .stop = scsi_lu_seq_stop, - .show = scsi_lu_seq_show -}; - -static int scsi_lu_seq_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &scsi_lu_seq_ops); -} - -static const struct file_operations scsi_lu_seq_fops = { - .owner = THIS_MODULE, - .open = scsi_lu_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -/****************************************************************************/ - -/* - * Remove proc fs entries - */ -void remove_scsi_target_mib(void) -{ - remove_proc_entry("scsi_target/mib/scsi_inst", NULL); - remove_proc_entry("scsi_target/mib/scsi_dev", NULL); - remove_proc_entry("scsi_target/mib/scsi_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_transport", NULL); - remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); - remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); - remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); - remove_proc_entry("scsi_target/mib/scsi_lu", NULL); - remove_proc_entry("scsi_target/mib", NULL); -} - -/* - * Create proc fs entries for the mib tables - */ -int init_scsi_target_mib(void) -{ - struct proc_dir_entry *dir_entry; - struct proc_dir_entry *scsi_inst_entry; - struct proc_dir_entry *scsi_dev_entry; - struct proc_dir_entry *scsi_port_entry; - struct proc_dir_entry *scsi_transport_entry; - struct proc_dir_entry *scsi_tgt_dev_entry; - struct proc_dir_entry *scsi_tgt_port_entry; - struct proc_dir_entry *scsi_auth_intr_entry; - struct proc_dir_entry *scsi_att_intr_port_entry; - struct proc_dir_entry *scsi_lu_entry; - - dir_entry = proc_mkdir("scsi_target/mib", NULL); - if (!(dir_entry)) { - printk(KERN_ERR "proc_mkdir() failed.\n"); - return -1; - } - - scsi_inst_entry = - create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); - if (scsi_inst_entry) - scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; - else - goto error; - - scsi_dev_entry = - create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); - if (scsi_dev_entry) - scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; - else - goto error; - - scsi_port_entry = - create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); - if (scsi_port_entry) - scsi_port_entry->proc_fops = &scsi_port_seq_fops; - else - goto error; - - scsi_transport_entry = - create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); - if (scsi_transport_entry) - scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; - else - goto error; - - scsi_tgt_dev_entry = - create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); - if (scsi_tgt_dev_entry) - scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; - else - goto error; - - scsi_tgt_port_entry = - create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); - if (scsi_tgt_port_entry) - scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; - else - goto error; - - scsi_auth_intr_entry = - create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); - if (scsi_auth_intr_entry) - 
scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; - else - goto error; - - scsi_att_intr_port_entry = - create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); - if (scsi_att_intr_port_entry) - scsi_att_intr_port_entry->proc_fops = - &scsi_att_intr_port_seq_fops; - else - goto error; - - scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); - if (scsi_lu_entry) - scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; - else - goto error; - - return 0; - -error: - printk(KERN_ERR "create_proc_entry() failed.\n"); - remove_scsi_target_mib(); - return -1; -} - -/* - * Initialize the index table for allocating unique row indexes to various mib - * tables - */ -void init_scsi_index_table(void) -{ - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); - spin_lock_init(&scsi_index_table.lock); -} - -/* - * Allocate a new row index for the entry type specified - */ -u32 scsi_get_new_index(scsi_index_t type) -{ - u32 new_index; - - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { - printk(KERN_ERR "Invalid index type %d\n", type); - return -1; - } - - spin_lock(&scsi_index_table.lock); - new_index = ++scsi_index_table.scsi_mib_index[type]; - if (new_index == 0) - new_index = ++scsi_index_table.scsi_mib_index[type]; - spin_unlock(&scsi_index_table.lock); - - return new_index; -} -EXPORT_SYMBOL(scsi_get_new_index); diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h deleted file mode 100644 index 27720463385..00000000000 --- a/drivers/target/target_core_mib.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef TARGET_CORE_MIB_H -#define TARGET_CORE_MIB_H - -typedef enum { - SCSI_INST_INDEX, - SCSI_DEVICE_INDEX, - SCSI_AUTH_INTR_INDEX, - SCSI_INDEX_TYPE_MAX -} scsi_index_t; - -struct scsi_index_table { - spinlock_t lock; - u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; -} ____cacheline_aligned; - -/* SCSI Port stats */ -struct scsi_port_stats { - u64 cmd_pdus; - u64 tx_data_octets; - u64 rx_data_octets; -} ____cacheline_aligned; - -extern int init_scsi_target_mib(void); -extern void remove_scsi_target_mib(void); -extern void init_scsi_index_table(void); -extern u32 scsi_get_new_index(scsi_index_t); - -#endif /*** TARGET_CORE_MIB_H ***/ diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 742d24609a9..f2a08477a68 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk( */ bd = blkdev_get_by_path(se_dev->se_dev_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); - if (!(bd)) { - printk("pSCSI: blkdev_get_by_path() failed\n"); + if (IS_ERR(bd)) { + printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); return NULL; } diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index abfa81a5711..c26f6746762 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( spin_lock_init(&acl->device_list_lock); spin_lock_init(&acl->nacl_sess_lock); atomic_set(&acl->acl_pr_ref_count, 0); - atomic_set(&acl->mib_ref_count, 0); acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; @@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) cpu_relax(); } -void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) -{ - while (atomic_read(&nacl->mib_ref_count) != 0) - 
cpu_relax(); -} - void core_tpg_clear_object_luns(struct se_portal_group *tpg) { int i, ret; @@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl( spin_unlock_bh(&tpg->session_lock); core_tpg_wait_for_nacl_pr_ref(acl); - core_tpg_wait_for_mib_ref(acl); core_clear_initiator_node_from_tpg(acl, tpg); core_free_device_list_for_node(acl, tpg); @@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register); int core_tpg_deregister(struct se_portal_group *se_tpg) { + struct se_node_acl *nacl, *nacl_tmp; + printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" " for endpoint: %s Portal Tag %u\n", (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? @@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) cpu_relax(); + /* + * Release any remaining demo-mode generated se_node_acl that have + * not been released because of TFO->tpg_check_demo_mode_cache() == 1 + * in transport_deregister_session(). + */ + spin_lock_bh(&se_tpg->acl_node_lock); + list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, + acl_list) { + list_del(&nacl->acl_list); + se_tpg->num_node_acls--; + spin_unlock_bh(&se_tpg->acl_node_lock); + + core_tpg_wait_for_nacl_pr_ref(nacl); + core_free_device_list_for_node(nacl, se_tpg); + TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); + + spin_lock_bh(&se_tpg->acl_node_lock); + } + spin_unlock_bh(&se_tpg->acl_node_lock); if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) core_tpg_release_virtual_lun0(se_tpg); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 28b6292ff29..236e22d8cfa 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -379,6 +379,40 @@ void release_se_global(void) se_global = NULL; } +/* SCSI statistics table index */ +static struct scsi_index_table scsi_index_table; + +/* + * Initialize the index table for allocating unique row indexes to various mib + * tables. 
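[Editor's note] The core_tpg_deregister() hunk above drains leftover demo-mode ACLs by unlinking each node under acl_node_lock, dropping the lock for the release work that may sleep, and retaking it before the next entry; list_for_each_entry_safe() keeps the walk valid across the list_del(). The sketch below shows the same drop-and-retake pattern in a more defensive form that re-reads the list head after every unlock; example_* names are placeholders:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct example_entry {
            struct list_head list;
    };

    static LIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_lock);

    static void example_release(struct example_entry *e);   /* may sleep */

    static void example_drain(void)
    {
            struct example_entry *e;

            spin_lock(&example_lock);
            while (!list_empty(&example_list)) {
                    e = list_first_entry(&example_list, struct example_entry,
                                         list);
                    list_del(&e->list);
                    spin_unlock(&example_lock);

                    example_release(e);     /* sleeping work, lock dropped */

                    spin_lock(&example_lock);
            }
            spin_unlock(&example_lock);
    }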
+ */ +void init_scsi_index_table(void) +{ + memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); + spin_lock_init(&scsi_index_table.lock); +} + +/* + * Allocate a new row index for the entry type specified + */ +u32 scsi_get_new_index(scsi_index_t type) +{ + u32 new_index; + + if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { + printk(KERN_ERR "Invalid index type %d\n", type); + return -EINVAL; + } + + spin_lock(&scsi_index_table.lock); + new_index = ++scsi_index_table.scsi_mib_index[type]; + if (new_index == 0) + new_index = ++scsi_index_table.scsi_mib_index[type]; + spin_unlock(&scsi_index_table.lock); + + return new_index; +} + void transport_init_queue_obj(struct se_queue_obj *qobj) { atomic_set(&qobj->queue_cnt, 0); @@ -437,7 +471,6 @@ struct se_session *transport_init_session(void) } INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); - atomic_set(&se_sess->mib_ref_count, 0); return se_sess; } @@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); return; } - /* - * Wait for possible reference in drivers/target/target_core_mib.c: - * scsi_att_intr_port_seq_show() - */ - while (atomic_read(&se_sess->mib_ref_count) != 0) - cpu_relax(); spin_lock_bh(&se_tpg->session_lock); list_del(&se_sess->sess_list); @@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess) spin_unlock_bh(&se_tpg->acl_node_lock); core_tpg_wait_for_nacl_pr_ref(se_nacl); - core_tpg_wait_for_mib_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, se_nacl); @@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map( return ret; } + + BUG_ON(list_empty(se_mem_list)); /* * This is the normal path for all normal non BIDI and BIDI-COMMAND * WRITE payloads.. If we need to do BIDI READ passthrough for @@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) struct se_mem *se_mem = NULL, *se_mem_lout = NULL; u32 se_mem_cnt = 0, task_offset = 0; - BUG_ON(list_empty(cmd->t_task->t_mem_list)); + if (!list_empty(T_TASK(cmd)->t_mem_list)) + se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, + struct se_mem, se_list); ret = transport_do_se_mem_map(dev, task, cmd->t_task->t_mem_list, NULL, se_mem, diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index f7a5dba3ca2..bf7c687519e 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -4,7 +4,6 @@ menuconfig THERMAL tristate "Generic Thermal sysfs driver" - depends on NET help Generic Thermal Sysfs driver offers a generic mechanism for thermal management. 
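[Editor's note] scsi_get_new_index(), relocated here into target_core_transport.c, hands out unique non-zero row indexes per table type from a spinlock-protected counter, incrementing a second time if the counter wraps to zero. The same idea in isolation, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    enum example_index_type {
            EXAMPLE_INDEX_A,
            EXAMPLE_INDEX_B,
            EXAMPLE_INDEX_MAX,
    };

    static DEFINE_SPINLOCK(example_index_lock);
    static u32 example_index[EXAMPLE_INDEX_MAX];

    static u32 example_get_new_index(enum example_index_type type)
    {
            u32 new_index;

            spin_lock(&example_index_lock);
            new_index = ++example_index[type];
            if (new_index == 0)             /* 0 is reserved; skip it on wrap */
                    new_index = ++example_index[type];
            spin_unlock(&example_index_lock);

            return new_index;
    }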
Usually it's made up of one or more thermal diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 7d0e63c7928..713b7ea4a60 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock); static unsigned int thermal_event_seqnum; -static struct genl_family thermal_event_genl_family = { - .id = GENL_ID_GENERATE, - .name = THERMAL_GENL_FAMILY_NAME, - .version = THERMAL_GENL_VERSION, - .maxattr = THERMAL_GENL_ATTR_MAX, -}; - -static struct genl_multicast_group thermal_event_mcgrp = { - .name = THERMAL_GENL_MCAST_GROUP_NAME, -}; - -static int genetlink_init(void); -static void genetlink_exit(void); - static int get_idr(struct idr *idr, struct mutex *lock, int *id) { int err; @@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) EXPORT_SYMBOL(thermal_zone_device_unregister); +#ifdef CONFIG_NET +static struct genl_family thermal_event_genl_family = { + .id = GENL_ID_GENERATE, + .name = THERMAL_GENL_FAMILY_NAME, + .version = THERMAL_GENL_VERSION, + .maxattr = THERMAL_GENL_ATTR_MAX, +}; + +static struct genl_multicast_group thermal_event_mcgrp = { + .name = THERMAL_GENL_MCAST_GROUP_NAME, +}; + int generate_netlink_event(u32 orig, enum events event) { struct sk_buff *skb; @@ -1301,6 +1299,15 @@ static int genetlink_init(void) return result; } +static void genetlink_exit(void) +{ + genl_unregister_family(&thermal_event_genl_family); +} +#else /* !CONFIG_NET */ +static inline int genetlink_init(void) { return 0; } +static inline void genetlink_exit(void) {} +#endif /* !CONFIG_NET */ + static int __init thermal_init(void) { int result = 0; @@ -1316,11 +1323,6 @@ static int __init thermal_init(void) return result; } -static void genetlink_exit(void) -{ - genl_unregister_family(&thermal_event_genl_family); -} - static void __exit thermal_exit(void) { class_unregister(&thermal_class); diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile index e6bed5f177f..d79e7e9bf9d 100644 --- a/drivers/tty/hvc/Makefile +++ b/drivers/tty/hvc/Makefile @@ -10,4 +10,3 @@ obj-$(CONFIG_HVC_XEN) += hvc_xen.o obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o obj-$(CONFIG_HVCS) += hvcs.o -obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 44b8412a04e..aa2e5d3eb01 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2414,6 +2414,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, gsm->initiator = c->initiator; gsm->mru = c->mru; + gsm->mtu = c->mtu; gsm->encoding = c->encapsulation; gsm->adaption = c->adaption; gsm->n2 = c->n2; diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index be0ebce36e5..de0160e3f8c 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status) static void receive_chars(struct m68k_serial *info, unsigned short rx) { - struct tty_struct *tty = info->port.tty; + struct tty_struct *tty = info->tty; m68328_uart *uart = &uart_addr[info->line]; unsigned char ch, flag; @@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info) goto clear_and_return; } - if((info->xmit_cnt <= 0) || info->port.tty->stopped) { + if((info->xmit_cnt <= 0) || info->tty->stopped) { /* That's peculiar... 
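[Editor's note] The thermal_sys.c rework above drops the hard "depends on NET" and instead compiles the generic netlink pieces only under CONFIG_NET, providing inline no-op stubs otherwise so callers stay #ifdef-free. The shape of that pattern, reduced to a skeleton with hypothetical example_* names:

    #include <linux/init.h>

    #ifdef CONFIG_NET
    static int example_netlink_init(void)
    {
            /* register the genl family, multicast group, etc. */
            return 0;
    }

    static void example_netlink_exit(void)
    {
            /* unregister the genl family */
    }
    #else /* !CONFIG_NET */
    static inline int example_netlink_init(void) { return 0; }
    static inline void example_netlink_exit(void) {}
    #endif /* !CONFIG_NET */

    static int __init example_init(void)
    {
            return example_netlink_init();  /* caller needs no #ifdef */
    }

    static void __exit example_exit(void)
    {
            example_netlink_exit();
    }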
TX ints off */ uart->ustcnt &= ~USTCNT_TX_INTR_MASK; goto clear_and_return; @@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work) struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); struct tty_struct *tty; - tty = info->port.tty; + tty = info->tty; if (!tty) return; #if 0 @@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work) struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); struct tty_struct *tty; - tty = info->port.tty; + tty = info->tty; if (!tty) return; @@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info) uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; #endif - if (info->port.tty) - clear_bit(TTY_IO_ERROR, &info->port.tty->flags); + if (info->tty) + clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; /* @@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info) info->xmit_buf = 0; } - if (info->port.tty) - set_bit(TTY_IO_ERROR, &info->port.tty->flags); + if (info->tty) + set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~S_INITIALIZED; local_irq_restore(flags); @@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info) unsigned cflag; int i; - if (!info->port.tty || !info->port.tty->termios) + if (!info->tty || !info->tty->termios) return; - cflag = info->port.tty->termios->c_cflag; + cflag = info->tty->termios->c_cflag; if (!(port = info->port)) return; @@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration) static int rs_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg) { - int error; struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; int retval; @@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) tty_ldisc_flush(tty); tty->closing = 0; info->event = 0; - info->port.tty = NULL; + info->tty = NULL; #warning "This is not and has never been valid so fix it" #if 0 if (tty->ldisc.num != ldiscs[N_TTY].num) { @@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty) info->event = 0; info->count = 0; info->flags &= ~S_NORMAL_ACTIVE; - info->port.tty = NULL; + info->tty = NULL; wake_up_interruptible(&info->open_wait); } @@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp) info->count++; tty->driver_data = info; - info->port.tty = tty; + info->tty = tty; /* * Start up serial port @@ -1338,7 +1337,7 @@ rs68328_init(void) info = &m68k_soft[i]; info->magic = SERIAL_MAGIC; info->port = (int) &uart_addr[i]; - info->port.tty = NULL; + info->tty = NULL; info->irq = uart_irqs[i]; info->custom_divisor = 16; info->close_delay = 50; diff --git a/drivers/tty/serial/68360serial.c b/drivers/tty/serial/68360serial.c index 88b13356ec1..bc21eeae8fd 100644 --- a/drivers/tty/serial/68360serial.c +++ b/drivers/tty/serial/68360serial.c @@ -2428,6 +2428,7 @@ static const struct tty_operations rs_360_ops = { /* .read_proc = rs_360_read_proc, */ .tiocmget = rs_360_tiocmget, .tiocmset = rs_360_tiocmset, + .get_icount = rs_360_get_icount, }; static int __init rs_360_init(void) diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c index e381b895b04..9b1ff2b6bb3 100644 --- a/drivers/tty/serial/bfin_5xx.c +++ b/drivers/tty/serial/bfin_5xx.c @@ -370,10 +370,8 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; - spin_lock(&uart->port.lock); while (UART_GET_LSR(uart) & DR) bfin_serial_rx_chars(uart); - 
spin_unlock(&uart->port.lock); return IRQ_HANDLED; } @@ -490,9 +488,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) { int x_pos, pos; - dma_disable_irq(uart->tx_dma_channel); - dma_disable_irq(uart->rx_dma_channel); - spin_lock_bh(&uart->port.lock); + dma_disable_irq_nosync(uart->rx_dma_channel); + spin_lock_bh(&uart->rx_lock); /* 2D DMA RX buffer ring is used. Because curr_y_count and * curr_x_count can't be read as an atomic operation, @@ -523,8 +520,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } - spin_unlock_bh(&uart->port.lock); - dma_enable_irq(uart->tx_dma_channel); + spin_unlock_bh(&uart->rx_lock); dma_enable_irq(uart->rx_dma_channel); mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); @@ -571,7 +567,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) unsigned short irqstat; int x_pos, pos; - spin_lock(&uart->port.lock); + spin_lock(&uart->rx_lock); irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); clear_dma_irqstat(uart->rx_dma_channel); @@ -589,7 +585,7 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } - spin_unlock(&uart->port.lock); + spin_unlock(&uart->rx_lock); return IRQ_HANDLED; } @@ -1332,6 +1328,7 @@ static int bfin_serial_probe(struct platform_device *pdev) } #ifdef CONFIG_SERIAL_BFIN_DMA + spin_lock_init(&uart->rx_lock); uart->tx_done = 1; uart->tx_count = 0; diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8..7b951adac54 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) s->rts = 0; sprintf(b, "max3100-%d", s->minor); - s->workqueue = create_freezeable_workqueue(b); + s->workqueue = create_freezable_workqueue(b); if (!s->workqueue) { dev_warn(&s->spi->dev, "cannot create workqueue\n"); return -EBUSY; diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf70..750b4f62731 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c @@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) struct max3107_port *s = container_of(port, struct max3107_port, port); /* Initialize work queue */ - s->workqueue = create_freezeable_workqueue("max3107"); + s->workqueue = create_freezable_workqueue("max3107"); if (!s->workqueue) { dev_err(&s->spi->dev, "Workqueue creation failed\n"); return -EBUSY; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 8e0dd254eb1..81f13958e75 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -571,6 +571,7 @@ struct sysrq_state { unsigned int alt_use; bool active; bool need_reinject; + bool reinjecting; }; static void sysrq_reinject_alt_sysrq(struct work_struct *work) @@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) unsigned int alt_code = sysrq->alt_use; if (sysrq->need_reinject) { + /* we do not want the assignment to be reordered */ + sysrq->reinjecting = true; + mb(); + /* Simulate press and release of Alt + SysRq */ input_inject_event(handle, EV_KEY, alt_code, 1); input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); @@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); input_inject_event(handle, EV_KEY, alt_code, 0); input_inject_event(handle, EV_SYN, SYN_REPORT, 1); + + mb(); + sysrq->reinjecting = false; } } @@ -599,6 +607,13 @@ 
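[Editor's note] The sysrq.c change above sets a reinjecting flag around the simulated Alt+SysRq key events, with mb() barriers so the flag is visibly set before the injected events are delivered and cleared only after they are done; the filter (shown just below) then lets those self-injected events pass through. A condensed sketch of the idea, with hypothetical example_* names:

    #include <linux/types.h>

    struct example_state {
            bool reinjecting;
    };

    static void example_reinject(struct example_state *st)
    {
            st->reinjecting = true;
            mb();           /* flag visible before the injected events */

            /* ... input_inject_event() calls for the simulated keystrokes ... */

            mb();           /* injected events finished before the flag clears */
            st->reinjecting = false;
    }

    static bool example_filter(struct example_state *st, bool suppress)
    {
            /* Let our own injected events pass through unfiltered. */
            if (st->reinjecting)
                    return false;

            return suppress;
    }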
static bool sysrq_filter(struct input_handle *handle, bool was_active = sysrq->active; bool suppress; + /* + * Do not filter anything if we are in the process of re-injecting + * Alt+SysRq combination. + */ + if (sysrq->reinjecting) + return false; + switch (type) { case EV_SYN: @@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle, sysrq->alt_use = sysrq->alt; /* * If nothing else will be pressed we'll need - * to * re-inject Alt-SysRq keysroke. + * to re-inject Alt-SysRq keysroke. */ sysrq->need_reinject = true; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index d6ede989ff2..4ab49d4eebf 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1607,6 +1607,7 @@ static const struct usb_device_id acm_ids[] = { { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ + { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6a95017fa62..e935f71d7a3 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1955,7 +1955,6 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) dev_dbg(&rhdev->dev, "usb %s%s\n", (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); - clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); if (!hcd->driver->bus_resume) return -ENOENT; if (hcd->state == HC_STATE_RUNNING) @@ -1963,6 +1962,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) hcd->state = HC_STATE_RESUMING; status = hcd->driver->bus_resume(hcd); + clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); if (status == 0) { /* TRSMRCY = 10 msec */ msleep(10); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4310cc4b1cb..0f299b7aad6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, mutex_lock(&usb_address0_mutex); - if (!udev->config && oldspeed == USB_SPEED_SUPER) { - /* Don't reset USB 3.0 devices during an initial setup */ - usb_set_device_state(udev, USB_STATE_DEFAULT); - } else { - /* Reset the device; full speed may morph to high speed */ - /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ - retval = hub_port_reset(hub, port1, udev, delay); - if (retval < 0) /* error or disconnect */ - goto fail; - /* success, speed is known */ - } + /* Reset the device; full speed may morph to high speed */ + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. 
*/ + retval = hub_port_reset(hub, port1, udev, delay); + if (retval < 0) /* error or disconnect */ + goto fail; + /* success, speed is known */ + retval = -ENODEV; if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { @@ -2753,6 +2749,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, udev->ttport = hdev->ttport; } else if (udev->speed != USB_SPEED_HIGH && hdev->speed == USB_SPEED_HIGH) { + if (!hub->tt.hub) { + dev_err(&udev->dev, "parent hub has no TT\n"); + retval = -EINVAL; + goto fail; + } udev->tt = &hub->tt; udev->ttport = port1; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 44c595432d6..81ce6a8e1d9 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ + { USB_DEVICE(0x04e8, 0x6601), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 06bb9d4587e..d50099675f2 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -546,6 +546,8 @@ config USB_GADGET_CI13XXX_MSM ci13xxx_udc core. This driver depends on OTG driver for PHY initialization, clock management, powering up VBUS, and power management. + This driver is not supported on boards like trout which + has an external PHY. Say "y" to link the driver statically, or "m" to build a dynamically linked module called "ci13xxx_msm" and force all diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index b5dbb2308f5..6d8e533949e 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -293,6 +293,7 @@ #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> +#include <linux/usb/composite.h> #include "gadget_chips.h" @@ -2763,7 +2764,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, return ERR_PTR(-ENOMEM); common->free_storage_on_release = 1; } else { - memset(common, 0, sizeof common); + memset(common, 0, sizeof *common); common->free_storage_on_release = 0; } diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 20d43da319a..015118535f7 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -258,7 +258,7 @@ static int pipe_buffer_setting(struct r8a66597 *r8a66597, break; case R8A66597_BULK: /* isochronous pipes may be used as bulk pipes */ - if (info->pipe > R8A66597_BASE_PIPENUM_BULK) + if (info->pipe >= R8A66597_BASE_PIPENUM_BULK) bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK; else bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC; diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 24046c0f587..0e6afa260ed 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -151,6 +151,8 @@ config USB_EHCI_MSM Qualcomm chipsets. 
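[Editor's note] The f_mass_storage hunk above fixes a classic C mistake: memset(common, 0, sizeof common) zeroes only sizeof(pointer) bytes (4 or 8), leaving most of a caller-supplied structure uninitialized, whereas sizeof *common is the size of the pointed-to object. A tiny stand-alone illustration (user-space C, runnable as-is):

    #include <stdio.h>
    #include <string.h>

    struct common {
            char payload[64];
    };

    int main(void)
    {
            struct common c;
            struct common *p = &c;

            memset(p, 0, sizeof *p);        /* clears the whole structure   */
            /* memset(p, 0, sizeof p) would clear only 4 or 8 bytes of it   */

            printf("sizeof p  = %zu (pointer)\n", sizeof p);
            printf("sizeof *p = %zu (object)\n", sizeof *p);
            return 0;
    }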
Root Hub has inbuilt TT. This driver depends on OTG driver for PHY initialization, clock management, powering up VBUS, and power management. + This driver is not supported on boards like trout which + has an external PHY. config USB_EHCI_HCD_PPC_OF bool "EHCI support for PPC USB controller on OF platform bus" diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index 2baf8a84908..a869e3c103d 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -227,8 +227,8 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev) * mark HW unaccessible. The PM and USB cores make sure that * the root hub is either suspended or stopped. */ - spin_lock_irqsave(&ehci->lock, flags); ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); + spin_lock_irqsave(&ehci->lock, flags); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 796ea0c8900..8a515f0d598 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -111,6 +111,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, { int port; u32 temp; + unsigned long flags; /* If remote wakeup is enabled for the root hub but disabled * for the controller, we must adjust all the port wakeup flags @@ -120,6 +121,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) return; + spin_lock_irqsave(&ehci->lock, flags); + /* clear phy low-power mode before changing wakeup flags */ if (ehci->has_hostpc) { port = HCS_N_PORTS(ehci->hcs_params); @@ -131,7 +134,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); } + spin_unlock_irqrestore(&ehci->lock, flags); msleep(5); + spin_lock_irqsave(&ehci->lock, flags); } port = HCS_N_PORTS(ehci->hcs_params); @@ -170,6 +175,8 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, /* Does the root hub have a port wakeup pending? 
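[Editor's note] The ehci-au1xxx.c and ehci-hub.c hunks above move the locking into ehci_adjust_port_wakeup_flags() itself (callers now invoke it before taking ehci->lock) and release the spinlock around the 5 ms delay, since sleeping while holding a spinlock is not allowed. The general shape, assuming a hypothetical example_ctrl:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    struct example_ctrl {
            spinlock_t lock;
            /* ... hardware state ... */
    };

    static void example_adjust_wakeup(struct example_ctrl *ctrl)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctrl->lock, flags);

            /* register writes that need the lock held ... */

            spin_unlock_irqrestore(&ctrl->lock, flags);
            msleep(5);                      /* may sleep: lock dropped */
            spin_lock_irqsave(&ctrl->lock, flags);

            /* ... continue with the lock held again */

            spin_unlock_irqrestore(&ctrl->lock, flags);
    }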
*/ if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); + + spin_unlock_irqrestore(&ehci->lock, flags); } static int ehci_bus_suspend (struct usb_hcd *hcd) diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 680f2ef4e59..f784ceb862a 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { - dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); + dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret); ret = -ENOMEM; goto err_create_hcd; } @@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) ret = omap_start_ehc(omap, hcd); if (ret) { - dev_dbg(&pdev->dev, "failed to start ehci\n"); + dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret); goto err_start; } @@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (ret) { - dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); + dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret); goto err_add_hcd; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index bed07d4aab0..07bb982e59f 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -367,8 +367,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) * mark HW unaccessible. The PM and USB cores make sure that * the root hub is either suspended or stopped. */ - spin_lock_irqsave (&ehci->lock, flags); ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); + spin_lock_irqsave (&ehci->lock, flags); ehci_writel(ehci, 0, &ehci->regs->intr_enable); (void)ehci_readl(ehci, &ehci->regs->intr_enable); diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 990f06b89ea..2e9602a10e9 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -861,6 +861,7 @@ static int sl811h_urb_enqueue( DBG("dev %d ep%d maxpacket %d\n", udev->devnum, epnum, ep->maxpacket); retval = -EINVAL; + kfree(ep); goto fail; } diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index fcbf4abbf38..0231814a97a 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci) } } -void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) +void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) { - void *addr; + struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num]; + void __iomem *addr; u32 temp; u64 temp_64; @@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, } } -void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) +static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { /* Fields are 32 bits wide, DMA addresses are in bytes */ int field_size = 32 / 8; @@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); } -void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, +static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1d0f45f0e7a..a9534396e85 100644 --- a/drivers/usb/host/xhci-mem.c 
+++ b/drivers/usb/host/xhci-mem.c @@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, /***************** Streams structures manipulation *************************/ -void xhci_free_stream_ctx(struct xhci_hcd *xhci, +static void xhci_free_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) { @@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci, * The stream context array must be a power of 2, and can be as small as * 64 bytes or as large as 1MB. */ -struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, +static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, dma_addr_t *dma, gfp_t mem_flags) { @@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val &= DBOFF_MASK; xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" " from cap regs base addr\n", val); - xhci->dba = (void *) xhci->cap_regs + val; + xhci->dba = (void __iomem *) xhci->cap_regs + val; xhci_dbg_regs(xhci); xhci_print_run_regs(xhci); /* Set ir_set to interrupt register set 0 */ - xhci->ir_set = (void *) xhci->run_regs->ir_set; + xhci->ir_set = &xhci->run_regs->ir_set[0]; /* * Event ring setup: Allocate a normal ring, but also setup @@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); /* * XXX: Might need to set the Interrupter Moderation Register to diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e8211c1ce5..3289bf4832c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(cur_td->start_seg, dev->eps[ep_index].stopped_trb, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg(xhci, "Finding endpoint context\n"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); @@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } trb = &state->new_deq_ptr->generic; if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && @@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) /* Scatter gather list entries may cross 64KB boundaries */ running_total = TRB_MAX_BUFF_SIZE - - (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; /* How many more 64KB chunks to transfer, how many more TRBs? 
*/ - while (running_total < sg_dma_len(sg)) { + while (running_total < sg_dma_len(sg) && running_total < temp) { num_trbs++; running_total += TRB_MAX_BUFF_SIZE; } @@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) static void check_trb_math(struct urb *urb, int num_trbs, int running_total) { if (num_trbs != 0) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " "TRBs, %d left\n", __func__, urb->ep->desc.bEndpointAddress, num_trbs); if (running_total != urb->transfer_buffer_length) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " "queued %#x (%d), asked for %#x (%d)\n", __func__, urb->ep->desc.bEndpointAddress, @@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, sg = urb->sg; addr = (u64) sg_dma_address(sg); this_sg_len = sg_dma_len(sg); - trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; @@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); if (TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { + (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), @@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (running_total + trb_buff_len > urb->transfer_buffer_length) trb_buff_len = @@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, num_trbs = 0; /* How much data is (potentially) left before the 64KB boundary? */ running_total = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; /* If there's some data on this 64KB chunk, or we have to send a * zero-length transfer, we need at least one TRB @@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* How much data is in the first TRB? 
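[Editor's note] The xhci-ring.c hunks replace (1 << TRB_MAX_BUFF_SHIFT) - 1 with the equivalent TRB_MAX_BUFF_SIZE - 1 mask and then AND the result once more, so an address already sitting on a 64 KB boundary yields 0 bytes remaining rather than a full 65536. The arithmetic in isolation (runnable user-space C; names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define TRB_MAX_BUFF_SIZE       (1u << 16)      /* 64 KB segment size */

    /* Bytes from addr up to the next 64 KB boundary; 0 if already aligned. */
    static uint32_t bytes_to_boundary(uint64_t addr)
    {
            uint32_t n = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));

            return n & (TRB_MAX_BUFF_SIZE - 1);
    }

    int main(void)
    {
            printf("%u\n", bytes_to_boundary(0x10000));     /* aligned -> 0 */
            printf("%u\n", bytes_to_boundary(0x10004));     /* -> 65532     */
            printf("%u\n", bytes_to_boundary(0x1fffc));     /* -> 4         */
            return 0;
    }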
*/ addr = (u64) urb->transfer_dma; trb_buff_len = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); - if (urb->transfer_buffer_length < trb_buff_len) + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; first_trb = true; @@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); td_len = urb->iso_frame_desc[i].length; - running_total = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 34cf4e16587..2083fc2179b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci) /* * Set the run bit and wait for the host to be running. */ -int xhci_start(struct xhci_hcd *xhci) +static int xhci_start(struct xhci_hcd *xhci) { u32 temp; int ret; @@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd) #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -void xhci_event_ring_work(unsigned long arg) +static void xhci_event_ring_work(unsigned long arg) { unsigned long flags; int temp; @@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd) xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); xhci_writel(xhci, ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); if (NUM_TEST_NOOPS > 0) doorbell = xhci_setup_one_noop(xhci); @@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd) temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); xhci_writel(xhci, ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); xhci_writel(xhci, ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); + xhci_print_ir_set(xhci, 0); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs) /* Returns 1 if the arguments are OK; * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 
*/ -int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, +static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, const char *func) { struct xhci_hcd *xhci; @@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); } -void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, +static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state) { diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7f236fd2201..7f127df6dd5 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) } /* xHCI debugging */ -void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); +void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num); void xhci_print_registers(struct xhci_hcd *xhci); void xhci_dbg_regs(struct xhci_hcd *xhci); void xhci_print_run_regs(struct xhci_hcd *xhci); diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index eeba228eb2a..9d49d1cd7ce 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb) musb->xceiv->set_power = bfin_musb_set_power; musb->isr = blackfin_interrupt; + musb->double_buffer_not_ok = true; return 0; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 07cf394e491..c292d5c499e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); static inline struct musb *dev_to_musb(struct device *dev) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - /* usbcore insists dev->driver_data is a "struct hcd *" */ - return hcd_to_musb(dev_get_drvdata(dev)); -#else return dev_get_drvdata(dev); -#endif } /*-------------------------------------------------------------------------*/ @@ -1869,6 +1864,7 @@ allocate_instance(struct device *dev, INIT_LIST_HEAD(&musb->out_bulk); hcd->uses_new_polling = 1; + hcd->has_tt = 1; musb->vbuserr_retry = VBUSERR_RETRY_COUNT; musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; @@ -1876,10 +1872,9 @@ allocate_instance(struct device *dev, musb = kzalloc(sizeof *musb, GFP_KERNEL); if (!musb) return NULL; - dev_set_drvdata(dev, musb); #endif - + dev_set_drvdata(dev, musb); musb->mregs = mbase; musb->ctrl_base = mbase; musb->nIrq = -ENODEV; @@ -2191,7 +2186,7 @@ static int __init musb_probe(struct platform_device *pdev) void __iomem *base; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iomem || irq == 0) + if (!iomem || irq <= 0) return -ENODEV; base = ioremap(iomem->start, resource_size(iomem)); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index d0c236f8e19..e6400be8a0f 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -497,6 +497,19 @@ struct musb { struct usb_gadget_driver *gadget_driver; /* its driver */ #endif + /* + * FIXME: Remove this flag. + * + * This is only added to allow Blackfin to work + * with current driver. For some unknown reason + * Blackfin doesn't work with double buffering + * and that's enabled by default. + * + * We added this flag to forcefully disable double + * buffering until we get it working. 
+ */ + unsigned double_buffer_not_ok:1 __deprecated; + struct musb_hdrc_config *config; #ifdef MUSB_CONFIG_PROC_FS diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h index 916065ba9e7..3a97c4e2d4f 100644 --- a/drivers/usb/musb/musb_dma.h +++ b/drivers/usb/musb/musb_dma.h @@ -169,6 +169,9 @@ struct dma_controller { dma_addr_t dma_addr, u32 length); int (*channel_abort)(struct dma_channel *); + int (*is_compatible)(struct dma_channel *channel, + u16 maxpacket, + void *buf, u32 length); }; /* called after channel_program(), may indicate a fault */ diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ed58c6c8f15..2fe304611dc 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -92,11 +92,33 @@ /* ----------------------------------------------------------------------- */ +#define is_buffer_mapped(req) (is_dma_capable() && \ + (req->map_state != UN_MAPPED)) + /* Maps the buffer to dma */ static inline void map_dma_buffer(struct musb_request *request, - struct musb *musb) + struct musb *musb, struct musb_ep *musb_ep) { + int compatible = true; + struct dma_controller *dma = musb->dma_controller; + + request->map_state = UN_MAPPED; + + if (!is_dma_capable() || !musb_ep->dma) + return; + + /* Check if DMA engine can handle this request. + * DMA code must reject the USB request explicitly. + * Default behaviour is to map the request. + */ + if (dma->is_compatible) + compatible = dma->is_compatible(musb_ep->dma, + musb_ep->packet_sz, request->request.buf, + request->request.length); + if (!compatible) + return; + if (request->request.dma == DMA_ADDR_INVALID) { request->request.dma = dma_map_single( musb->controller, @@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - request->mapped = 1; + request->map_state = MUSB_MAPPED; } else { dma_sync_single_for_device(musb->controller, request->request.dma, @@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - request->mapped = 0; + request->map_state = PRE_MAPPED; } } @@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request, static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb) { + if (!is_buffer_mapped(request)) + return; + if (request->request.dma == DMA_ADDR_INVALID) { DBG(20, "not unmapping a never mapped buffer\n"); return; } - if (request->mapped) { + if (request->map_state == MUSB_MAPPED) { dma_unmap_single(musb->controller, request->request.dma, request->request.length, @@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request, ? DMA_TO_DEVICE : DMA_FROM_DEVICE); request->request.dma = DMA_ADDR_INVALID; - request->mapped = 0; - } else { + } else { /* PRE_MAPPED */ dma_sync_single_for_cpu(musb->controller, request->request.dma, request->request.length, request->tx ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); - } + request->map_state = UN_MAPPED; } /* @@ -172,8 +196,7 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - if (is_dma_capable() && ep->dma) - unmap_dma_buffer(req, musb); + unmap_dma_buffer(req, musb); if (request->status == 0) DBG(5, "%s done request %p, %d/%d\n", ep->end_point.name, request, @@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req) csr); #ifndef CONFIG_MUSB_PIO_ONLY - if (is_dma_capable() && musb_ep->dma) { + if (is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; size_t request_size; @@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req) * Unmap the dma buffer back to cpu if dma channel * programming fails */ - if (is_dma_capable() && musb_ep->dma) - unmap_dma_buffer(req, musb); + unmap_dma_buffer(req, musb); musb_write_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); @@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) return; } - if (is_cppi_enabled() && musb_ep->dma) { + if (is_cppi_enabled() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; @@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) len = musb_readw(epio, MUSB_RXCOUNT); if (request->actual < request->length) { #ifdef CONFIG_USB_INVENTRA_DMA - if (is_dma_capable() && musb_ep->dma) { + if (is_buffer_mapped(req)) { struct dma_controller *c; struct dma_channel *channel; int use_dma = 0; @@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) fifo_count = min_t(unsigned, len, fifo_count); #ifdef CONFIG_USB_TUSB_OMAP_DMA - if (tusb_dma_omap() && musb_ep->dma) { + if (tusb_dma_omap() && is_buffer_mapped(req)) { struct dma_controller *c = musb->dma_controller; struct dma_channel *channel = musb_ep->dma; u32 dma_addr = request->dma + request->actual; @@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req) * programming fails. This buffer is mapped if the * channel allocation is successful */ - if (is_dma_capable() && musb_ep->dma) { + if (is_buffer_mapped(req)) { unmap_dma_buffer(req, musb); /* @@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep, /* Set TXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ - musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); + if (musb->double_buffer_not_ok) + musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); + else + musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz + | (musb_ep->hb_mult << 11)); csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) @@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep, /* Set RXMAXP with the FIFO size of the endpoint * to disable double buffering mode. 
*/ - musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); + if (musb->double_buffer_not_ok) + musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); + else + musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz + | (musb_ep->hb_mult << 11)); /* force shared fifo to OUT-only mode */ if (hw_ep->is_shared_fifo) { @@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; - if (is_dma_capable() && musb_ep->dma) - map_dma_buffer(request, musb); - else - request->mapped = 0; + map_dma_buffer(request, musb, musb_ep); spin_lock_irqsave(&musb->lock, lockflags); diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index dec8dc00819..a55354fbccf 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h @@ -35,13 +35,19 @@ #ifndef __MUSB_GADGET_H #define __MUSB_GADGET_H +enum buffer_map_state { + UN_MAPPED = 0, + PRE_MAPPED, + MUSB_MAPPED +}; + struct musb_request { struct usb_request request; struct musb_ep *ep; struct musb *musb; u8 tx; /* endpoint direction */ u8 epnum; - u8 mapped; + enum buffer_map_state map_state; }; static inline struct musb_request *to_musb_request(struct usb_request *req) diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 4d5bcb4e14d..0f523d7db57 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) /* Set RXMAXP with the FIFO size of the endpoint * to disable double buffer mode. */ - if (musb->hwvers < MUSB_HWVERS_2000) + if (musb->double_buffer_not_ok) musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); else musb_writew(ep->regs, MUSB_RXMAXP, @@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum, /* protocol/endpoint/interval/NAKlimit */ if (epnum) { musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); - if (can_bulk_split(musb, qh->type)) + if (musb->double_buffer_not_ok) musb_writew(epio, MUSB_TXMAXP, - packet_sz - | ((hw_ep->max_packet_sz_tx / - packet_sz) - 1) << 11); + hw_ep->max_packet_sz_tx); else musb_writew(epio, MUSB_TXMAXP, - packet_sz); + qh->maxpacket | + ((qh->hb_mult - 1) << 11)); musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); } else { musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index f763d62f151..21056c924c7 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h @@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase, { musb_writew(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW), - ((u16)((u32) dma_addr & 0xFFFF))); + dma_addr); musb_writew(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH), - ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); + (dma_addr >> 16)); } static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) { - return musb_readl(mbase, + u32 count = musb_readw(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); + + count = count << 16; + + count |= musb_readw(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW)); + + return count; } static inline void musb_write_hsdma_count(void __iomem *mbase, u8 bchannel, u32 len) { - musb_writel(mbase, + musb_writew(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len); + musb_writew(mbase, MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH), - 
len); + (len >> 16)); } #endif /* CONFIG_BLACKFIN */ diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index a3f12333fc4..bc8badd1689 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb) static int omap2430_musb_exit(struct musb *musb) { + del_timer_sync(&musb_idle_timer); omap2430_low_level_exit(musb); otg_put_transceiver(musb->xceiv); diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index 9fb875d5f09..9ffc8237fb4 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -103,6 +103,8 @@ config USB_MSM_OTG_72K required after resetting the hardware and power management. This driver is required even for peripheral only or host only mode configurations. + This driver is not supported on boards like trout which + has an external PHY. config AB8500_USB tristate "AB8500 USB Transceiver Driver" diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4787c0cd063..f349a3629d0 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -100,6 +100,7 @@ struct ftdi_sio_quirk { static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); static int ftdi_NDI_device_setup(struct usb_serial *serial); +static int ftdi_stmclite_probe(struct usb_serial *serial); static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); @@ -123,6 +124,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { .port_probe = ftdi_HE_TIRA1_setup, }; +static struct ftdi_sio_quirk ftdi_stmclite_quirk = { + .probe = ftdi_stmclite_probe, +}; + /* * The 8U232AM has the same API as the sio except for: * - it can support MUCH higher baudrates; up to: @@ -616,6 +621,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, + { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, @@ -810,6 +816,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), + .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1709,6 +1717,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial) } /* + * First and second port on STMCLiteadaptors is reserved for JTAG interface + * and the forth port for pio + */ +static int ftdi_stmclite_probe(struct usb_serial *serial) +{ + struct usb_device *udev = serial->dev; + struct usb_interface *interface = serial->interface; + + dbg("%s", __func__); + + if (interface == udev->actconfig->interface[2]) + return 0; + + dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); + + return -ENODEV; +} + +/* * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. * We have to correct it if we want to read from it. 
*/ diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index ed160def858..117e8e6f93c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -518,6 +518,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Acton Research Corp. + */ +#define ACTON_VID 0x0647 /* Vendor ID */ +#define ACTON_SPECTRAPRO_PID 0x0100 + +/* * Contec products (http://www.contec.com) * Submitted by Daniel Sangorrin */ @@ -1034,6 +1040,12 @@ #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ /* + * STMicroelectonics + */ +#define ST_VID 0x0483 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ + +/* * Papouch products (http://www.papouch.com/) * Submitted by Folkert van Heusden */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index cd769ef24f8..3b246d93cf2 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2889,8 +2889,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial) dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); - edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; - edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; + edge_serial->product_info.FirmwareMajorVersion = rec->data[0]; + edge_serial->product_info.FirmwareMinorVersion = rec->data[1]; edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); for (rec = ihex_next_binrec(rec); rec; diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 7481ff8a49e..0457813eebe 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. 
Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ { } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index b2902f307b4..a910004f407 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -369,9 +369,9 @@ failed_1port: static void __exit ti_exit(void) { + usb_deregister(&ti_usb_driver); usb_serial_deregister(&ti_1port_device); usb_serial_deregister(&ti_2port_device); - usb_deregister(&ti_usb_driver); } diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index b004b2a485c..9c014e2ecd6 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb) __func__, status, endpoint); } else { tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { - tty_insert_flip_string(tty, data, urb->actual_length); - tty_flip_buffer_push(tty); - } else - dbg("%s: empty read urb received", __func__); - tty_kref_put(tty); + if (tty) { + if (urb->actual_length) { + tty_insert_flip_string(tty, data, + urb->actual_length); + tty_flip_buffer_push(tty); + } else + dbg("%s: empty read urb received", __func__); + tty_kref_put(tty); + } /* Resubmit urb so we continue receiving */ if (status != -ESHUTDOWN) { diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 15a5d89b7f3..1c11959a7d5 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -27,6 +27,7 @@ #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> +#include <linux/usb/cdc.h> #include "visor.h" /* @@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, dbg("%s", __func__); + /* + * some Samsung Android phones in modem mode have the same ID + * as SPH-I500, but they are ACM devices, so dont bind to them + */ + if (id->idVendor == SAMSUNG_VENDOR_ID && + id->idProduct == SAMSUNG_SPH_I500_ID && + serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && + serial->dev->descriptor.bDeviceSubClass == + USB_CDC_SUBCLASS_ACM) + return -ENODEV; + if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 24bd5d7c3de..c1602b8c559 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1397,6 +1397,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Submitted by Nick Holloway */ +UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, + "VTech", + "Kidizoom", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Reported by Michael Stattmann <michael@stattmann.com> */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1890,6 +1897,13 @@ UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), +/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */ +UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, + "Coby Electronics", + "MP3 Player", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 3a7e9ff8a74..38e96ab9094 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -593,19 +593,17 @@ 
static int __devinit omap_hdq_probe(struct platform_device *pdev) /* get interface & functional clock objects */ hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); - hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); + if (IS_ERR(hdq_data->hdq_ick)) { + dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n"); + ret = PTR_ERR(hdq_data->hdq_ick); + goto err_ick; + } - if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) { - dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n"); - if (IS_ERR(hdq_data->hdq_ick)) { - ret = PTR_ERR(hdq_data->hdq_ick); - goto err_clk; - } - if (IS_ERR(hdq_data->hdq_fck)) { - ret = PTR_ERR(hdq_data->hdq_fck); - clk_put(hdq_data->hdq_ick); - goto err_clk; - } + hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); + if (IS_ERR(hdq_data->hdq_fck)) { + dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n"); + ret = PTR_ERR(hdq_data->hdq_fck); + goto err_fck; } hdq_data->hdq_usecount = 0; @@ -665,10 +663,12 @@ err_fnclk: clk_disable(hdq_data->hdq_ick); err_intfclk: - clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); -err_clk: +err_fck: + clk_put(hdq_data->hdq_ick); + +err_ick: iounmap(hdq_data->hdq_base); err_ioremap: diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 2e2400e7322..31649b7b672 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG # M68K Architecture -config M548x_WATCHDOG - tristate "MCF548x watchdog support" +config M54xx_WATCHDOG + tristate "MCF54xx watchdog support" depends on M548x help To compile this driver as a module, choose M here: the - module will be called m548x_wdt. + module will be called m54xx_wdt. # MIPS Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index dd776651917..20e44c4782b 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o # M32R Architecture # M68K Architecture -obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o +obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o # MIPS Architecture obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c index cabbcfe1c84..4d43286074a 100644 --- a/drivers/watchdog/m548x_wdt.c +++ b/drivers/watchdog/m54xx_wdt.c @@ -1,7 +1,7 @@ /* - * drivers/watchdog/m548x_wdt.c + * drivers/watchdog/m54xx_wdt.c * - * Watchdog driver for ColdFire MCF548x processors + * Watchdog driver for ColdFire MCF547x & MCF548x processors * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> * * Adapted from the IXP4xx watchdog driver, which carries these notices: @@ -29,8 +29,8 @@ #include <linux/uaccess.h> #include <asm/coldfire.h> -#include <asm/m548xsim.h> -#include <asm/m548xgpt.h> +#include <asm/m54xxsim.h> +#include <asm/m54xxgpt.h> static int nowayout = WATCHDOG_NOWAYOUT; static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ @@ -76,7 +76,7 @@ static void wdt_keepalive(void) __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); } -static int m548x_wdt_open(struct inode *inode, struct file *file) +static int m54xx_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; @@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); } -static ssize_t m548x_wdt_write(struct file *file, const char *data, +static ssize_t m54xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { @@ -112,10 +112,10 @@ static ssize_t 
m548x_wdt_write(struct file *file, const char *data, static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, - .identity = "Coldfire M548x Watchdog", + .identity = "Coldfire M54xx Watchdog", }; -static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, +static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -ENOTTY; @@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, return ret; } -static int m548x_wdt_release(struct inode *inode, struct file *file) +static int m54xx_wdt_release(struct inode *inode, struct file *file) { if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) wdt_disable(); @@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file) } -static const struct file_operations m548x_wdt_fops = { +static const struct file_operations m54xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .write = m548x_wdt_write, - .unlocked_ioctl = m548x_wdt_ioctl, - .open = m548x_wdt_open, - .release = m548x_wdt_release, + .write = m54xx_wdt_write, + .unlocked_ioctl = m54xx_wdt_ioctl, + .open = m54xx_wdt_open, + .release = m54xx_wdt_release, }; -static struct miscdevice m548x_wdt_miscdev = { +static struct miscdevice m54xx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", - .fops = &m548x_wdt_fops, + .fops = &m54xx_wdt_fops, }; -static int __init m548x_wdt_init(void) +static int __init m54xx_wdt_init(void) { if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, - "Coldfire M548x Watchdog")) { + "Coldfire M54xx Watchdog")) { printk(KERN_WARNING - "Coldfire M548x Watchdog : I/O region busy\n"); + "Coldfire M54xx Watchdog : I/O region busy\n"); return -EBUSY; } printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); - return misc_register(&m548x_wdt_miscdev); + return misc_register(&m54xx_wdt_miscdev); } -static void __exit m548x_wdt_exit(void) +static void __exit m54xx_wdt_exit(void) { - misc_deregister(&m548x_wdt_miscdev); + misc_deregister(&m54xx_wdt_miscdev); release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); } -module_init(m548x_wdt_init); -module_exit(m548x_wdt_exit); +module_init(m54xx_wdt_init); +module_exit(m54xx_wdt_exit); MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); -MODULE_DESCRIPTION("Coldfire M548x Watchdog"); +MODULE_DESCRIPTION("Coldfire M54xx Watchdog"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index db8c4c4ac88..24177272bcb 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; #ifdef CONFIG_PM_SLEEP static int xen_hvm_suspend(void *data) { + int err; struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; int *cancelled = data; BUG_ON(!irqs_disabled()); + err = sysdev_suspend(PMSG_SUSPEND); + if (err) { + printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", + err); + return err; + } + *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); xen_hvm_post_suspend(*cancelled); @@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data) xen_timer_resume(); } + sysdev_resume(); + return 0; } diff --git a/fs/afs/write.c b/fs/afs/write.c index 15690bb1d3b..789b3afb342 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, candidate->first = candidate->last = index; candidate->offset_first = from; 
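[Editor's aside; illustrative, not part of the commit] The omap_hdq_probe() rework above moves from one combined clock-lookup error check to the usual sequential-acquire pattern: each clk_get() gets its own error label, and the unwind path releases resources strictly in reverse order of acquisition. A minimal sketch of that idiom, assuming hypothetical map_regs()/get_clk() helpers in place of ioremap()/clk_get() so it builds outside the kernel:

#include <stdlib.h>

/* Hypothetical stand-ins for ioremap()/clk_get(); they exist only to
 * keep the sketch self-contained and runnable. */
static void *map_regs(void)            { return malloc(16); }
static void  unmap_regs(void *regs)    { free(regs); }
static void *get_clk(const char *name) { (void)name; return malloc(1); }
static void  put_clk(void *clk)        { free(clk); }

static int probe_sketch(void)
{
	void *regs, *ick, *fck;

	regs = map_regs();
	if (!regs)
		return -1;

	ick = get_clk("ick");
	if (!ick)
		goto err_ick;		/* only the mapping needs undoing */

	fck = get_clk("fck");
	if (!fck)
		goto err_fck;		/* put ick first, then fall through to the mapping */

	/* success: a real probe keeps regs/ick/fck until remove() */
	return 0;

err_fck:
	put_clk(ick);
err_ick:
	unmap_regs(regs);
	return -1;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}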
candidate->to_last = to; + INIT_LIST_HEAD(&candidate->link); candidate->usage = 1; candidate->state = AFS_WBACK_PENDING; init_waitqueue_head(&candidate->waitq); @@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx) call_rcu(&ctx->rcu_head, ctx_rcu_free); } -#define get_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - atomic_inc(&(kioctx)->users); \ -} while (0) -#define put_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ - __put_ioctx(kioctx); \ -} while (0) +static inline void get_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + atomic_inc(&kioctx->users); +} + +static inline int try_get_ioctx(struct kioctx *kioctx) +{ + return atomic_inc_not_zero(&kioctx->users); +} + +static inline void put_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + if (unlikely(atomic_dec_and_test(&kioctx->users))) + __put_ioctx(kioctx); +} /* ioctx_alloc * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. @@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) rcu_read_lock(); hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { - if (ctx->user_id == ctx_id && !ctx->dead) { - get_ioctx(ctx); + /* + * RCU protects us against accessing freed memory but + * we have to be careful not to get a reference when the + * reference count already dropped to 0 (ctx->dead test + * is unreliable because of races). + */ + if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ ret = ctx; break; } @@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, goto out_put_req; spin_lock_irq(&ctx->ctx_lock); + /* + * We could have raced with io_destroy() and are currently holding a + * reference to ctx which should be destroyed. We cannot submit IO + * since ctx gets freed as soon as io_submit() puts its reference. The + * check here is reliable: io_destroy() sets ctx->dead before waiting + * for outstanding IO and the barrier between these two is realized by + * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we + * increment ctx->reqs_active before checking for ctx->dead and the + * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we + * don't see ctx->dead set here, io_destroy() waits for our IO to + * finish. + */ + if (ctx->dead) { + spin_unlock_irq(&ctx->ctx_lock); + ret = -EINVAL; + goto out_put_req; + } aio_run_iocb(req); if (!list_empty(&ctx->run_list)) { /* drain the run list */ diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9..88928701959 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); if (ret) goto out_del; + /* + * bdev could be deleted beneath us which would implicitly destroy + * the holder directory. Hold on to it. 
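[Editor's aside; illustrative, not part of the commit] The fs/aio.c change above closes a lookup race: under RCU the list walker can still see a kioctx whose refcount has already dropped to 0, so the reference must be taken with atomic_inc_not_zero() and the context skipped when that fails. A small user-space sketch of the same "increment only if still non-zero" step, using C11 atomics as stand-ins for the kernel helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int users;	/* reference count; 0 means the object is being freed */
};

/* Equivalent of atomic_inc_not_zero(): take a reference only while the
 * count is still positive; fail once it has reached 0. */
static bool try_get(struct obj *o)
{
	int v = atomic_load(&o->users);

	while (v > 0) {
		if (atomic_compare_exchange_weak(&o->users, &v, v + 1))
			return true;	/* got a reference */
		/* v was reloaded by the failed CAS; retry */
	}
	return false;			/* object is already dying, skip it */
}

int main(void)
{
	struct obj live = { .users = 1 }, dying = { .users = 0 };

	printf("live:  %s\n", try_get(&live)  ? "got ref" : "skipped");
	printf("dying: %s\n", try_get(&dying) ? "got ref" : "skipped");
	return 0;
}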
+ */ + kobject_get(bdev->bd_part->holder_dir); list_add(&holder->list, &bdev->bd_holder_disks); goto out_unlock; @@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + kobject_put(bdev->bd_part->holder_dir); list_del_init(&holder->list); kfree(holder); } @@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); * flush_disk - invalidates all buffer-cache entries on a disk * * @bdev: struct block device to be flushed + * @kill_dirty: flag to guide handling of dirty inodes * * Invalidates all buffer-cache entries on a disk. It should be called * when a disk has been changed -- either by a media change or online * resize. */ -static void flush_disk(struct block_device *bdev) +static void flush_disk(struct block_device *bdev, bool kill_dirty) { - if (__invalidate_device(bdev)) { + if (__invalidate_device(bdev, kill_dirty)) { char name[BDEVNAME_SIZE] = ""; if (bdev->bd_disk) @@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) "%s: detected capacity change from %lld to %lld\n", name, bdev_size, disk_size); i_size_write(bdev->bd_inode, disk_size); - flush_disk(bdev); + flush_disk(bdev, false); } } EXPORT_SYMBOL(check_disk_size_change); @@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev) if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; - flush_disk(bdev); + flush_disk(bdev, true); if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); return 1; @@ -1215,12 +1222,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); - /* __blkdev_get() may alter read only status, check it afterwards */ - if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { - __blkdev_put(bdev, mode, 0); - res = -EACCES; - } - if (whole) { /* finish claiming */ mutex_lock(&bdev->bd_mutex); @@ -1298,6 +1299,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, if (err) return ERR_PTR(err); + if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { + blkdev_put(bdev, mode); + return ERR_PTR(-EACCES); + } + return bdev; } EXPORT_SYMBOL(blkdev_get_by_path); @@ -1601,7 +1607,7 @@ fail: } EXPORT_SYMBOL(lookup_bdev); -int __invalidate_device(struct block_device *bdev) +int __invalidate_device(struct block_device *bdev, bool kill_dirty) { struct super_block *sb = get_super(bdev); int res = 0; @@ -1614,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev) * hold). 
*/ shrink_dcache_sb(sb); - res = invalidate_inodes(sb); + res = invalidate_inodes(sb, kill_dirty); drop_super(sb); } invalidate_bdev(bdev); diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 15b5ca2a260..9c949348510 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) char *value = NULL; struct posix_acl *acl; + if (!IS_POSIXACL(inode)) + return NULL; + acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; @@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, struct posix_acl *acl; int ret = 0; + if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + acl = btrfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f745287fbf2..4d2110eafe2 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_len; u64 em_start; struct extent_map *em; - int ret; + int ret = -ENOMEM; u32 *sums; tree = &BTRFS_I(inode)->io_tree; @@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); + if (!cb) + goto out; + atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, + cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + if (!cb->compressed_pages) + goto fail1; + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (page_index = 0; page_index < nr_pages; page_index++) { cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!cb->compressed_pages[page_index]) + goto fail2; } cb->nr_pages = nr_pages; @@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); + if (!comp_bio) + goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); @@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; + +fail2: + for (page_index = 0; page_index < nr_pages; page_index++) + free_page((unsigned long)cb->compressed_pages[page_index]); + + kfree(cb->compressed_pages); +fail1: + kfree(cb); +out: + free_extent_map(em); + return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; @@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, return ret; } -void __exit btrfs_exit_compress(void) +void btrfs_exit_compress(void) { free_workspaces(); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c98b3af605..6f820fa23df 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1254,6 +1254,7 @@ struct btrfs_root { #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) +#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 
start, u64 end); int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes); +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b531c36455d..e1aa8d607bc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (page->private == EXTENT_PAGE_PRIVATE) + if (page->private == EXTENT_PAGE_PRIVATE) { + WARN_ON(1); goto out; - if (!page->private) + } + if (!page->private) { + WARN_ON(1); goto out; + } len = page->private >> 2; WARN_ON(len == 0); @@ -1550,6 +1554,7 @@ static int transaction_kthread(void *arg) spin_unlock(&root->fs_info->new_trans_lock); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -2453,10 +2458,14 @@ int btrfs_commit_super(struct btrfs_root *root) up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); @@ -2554,6 +2563,8 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); + kfree(fs_info); + return 0; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 9786963b07e..ff27d7a477b 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child) int ret; path = btrfs_alloc_path(); + if (!path) + return ERR_PTR(-ENOMEM); if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b55269340ce..588ff984987 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -320,11 +320,6 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; - exclude_super_stripes(extent_root, block_group); - spin_lock(&block_group->space_info->lock); - block_group->space_info->bytes_readonly += block_group->bytes_super; - spin_unlock(&block_group->space_info->lock); - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* @@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, cache->cached = BTRFS_CACHE_NO; } spin_unlock(&cache->lock); - if (ret == 1) + if (ret == 1) { + free_excluded_extents(fs_info->extent_root, cache); return 0; + } } if (load_cache_only) @@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; + long time_left; int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; + int loops = 0; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; @@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); - while (1) { + while (loops < 1024) { /* have the flusher threads jump in and do some IO */ smp_mb(); nr_pages = min_t(unsigned long, nr_pages, @@ -3366,8 +3365,12 
@@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) + if (reserved > space_info->bytes_reserved) { + loops = 0; reclaimed += reserved - space_info->bytes_reserved; + } else { + loops++; + } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); @@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return -EAGAIN; __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(pause); + time_left = schedule_timeout(pause); + + /* We were interrupted, exit */ + if (time_left) + break; + pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; @@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes > 0) { if (dest) { - block_rsv_add_bytes(dest, num_bytes, 0); - } else { + spin_lock(&dest->lock); + if (!dest->full) { + u64 bytes_to_add; + + bytes_to_add = dest->size - dest->reserved; + bytes_to_add = min(num_bytes, bytes_to_add); + dest->reserved += bytes_to_add; + if (dest->reserved >= dest->size) + dest->full = 1; + num_bytes -= bytes_to_add; + } + spin_unlock(&dest->lock); + } + if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; spin_unlock(&space_info->lock); @@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); + WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); @@ -5355,7 +5376,7 @@ again: num_bytes, data, 1); goto again; } - if (ret == -ENOSPC) { + if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); @@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; block_rsv = get_block_rsv(trans, root); @@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (block_rsv->size == 0) { ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0); - if (ret) + /* + * If we couldn't reserve metadata bytes try and use some from + * the global reserve. 
+ */ + if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + return ERR_PTR(ret); + } else if (ret) { return ERR_PTR(ret); + } return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; + if (ret) { + WARN_ON(1); + ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, + 0); + if (!ret) { + spin_lock(&block_rsv->lock); + block_rsv->size += blocksize; + spin_unlock(&block_rsv->lock); + return block_rsv; + } else if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + } + } return ERR_PTR(-ENOSPC); } @@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); + if (block_rsv) trans->block_rsv = block_rsv; @@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); if (block_rsv) trans->block_rsv = block_rsv; } @@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; @@ -6531,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 end = start + extent_key->offset - 1; em = alloc_extent_map(GFP_NOFS); - BUG_ON(!em || IS_ERR(em)); + BUG_ON(!em); em->start = start; em->len = extent_key->offset; @@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); @@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) if (found) { trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } @@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); @@ -8013,6 +8065,13 @@ out: return ret; } +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type) +{ + u64 alloc_flags = get_alloc_profile(root, type); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); +} + /* * helper to account the unused space of all the readonly block group in the * list. takes mirrors into account. @@ -8270,6 +8329,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); + /* + * We haven't cached this block group, which means we could + * possibly have excluded extents on this block group. 
+ */ + if (block_group->cached == BTRFS_CACHE_NO) + free_excluded_extents(info->extent_root, block_group); + btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); @@ -8385,6 +8451,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->sectorsize = root->sectorsize; /* + * We need to exclude the super stripes now so that the space + * info has super bytes accounted for, otherwise we'll think + * we have more space than we actually do. + */ + exclude_super_stripes(root, cache); + + /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't * find any space, or we are empty, and we can just add all @@ -8392,12 +8465,10 @@ int btrfs_read_block_groups(struct btrfs_root *root) * time, particularly in the full case. */ if (found_key.offset == btrfs_block_group_used(&cache->item)) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2e993cf1766..fd3f172e94e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits) + unsigned long bits, int contig) { struct rb_node *node; struct extent_state *state; u64 cur_start = *start; u64 total_bytes = 0; + u64 last = 0; int found = 0; if (search_end <= cur_start) { @@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, state = rb_entry(node, struct extent_state, rb_node); if (state->start > search_end) break; - if (state->end >= cur_start && (state->state & bits)) { + if (contig && found && state->start > last + 1) + break; + if (state->end >= cur_start && (state->state & bits) == bits) { total_bytes += min(search_end, state->end) + 1 - max(cur_start, state->start); if (total_bytes >= max_bytes) @@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, *start = state->start; found = 1; } + last = state->end; + } else if (contig && found) { + break; } node = rb_next(node); if (!node) @@ -1865,7 +1871,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num, bio_flags, start); else submit_bio(rw, bio); @@ -1920,6 +1926,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + if (!bio) + return -ENOMEM; bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; @@ -1944,6 +1952,7 @@ void set_page_extent_mapped(struct page *page) static void set_page_extent_head(struct page *page, unsigned long len) { + WARN_ON(!PagePrivate(page)); set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); } @@ -2126,7 +2135,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); if (bio) - submit_one_bio(READ, bio, 0, bio_flags); + ret = submit_one_bio(READ, 
bio, 0, bio_flags); return ret; } @@ -2819,9 +2828,17 @@ int try_release_extent_state(struct extent_map_tree *map, * at this point we can safely clear everything except the * locked bit and the nodatasum bit */ - clear_extent_bit(tree, start, end, + ret = clear_extent_bit(tree, start, end, ~(EXTENT_LOCKED | EXTENT_NODATASUM), 0, 0, NULL, mask); + + /* if clear_extent_bit failed for enomem reasons, + * we can't allow the release to continue. + */ + if (ret < 0) + ret = 0; + else + ret = 1; } return ret; } @@ -2901,6 +2918,46 @@ out: return sector; } +/* + * helper function for fiemap, which doesn't want to see any holes. + * This maps until we find something past 'last' + */ +static struct extent_map *get_extent_skip_holes(struct inode *inode, + u64 offset, + u64 last, + get_extent_t *get_extent) +{ + u64 sectorsize = BTRFS_I(inode)->root->sectorsize; + struct extent_map *em; + u64 len; + + if (offset >= last) + return NULL; + + while(1) { + len = last - offset; + if (len == 0) + break; + len = (len + sectorsize - 1) & ~(sectorsize - 1); + em = get_extent(inode, NULL, 0, offset, len, 0); + if (!em || IS_ERR(em)) + return em; + + /* if this isn't a hole return it */ + if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && + em->block_start != EXTENT_MAP_HOLE) { + return em; + } + + /* this is a hole, advance to the next extent */ + offset = extent_map_end(em); + free_extent_map(em); + if (offset >= last) + break; + } + return NULL; +} + int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { @@ -2910,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u32 flags = 0; u32 found_type; u64 last; + u64 last_for_get_extent = 0; u64 disko = 0; + u64 isize = i_size_read(inode); struct btrfs_key found_key; struct extent_map *em = NULL; struct extent_state *cached_state = NULL; struct btrfs_path *path; struct btrfs_file_extent_item *item; int end = 0; - u64 em_start = 0, em_len = 0; + u64 em_start = 0; + u64 em_len = 0; + u64 em_end = 0; unsigned long emflags; - int hole = 0; if (len == 0) return -EINVAL; @@ -2929,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -ENOMEM; path->leave_spinning = 1; + /* + * lookup the last file extent. We're not using i_size here + * because there might be preallocation past i_size + */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, path, inode->i_ino, -1, 0); if (ret < 0) { @@ -2942,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); - /* No extents, just return */ + /* No extents, but there might be delalloc bits */ if (found_key.objectid != inode->i_ino || found_type != BTRFS_EXTENT_DATA_KEY) { - btrfs_free_path(path); - return 0; + /* have to trust i_size as the end */ + last = (u64)-1; + last_for_get_extent = isize; + } else { + /* + * remember the start of the last extent. There are a + * bunch of different factors that go into the length of the + * extent, so its much less complex to remember where it started + */ + last = found_key.offset; + last_for_get_extent = last + 1; } - last = found_key.offset; btrfs_free_path(path); + /* + * we might have some extents allocated but more delalloc past those + * extents. 
so, we trust isize unless the start of the last extent is + * beyond isize + */ + if (last < isize) { + last = (u64)-1; + last_for_get_extent = isize; + } + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, off, max - off, 0); + + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); if (!em) goto out; if (IS_ERR(em)) { @@ -2962,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - hole = 0; - off = em->start + em->len; + off = extent_map_end(em); if (off >= max) end = 1; - if (em->block_start == EXTENT_MAP_HOLE) { - hole = 1; - goto next; - } - em_start = em->start; em_len = em->len; - + em_end = extent_map_end(em); + emflags = em->flags; disko = 0; flags = 0; @@ -2993,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; -next: - emflags = em->flags; free_extent_map(em); em = NULL; - if (!end) { - em = get_extent(inode, NULL, 0, off, max - off, 0); - if (!em) - goto out; - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto out; - } - emflags = em->flags; - } - - if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { + if ((em_start >= last) || em_len == (u64)-1 || + (last == (u64)-1 && isize <= em_end)) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - if (em_start == last) { + /* now scan forward to see if this is really the last extent. */ + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out; + } + if (!em) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - - if (!hole) { - ret = fiemap_fill_next_extent(fieinfo, em_start, disko, - em_len, flags); - if (ret) - goto out_free; - } + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); + if (ret) + goto out_free; } out_free: free_extent_map(em); @@ -3192,7 +3263,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } if (!PageUptodate(p)) uptodate = 0; - unlock_page(p); + + /* + * see below about how we avoid a nasty race with release page + * and why we unlock later + */ + if (i != 0) + unlock_page(p); } if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); @@ -3216,9 +3293,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); radix_tree_preload_end(); + + /* + * there is a race where release page may have + * tried to find this extent buffer in the radix + * but failed. It will tell the VM it is safe to + * reclaim the, and it will clear the page private bit. 
+ * We must make sure to set the page private bit properly + * after the extent buffer is in the radix tree so + * it doesn't get lost + */ + set_page_extent_mapped(eb->first_page); + set_page_extent_head(eb->first_page, eb->len); + if (!page0) + unlock_page(eb->first_page); return eb; free_eb: + if (eb->first_page && !page0) + unlock_page(eb->first_page); + if (!atomic_dec_and_test(&eb->refs)) return exists; btrfs_release_extent_buffer(eb); @@ -3269,10 +3363,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, continue; lock_page(page); + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); if (i == 0) set_page_extent_head(page, eb->len); - else - set_page_private(page, EXTENT_PAGE_PRIVATE); clear_page_dirty_for_io(page); spin_lock_irq(&page->mapping->tree_lock); @@ -3462,6 +3557,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); + + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); + if (i == 0) + set_page_extent_head(page, eb->len); + if (inc_all_pages) page_cache_get(page); if (!PageUptodate(page)) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd06..9318dfefd59 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -191,7 +191,7 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits); + u64 max_bytes, unsigned long bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index b0e1fce1253..2b6c12e983b 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask) { struct extent_map *em; em = kmem_cache_alloc(extent_map_cache, mask); - if (!em || IS_ERR(em)) - return em; + if (!em) + return NULL; em->in_tree = 0; em->flags = 0; em->compress_type = BTRFS_COMPRESS_NONE; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a562a250ae7..4f19a3e1bf3 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, root = root->fs_info->csum_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; @@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (path->slots[0] == 0) goto out; path->slots[0]--; + } else if (ret < 0) { + goto out; } + leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c800d58f301..7084140d594 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -186,6 +186,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split = alloc_extent_map(GFP_NOFS); if (!split2) split2 = alloc_extent_map(GFP_NOFS); + BUG_ON(!split || !split2); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); @@ -793,8 +794,12 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - err = -ENOMEM; - BUG_ON(1); + int c; + for (c = i - 1; c >= 0; c--) { + unlock_page(pages[c]); + page_cache_release(pages[c]); + } + return -ENOMEM; } wait_on_page_writeback(pages[i]); } @@ -946,6 +951,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = 
kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto out; + } /* generic_write_checks can change our pos */ start_pos = pos; @@ -984,8 +993,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, size_t write_bytes = min(iov_iter_count(&i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); - size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + size_t num_pages = (write_bytes + offset + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1015,8 +1024,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 60d68426695..a0390657451 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, return entry; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static inline void +__unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); block_group->free_extents--; +} + +static void unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + __unlink_free_space(block_group, info); block_group->free_space -= info->bytes; } @@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; + u64 size = block_group->key.offset; /* * The goal is to keep the total amount of memory used per 1gb of space * at or below 32k, so we need to adjust how much memory we allow to be * used by extent based free space tracking */ - max_bytes = MAX_CACHE_BYTES_PER_GIG * - (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); + if (size < 1024 * 1024 * 1024) + max_bytes = MAX_CACHE_BYTES_PER_GIG; + else + max_bytes = MAX_CACHE_BYTES_PER_GIG * + div64_u64(size, 1024 * 1024 * 1024); /* * we want to account for 1 more bitmap than what we have so we can make @@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, recalculate_thresholds(block_group); } +static void free_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info) +{ + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); +} + static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) @@ -1195,6 +1216,7 @@ again: */ search_start = *offset; search_bytes = *bytes; + search_bytes = min(search_bytes, end - search_start + 1); ret = search_bitmap(block_group, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); @@ -1211,13 +1233,8 @@ again: if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); - if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - 
recalculate_thresholds(block_group); - } + if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1250,13 +1267,8 @@ again: return -EAGAIN; goto again; - } else if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + } else if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); return 0; } @@ -1359,22 +1371,14 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *right_info = NULL; - struct btrfs_free_space *left_info = NULL; - struct btrfs_free_space *info = NULL; - int ret = 0; - - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); - if (!info) - return -ENOMEM; - - info->offset = offset; - info->bytes = bytes; - - spin_lock(&block_group->tree_lock); + struct btrfs_free_space *left_info; + struct btrfs_free_space *right_info; + bool merged = false; + u64 offset = info->offset; + u64 bytes = info->bytes; /* * first we want to see if there is free space adjacent to the range we @@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, else left_info = tree_search_offset(block_group, offset - 1, 0, 0); - /* - * If there was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - if ((!left_info || left_info->bitmap) && - (!right_info || right_info->bitmap)) { - ret = insert_into_bitmap(block_group, info); - - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } - } - if (right_info && !right_info->bitmap) { - unlink_free_space(block_group, right_info); + if (update_stat) + unlink_free_space(block_group, right_info); + else + __unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); + merged = true; } if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { - unlink_free_space(block_group, left_info); + if (update_stat) + unlink_free_space(block_group, left_info); + else + __unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); + merged = true; } + return merged; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *info; + int ret = 0; + + info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!info) + return -ENOMEM; + + info->offset = offset; + info->bytes = bytes; + + spin_lock(&block_group->tree_lock); + + if (try_merge_free_space(block_group, info, true)) + goto link; + + /* + * There was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + ret = insert_into_bitmap(block_group, info); + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } +link: ret = link_free_space(block_group, info); if (ret) kfree(info); @@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); 
BUG_ON(entry->bitmap); + try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, entry->offset, &entry->offset_index, 0); } @@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) { - unlink_free_space(block_group, entry); - kfree(entry->bitmap); - kfree(entry); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!entry->bytes) + free_bitmap(block_group, entry); } else { unlink_free_space(block_group, entry); entry->offset += bytes; @@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); + if (entry->bytes == 0) + free_bitmap(block_group, entry); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); @@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; - if (entry->bytes == 0) { + if (entry->bytes == 0) rb_erase(&entry->offset_index, &cluster->root); - kfree(entry); - } break; } out: spin_unlock(&cluster->lock); + if (!ret) + return 0; + + spin_lock(&block_group->tree_lock); + + block_group->free_space -= bytes; + if (entry->bytes == 0) { + block_group->free_extents--; + kfree(entry); + } + + spin_unlock(&block_group->tree_lock); + return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 160b55b3e13..0efdb65953c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -416,7 +416,7 @@ again: } if (start == 0) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -612,6 +612,7 @@ retry: GFP_NOFS); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -643,6 +644,7 @@ retry: async_extent->ram_size - 1, 0); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; em->orig_start = em->start; @@ -771,7 +773,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -819,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(ret); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = start; em->orig_start = em->start; ram_size = ins.offset; @@ -1049,7 +1052,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } else { trans = btrfs_join_transaction(root, 1); } - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; cur_offset = start; @@ -1168,6 +1171,7 @@ out_check: struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; em->len = num_bytes; @@ -1557,6 +1561,7 @@ out: out_page: unlock_page(page); page_cache_release(page); + kfree(fixup); } /* @@ -1703,7 +1708,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + 
BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1720,6 +1725,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1907,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) private = 0; if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, - (u64)-1, 1, EXTENT_DIRTY)) { + (u64)-1, 1, EXTENT_DIRTY, 0)) { ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, &private_failure); if (ret == 0) { @@ -2354,6 +2360,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); @@ -2381,6 +2388,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) if (root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_end_transaction(trans, root); } @@ -2641,7 +2649,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto err; + goto out; } path->leave_spinning = 1; @@ -2714,9 +2722,10 @@ static int check_path_shared(struct btrfs_root *root, struct extent_buffer *eb; int level; u64 refs = 1; - int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + int ret; + if (!path->nodes[level]) break; eb = path->nodes[level]; @@ -2727,7 +2736,7 @@ static int check_path_shared(struct btrfs_root *root, if (refs > 1) return 1; } - return ret; /* XXX callers? 
*/ + return 0; } /* @@ -4134,7 +4143,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } srcu_read_unlock(&root->fs_info->subvol_srcu, index); - if (root != sub_root) { + if (!IS_ERR(inode) && root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) btrfs_orphan_cleanup(sub_root); @@ -4347,6 +4356,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); @@ -4372,6 +4383,7 @@ void btrfs_dirty_inode(struct inode *inode) return; trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); @@ -5176,6 +5188,8 @@ again: em = NULL; btrfs_release_path(root, path); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return ERR_CAST(trans); goto again; } map = kmap(page); @@ -5266,6 +5280,128 @@ out: return em; } +struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) +{ + struct extent_map *em; + struct extent_map *hole_em = NULL; + u64 range_start = start; + u64 end; + u64 found; + u64 found_end; + int err = 0; + + em = btrfs_get_extent(inode, page, pg_offset, start, len, create); + if (IS_ERR(em)) + return em; + if (em) { + /* + * if our em maps to a hole, there might + * actually be delalloc bytes behind it + */ + if (em->block_start != EXTENT_MAP_HOLE) + return em; + else + hole_em = em; + } + + /* check to see if we've wrapped (len == -1 or similar) */ + end = start + len; + if (end < start) + end = (u64)-1; + else + end -= 1; + + em = NULL; + + /* ok, we didn't find anything, lets look for delalloc */ + found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + end, len, EXTENT_DELALLOC, 1); + found_end = range_start + found; + if (found_end < range_start) + found_end = (u64)-1; + + /* + * we didn't find anything useful, return + * the original results from get_extent() + */ + if (range_start > end || found_end <= start) { + em = hole_em; + hole_em = NULL; + goto out; + } + + /* adjust the range_start to make sure it doesn't + * go backwards from the start they passed in + */ + range_start = max(start,range_start); + found = found_end - range_start; + + if (found > 0) { + u64 hole_start = start; + u64 hole_len = len; + + em = alloc_extent_map(GFP_NOFS); + if (!em) { + err = -ENOMEM; + goto out; + } + /* + * when btrfs_get_extent can't find anything it + * returns one huge hole + * + * make sure what it found really fits our range, and + * adjust to make sure it is based on the start from + * the caller + */ + if (hole_em) { + u64 calc_end = extent_map_end(hole_em); + + if (calc_end <= start || (hole_em->start > end)) { + free_extent_map(hole_em); + hole_em = NULL; + } else { + hole_start = max(hole_em->start, start); + hole_len = calc_end - hole_start; + } + } + em->bdev = NULL; + if (hole_em && range_start > hole_start) { + /* our hole starts before our delalloc, so we + * have to return just the parts of the hole + * that go until the delalloc starts + */ + em->len = min(hole_len, + range_start - hole_start); + em->start = hole_start; + em->orig_start = hole_start; + /* + * don't adjust block start at all, + * it is fixed at EXTENT_MAP_HOLE + */ + 
em->block_start = hole_em->block_start; + em->block_len = hole_len; + } else { + em->start = range_start; + em->len = found; + em->orig_start = range_start; + em->block_start = EXTENT_MAP_DELALLOC; + em->block_len = found; + } + } else if (hole_em) { + return hole_em; + } +out: + + free_extent_map(hole_em); + if (err) { + free_extent_map(em); + return ERR_PTR(err); + } + return em; +} + static struct extent_map *btrfs_new_extent_direct(struct inode *inode, u64 start, u64 len) { @@ -5280,8 +5416,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); trans = btrfs_join_transaction(root, 0); - if (!trans) - return ERR_PTR(-ENOMEM); + if (IS_ERR(trans)) + return ERR_CAST(trans); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -5505,7 +5641,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * while we look for nocow cross refs */ trans = btrfs_join_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto must_cow; if (can_nocow_odirect(trans, inode, start, len) == 1) { @@ -5640,7 +5776,7 @@ again: BUG_ON(!ordered); trans = btrfs_join_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { err = -ENOMEM; goto out; } @@ -6088,7 +6224,7 @@ out: static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { - return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); + return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); } int btrfs_readpage(struct file *file, struct page *page) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a506a22b522..5fdb2abc4fa 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, if (new_size > old_size) { trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_unlock; + } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -1067,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; - if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + if (flags & BTRFS_SUBVOL_CREATE_ASYNC) return -EINVAL; if (flags & ~BTRFS_SUBVOL_RDONLY) return -EOPNOTSUPP; + if (!is_owner_or_cap(inode)) + return -EACCES; + down_write(&root->fs_info->subvol_sem); /* nothing to do */ @@ -1093,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, goto out_reset; } - ret = btrfs_update_root(trans, root, + ret = btrfs_update_root(trans, root->fs_info->tree_root, &root->root_key, &root->root_item); btrfs_commit_transaction(trans, root); @@ -1898,7 +1905,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, memcpy(&new_key, &key, sizeof(new_key)); new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { @@ -2082,7 +2092,7 @@ static long btrfs_ioctl_trans_start(struct file *file) ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto 
out_drop; file->private_data = trans; @@ -2138,9 +2148,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { btrfs_free_path(path); - return -ENOMEM; + return PTR_ERR(trans); } dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); @@ -2201,7 +2211,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) int num_types = 4; int alloc_size; int ret = 0; - int slot_count = 0; + u64 slot_count = 0; int i, c; if (copy_from_user(&space_args, @@ -2240,7 +2250,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) goto out; } - slot_count = min_t(int, space_args.space_slots, slot_count); + slot_count = min_t(u64, space_args.space_slots, slot_count); alloc_size = sizeof(*dest) * slot_count; @@ -2260,6 +2270,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; + if (!slot_count) + break; + info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &root->fs_info->space_info, @@ -2281,7 +2294,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; + slot_count--; } + if (!slot_count) + break; } up_read(&info->groups_sem); } @@ -2334,6 +2350,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp u64 transid; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); transid = trans->transid; btrfs_commit_transaction_async(trans, root, 0); diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399d..a178f5ebea7 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long tot_out; unsigned long tot_len; char *buf; + bool may_late_unmap, need_unmap; data_in = kmap(pages_in[0]); tot_len = read_compress_length(data_in); @@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in += in_len; working_bytes = in_len; + may_late_unmap = need_unmap = false; /* fast path: avoid using the working buffer */ if (in_page_bytes_left >= in_len) { buf = data_in + in_offset; bytes = in_len; + may_late_unmap = true; goto cont; } @@ -329,14 +332,17 @@ cont: if (working_bytes == 0 && tot_in >= tot_len) break; - kunmap(pages_in[page_in_index]); - page_in_index++; - if (page_in_index >= total_pages_in) { + if (page_in_index + 1 >= total_pages_in) { ret = -1; - data_in = NULL; goto done; } - data_in = kmap(pages_in[page_in_index]); + + if (may_late_unmap) + need_unmap = true; + else + kunmap(pages_in[page_in_index]); + + data_in = kmap(pages_in[++page_in_index]); in_page_bytes_left = PAGE_CACHE_SIZE; in_offset = 0; @@ -346,6 +352,8 @@ cont: out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); + if (need_unmap) + kunmap(pages_in[page_in_index - 1]); if (ret != LZO_E_OK) { printk(KERN_WARNING "btrfs decompress failed\n"); ret = -1; @@ -363,8 +371,7 @@ cont: break; } done: - if (data_in) - kunmap(pages_in[page_in_index]); + kunmap(pages_in[page_in_index]); return ret; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2b61e1ddcd9..083a5547737 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct 
rb_root *root = &tree->tree; - struct rb_node *prev; + struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0d126be22b6..fb2605d998e 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) #else BUG(); #endif + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 045c9c2b2d7..31ade5802ae 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, new_node->bytenr = dest->node->start; new_node->level = node->level; new_node->lowest = node->lowest; + new_node->checked = 1; new_node->root = dest; if (!node->lowest) { @@ -2028,6 +2029,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, while (1) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, @@ -2147,6 +2149,12 @@ again: } trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + if (!err) + btrfs_block_rsv_release(rc->extent_root, + rc->block_rsv, num_bytes); + return PTR_ERR(trans); + } if (!err) { if (num_bytes != rc->merging_rsv_size) { @@ -3222,6 +3230,7 @@ truncate: trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); + ret = PTR_ERR(trans); goto out; } @@ -3628,6 +3637,7 @@ int prepare_to_relocate(struct reloc_control *rc) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; } @@ -3644,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) u32 item_size; int ret; int err = 0; + int progress = 0; path = btrfs_alloc_path(); if (!path) @@ -3656,8 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } while (1) { + progress++; trans = btrfs_start_transaction(rc->extent_root, 0); - + BUG_ON(IS_ERR(trans)); +restart: if (update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); continue; @@ -3770,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } } } + if (trans && progress && err == -ENOSPC) { + ret = btrfs_force_chunk_alloc(trans, rc->extent_root, + rc->block_group->flags); + if (ret == 0) { + err = 0; + progress = 0; + goto restart; + } + } btrfs_release_path(rc->extent_root, path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, @@ -3804,7 +3826,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) /* get rid of pinned extents */ trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); out_free: btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); btrfs_free_path(path); @@ -4022,6 +4047,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); @@ -4125,6 +4151,11 @@ int btrfs_recover_relocation(struct btrfs_root *root) 
set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + unset_reloc_control(rc); + err = PTR_ERR(trans); + goto out_free; + } rc->merge_reloc_tree = 1; @@ -4154,9 +4185,13 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); -out: + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); +out_free: kfree(rc); +out: while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index b2130c46fdb..d39a9895d93 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -155,7 +155,8 @@ enum { Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, - Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, + Opt_enospc_debug, Opt_err, }; static match_table_t tokens = { @@ -184,6 +185,7 @@ static match_table_t tokens = { {Opt_space_cache, "space_cache"}, {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, + {Opt_enospc_debug, "enospc_debug"}, {Opt_err, NULL}, }; @@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_user_subvol_rm_allowed: btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); break; + case Opt_enospc_debug: + btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); @@ -383,7 +388,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; - char *opts, *p; + char *opts, *orig, *p; int error = 0; int intarg; @@ -397,6 +402,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; + orig = opts; while ((p = strsep(&opts, ",")) != NULL) { int token; @@ -432,7 +438,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, } out_free_opts: - kfree(opts); + kfree(orig); out: /* * If no subvolume name is specified we use the default one. 
Allocate @@ -623,6 +629,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); return ret; } @@ -761,6 +769,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } btrfs_close_devices(fs_devices); + kfree(fs_info); + kfree(tree_root); } else { char b[BDEVNAME_SIZE]; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe..3d73c8d93bb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; ac->newtrans = btrfs_join_transaction(root, 0); + if (IS_ERR(ac->newtrans)) { + int err = PTR_ERR(ac->newtrans); + kfree(ac); + return err; + } /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac571..a4bbb854dfd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); + if (!dst_copy || !src_copy) { + btrfs_release_path(root, path); + kfree(dst_copy); + kfree(src_copy); + return -ENOMEM; + } read_extent_buffer(eb, src_copy, src_ptr, item_size); @@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_dir_item_key_to_cpu(leaf, di, &location); name_len = btrfs_dir_name_len(leaf, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); btrfs_release_path(root, path); @@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, int match = 0; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret != 0) goto out; @@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, key.offset = (u64)-1; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + log_type = btrfs_dir_type(eb, di); read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); @@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, root_owner = btrfs_header_owner(parent); next = btrfs_find_create_tree_block(root, bytenr, blocksize); + if (!next) + return -ENOMEM; if (*level == 1) { wc->process_func(root, next, wc, ptr_gen); @@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, wait_log_commit(trans, log_root_tree, log_root_tree->log_transid); mutex_unlock(&log_root_tree->log_mutex); + ret = 0; goto out; } atomic_set(&log_root_tree->log_commit[index2], 1); @@ -2096,7 +2116,7 @@ out: smp_mb(); if (waitqueue_active(&root->log_commit_wait[index1])) wake_up(&root->log_commit_wait[index1]); - return 0; + return ret; } static void free_log_tree(struct btrfs_trans_handle *trans, @@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return 
-ENOMEM; + di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, name, name_len, -1); if (IS_ERR(di)) { @@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); + if (!ins_data) + return -ENOMEM; + ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); @@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; dst_path = btrfs_alloc_path(); + if (!dst_path) { + btrfs_free_path(path); + return -ENOMEM; + } min_key.objectid = inode->i_ino; min_key.type = BTRFS_INODE_ITEM_KEY; @@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) BUG_ON(!path); trans = btrfs_start_transaction(fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); wc.trans = trans; wc.pin = 1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d158530233b..dd13eb81ee4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, return -ENOMEM; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; @@ -1334,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = btrfs_shrink_device(device, 0); if (ret) - goto error_brelse; + goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) - goto error_brelse; + goto error_undo; device->in_fs_metadata = 0; @@ -1412,6 +1416,13 @@ out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; +error_undo: + if (device->writeable) { + list_add(&device->dev_alloc_list, + &root->fs_info->fs_devices->alloc_list); + root->fs_info->fs_devices->rw_devices++; + } + goto error_brelse; } /* @@ -1601,11 +1612,19 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ret = find_next_devid(root, &device->devid); if (ret) { + kfree(device->name); kfree(device); goto error; } trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + kfree(device->name); + kfree(device); + ret = PTR_ERR(trans); + goto error; + } + lock_chunks(root); device->writeable = 1; @@ -1621,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; - device->mode = 0; + device->mode = FMODE_EXCL; set_blocksize(device->bdev, 4096); if (seeding_dev) { @@ -1873,7 +1892,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); lock_chunks(root); @@ -2047,7 +2066,7 @@ int btrfs_balance(struct btrfs_root *dev_root) BUG_ON(ret); trans = btrfs_start_transaction(dev_root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_grow_device(trans, device, old_size); BUG_ON(ret); @@ -2213,6 +2232,11 @@ again: /* Shrinking succeeded, else we would be at "done". 
*/ trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto done; + } + lock_chunks(root); device->disk_total_bytes = new_size; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bc68de8edd..f0aef787a10 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry) } di->dentry = dentry; di->lease_session = NULL; + di->parent_inode = igrab(dentry->d_parent->d_inode); dentry->d_fsdata = di; dentry->d_time = jiffies; ceph_dentry_lru_add(dentry); @@ -1033,7 +1034,7 @@ static void ceph_dentry_release(struct dentry *dentry) u64 snapid = CEPH_NOSNAP; if (!IS_ROOT(dentry)) { - parent_inode = dentry->d_parent->d_inode; + parent_inode = di->parent_inode; if (parent_inode) snapid = ceph_snap(parent_inode); } @@ -1058,6 +1059,8 @@ static void ceph_dentry_release(struct dentry *dentry) kmem_cache_free(ceph_dentry_cachep, di); dentry->d_fsdata = NULL; } + if (parent_inode) + iput(parent_inode); } static int ceph_snapdir_d_revalidate(struct dentry *dentry, diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 39c243acd06..f40b9139e43 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) if (lastinode) iput(lastinode); - dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); - list_for_each_entry(child, &realm->children, child_item) - queue_realm_cap_snaps(child); + list_for_each_entry(child, &realm->children, child_item) { + dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n", + realm, realm->ino, child, child->ino); + list_del_init(&child->dirty_item); + list_add(&child->dirty_item, &realm->dirty_item); + } + list_del_init(&realm->dirty_item); dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); } @@ -683,7 +687,9 @@ more: * queue cap snaps _after_ we've built the new snap contexts, * so that i_head_snapc can be set appropriately. 
*/ - list_for_each_entry(realm, &dirty_realms, dirty_item) { + while (!list_empty(&dirty_realms)) { + realm = list_first_entry(&dirty_realms, struct ceph_snap_realm, + dirty_item); queue_realm_cap_snaps(realm); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 20b907d76ae..88fcaa21b80 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -207,6 +207,7 @@ struct ceph_dentry_info { struct dentry *dentry; u64 time; u64 offset; + struct inode *parent_inode; }; struct ceph_inode_xattrs_info { diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1e7636b145a..beeebf19423 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), GFP_KERNEL); + if (!ppace) { + cERROR(1, "DACL memory allocation error"); + return; + } for (i = 0; i < num_aces; ++i) { ppace[i] = (struct cifs_ace *) (acl_base + acl_size); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4a3330235d5..a9371b6578c 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.70" +#define CIFS_VERSION "1.71" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index edd5b29b53c..17afb0fbcae 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -188,6 +188,8 @@ struct TCP_Server_Info { /* multiplexed reads or writes */ unsigned int maxBuf; /* maxBuf specifies the maximum */ /* message size the server can send or receive for non-raw SMBs */ + /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */ + /* when socket is setup (and during reconnect) before NegProt sent */ unsigned int max_rw; /* maxRw specifies the maximum */ /* message size the server can send or receive for */ /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. 
*/ @@ -652,7 +654,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, #define MID_REQUEST_SUBMITTED 2 #define MID_RESPONSE_RECEIVED 4 #define MID_RETRY_NEEDED 8 /* session closed while this request out */ -#define MID_NO_RESP_NEEDED 0x10 +#define MID_RESPONSE_MALFORMED 0x10 /* Types of response buffer returned from SendReceive2 */ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 46c66ed01af..904aa47e351 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) } } - if (ses->status == CifsExiting) - return -EIO; - /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds @@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) * retrying until process is killed or server comes * back on-line */ - if (!tcon->retry || ses->status == CifsExiting) { + if (!tcon->retry) { cFYI(1, "gave up waiting on reconnect in smb_init"); return -EHOSTDOWN; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47d8ff62368..8d6c17ab593 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -337,8 +337,13 @@ cifs_echo_request(struct work_struct *work) struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); - /* no need to ping if we got a response recently */ - if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) + /* + * We cannot send an echo until the NEGOTIATE_PROTOCOL request is + * done, which is indicated by maxBuf != 0. Also, no need to ping if + * we got a response recently + */ + if (server->maxBuf == 0 || + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; rc = CIFSSMBEcho(server); @@ -578,14 +583,23 @@ incomplete_rcv: else if (reconnect == 1) continue; - length += 4; /* account for rfc1002 hdr */ + total_read += 4; /* account for rfc1002 hdr */ + dump_smb(smb_buffer, total_read); - dump_smb(smb_buffer, length); - if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) { - cifs_dump_mem("Bad SMB: ", smb_buffer, 48); - continue; - } + /* + * We know that we received enough to get to the MID as we + * checked the pdu_length earlier. Now check to see + * if the rest of the header is OK. We borrow the length + * var for the rest of the loop to avoid a new stack var. + * + * 48 bytes is enough to display the header and a little bit + * into the payload for debugging purposes. 
+ */ + length = checkSMB(smb_buffer, smb_buffer->Mid, total_read); + if (length != 0) + cifs_dump_mem("Bad SMB: ", smb_buffer, + min_t(unsigned int, total_read, 48)); mid_entry = NULL; server->lstrp = jiffies; @@ -597,7 +611,8 @@ incomplete_rcv: if ((mid_entry->mid == smb_buffer->Mid) && (mid_entry->midState == MID_REQUEST_SUBMITTED) && (mid_entry->command == smb_buffer->Command)) { - if (check2ndT2(smb_buffer,server->maxBuf) > 0) { + if (length == 0 && + check2ndT2(smb_buffer, server->maxBuf) > 0) { /* We have a multipart transact2 resp */ isMultiRsp = true; if (mid_entry->resp_buf) { @@ -632,12 +647,17 @@ incomplete_rcv: mid_entry->resp_buf = smb_buffer; mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - mid_entry->midState = MID_RESPONSE_RECEIVED; - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); + if (length == 0) + mid_entry->midState = + MID_RESPONSE_RECEIVED; + else + mid_entry->midState = + MID_RESPONSE_MALFORMED; #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); break; } mid_entry = NULL; @@ -653,6 +673,9 @@ multi_t2_fnd: else smallbuf = NULL; } + } else if (length != 0) { + /* response sanity checks failed */ + continue; } else if (!is_valid_oplock_break(smb_buffer, server) && !isMultiRsp) { cERROR(1, "No task to wake, unknown frame received! " diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74c0a282d01..e964b1cd5dd 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1662,10 +1662,10 @@ static ssize_t cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { - size_t total_written = 0; - unsigned int written = 0; - unsigned long num_pages, npages; - size_t copied, len, cur_len, i; + unsigned int written; + unsigned long num_pages, npages, i; + size_t copied, len, cur_len; + ssize_t total_written = 0; struct kvec *to_send; struct page **pages; struct iov_iter it; @@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, { int rc; int xid; - unsigned int total_read, bytes_read = 0; + ssize_t total_read; + unsigned int bytes_read = 0; size_t len, cur_len; int iov_offset = 0; struct cifs_sb_info *cifs_sb; diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 8d9189f6447..79f641eeda3 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; - char *endp, scope_id[13]; + char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; @@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; - s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); - if (endp != scope_id + slen) - return 0; + rc = strict_strtoul(scope_id, 0, + (unsigned long *)&s6->sin6_scope_id); + rc = (rc == 0) ? 
1 : 0; } return rc; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1adc9625a34..16765703131 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate: if (type == LANMAN) { #ifdef CONFIG_CIFS_WEAK_PW_HASH - char lnm_session_key[CIFS_SESS_KEY_SIZE]; + char lnm_session_key[CIFS_AUTH_RESP_SIZE]; pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* Calculate hash with password and copy into bcc_ptr. * Encryption Key (stored as in cryptkey) gets used if the @@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate: true : false, lnm_session_key); ses->flags |= CIFS_SES_LANMAN; - memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); - bcc_ptr += CIFS_SESS_KEY_SIZE; + memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; /* can not sign if LANMAN negotiated so no need to calculate signing key? but what if server diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b8c5e2eb43d..46d8756f2b2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, if (rc) return rc; + /* enable signing if server requires it */ + if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; + mutex_lock(&server->srv_mutex); mid = AllocMidQEntry(in_buf, server); if (mid == NULL) { @@ -453,6 +457,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) case MID_RETRY_NEEDED: rc = -EAGAIN; break; + case MID_RESPONSE_MALFORMED: + rc = -EIO; + break; default: cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, mid->mid, mid->midState); diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 9c64ae9e4c1..2d8c87b951c 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1468,15 +1468,13 @@ static void work_stop(void) static int work_start(void) { - recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + recv_workqueue = create_singlethread_workqueue("dlm_recv"); if (!recv_workqueue) { log_print("can't start dlm_recv"); return -ENOMEM; } - send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + send_workqueue = create_singlethread_workqueue("dlm_send"); if (!send_workqueue) { log_print("can't start dlm_send"); destroy_workqueue(recv_workqueue); diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 6fc4f319b55..534c1d46e69 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *lower_dentry; struct vfsmount *lower_mnt; - struct dentry *dentry_save; - struct vfsmount *vfsmount_save; + struct dentry *dentry_save = NULL; + struct vfsmount *vfsmount_save = NULL; int rc = 1; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) goto out; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + nd->path.dentry = lower_dentry; + nd->path.mnt 
= lower_mnt; + } rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + } if (dentry->d_inode) { struct inode *lower_inode = ecryptfs_inode_to_lower(dentry->d_inode); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dbc84ed9633..e00753496e3 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry, u32 flags); int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd); + struct inode *ecryptfs_dir_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, struct dentry *ecryptfs_dentry, diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 81e10e6a944..7d1050e254f 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, + .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bd33f87a190..b592938a84b 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, unsigned int flags_save; int rc; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - flags_save = nd->flags; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; - nd->flags &= ~LOOKUP_OPEN; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + flags_save = nd->flags; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + nd->flags &= ~LOOKUP_OPEN; + } rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; - nd->flags = flags_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + nd->flags = flags_save; + } return rc; } @@ -241,8 +245,7 @@ out: */ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd) + struct inode *ecryptfs_dir_inode) { struct dentry *lower_dir_dentry; struct vfsmount *lower_mnt; @@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, goto out; if (special_file(lower_inode->i_mode)) goto out; - if (!ecryptfs_nd) - goto out; /* Released in this function */ page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); if (!page_virt) { @@ -349,75 +350,6 @@ out: } /** - * ecryptfs_new_lower_dentry - * @name: The name of the new dentry. - * @lower_dir_dentry: Parent directory of the new dentry. - * @nd: nameidata from last lookup. - * - * Create a new dentry or get it from lower parent dir. 
- */ -static struct dentry * -ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry, - struct nameidata *nd) -{ - struct dentry *new_dentry; - struct dentry *tmp; - struct inode *lower_dir_inode; - - lower_dir_inode = lower_dir_dentry->d_inode; - - tmp = d_alloc(lower_dir_dentry, name); - if (!tmp) - return ERR_PTR(-ENOMEM); - - mutex_lock(&lower_dir_inode->i_mutex); - new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd); - mutex_unlock(&lower_dir_inode->i_mutex); - - if (!new_dentry) - new_dentry = tmp; - else - dput(tmp); - - return new_dentry; -} - - -/** - * ecryptfs_lookup_one_lower - * @ecryptfs_dentry: The eCryptfs dentry that we are looking up - * @lower_dir_dentry: lower parent directory - * @name: lower file name - * - * Get the lower dentry from vfs. If lower dentry does not exist yet, - * create it. - */ -static struct dentry * -ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, - struct dentry *lower_dir_dentry, struct qstr *name) -{ - struct nameidata nd; - struct vfsmount *lower_mnt; - int err; - - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( - ecryptfs_dentry->d_parent)); - err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); - mntput(lower_mnt); - - if (!err) { - /* we dont need the mount */ - mntput(nd.path.mnt); - return nd.path.dentry; - } - if (err != -ENOENT) - return ERR_PTR(err); - - /* create a new lower dentry */ - return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd); -} - -/** * ecryptfs_lookup * @ecryptfs_dir_inode: The eCryptfs directory inode * @ecryptfs_dentry: The eCryptfs dentry that we are looking up @@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, size_t encrypted_and_encoded_name_size; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; struct dentry *lower_dir_dentry, *lower_dentry; - struct qstr lower_name; int rc = 0; if ((ecryptfs_dentry->d_name.len == 1 @@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, goto out_d_drop; } lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); - lower_name.name = ecryptfs_dentry->d_name.name; - lower_name.len = ecryptfs_dentry->d_name.len; - lower_name.hash = ecryptfs_dentry->d_name.hash; - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, &lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, + lower_dir_dentry, + ecryptfs_dentry->d_name.len); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; @@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, "filename; rc = [%d]\n", __func__, rc); goto out_d_drop; } - lower_name.name = encrypted_and_encoded_name; - lower_name.len = encrypted_and_encoded_name_size; - lower_name.hash = full_name_hash(lower_name.name, lower_name.len); - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, 
&lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(encrypted_and_encoded_name, + lower_dir_dentry, + encrypted_and_encoded_name_size); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; } lookup_and_interpose: rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, - ecryptfs_dir_inode, - ecryptfs_nd); + ecryptfs_dir_inode); goto out; out_d_drop: d_drop(ecryptfs_dentry); @@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), ecryptfs_dentry_to_lower(dentry), &lower_stat); if (!rc) { + fsstack_copy_attr_all(dentry->d_inode, + ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } diff --git a/fs/eventfd.c b/fs/eventfd.c index e0194b3e14d..d9a59177391 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get); * @ctx: [in] Pointer to eventfd context. * * The eventfd context reference must have been previously acquired either - * with eventfd_ctx_get() or eventfd_ctx_fdget()). + * with eventfd_ctx_get() or eventfd_ctx_fdget(). */ void eventfd_ctx_put(struct eventfd_ctx *ctx) { @@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. * @ctx: [in] Pointer to eventfd context. * @wait: [in] Wait queue to be removed. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked. * @@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. * @ctx: [in] Pointer to eventfd context. * @no_wait: [in] Different from zero if the operation should not block. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * - * -EAGAIN : The operation would have blocked but @no_wait was nonzero. + * -EAGAIN : The operation would have blocked but @no_wait was non-zero. * -ERESTARTSYS : A signal interrupted the wait operation. * * If @no_wait is zero, the function might sleep until the eventfd internal diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 267d0ada454..4a09af9e9a6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -63,6 +63,13 @@ * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). + * It is also acquired when inserting an epoll fd onto another epoll + * fd. We do this so that we walk the epoll tree and ensure that this + * insertion does not create a cycle of epoll file descriptors, which + * could lead to deadlock. 
We need a global mutex to prevent two + * simultaneous inserts (A into B and B into A) from racing and + * constructing a cycle without either insert observing that it is + * going to. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. @@ -224,6 +231,9 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +/* Used to check for epoll file descriptor inclusion loops */ +static struct nested_calls poll_loop_ncalls; + /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; @@ -1198,6 +1208,62 @@ retry: return res; } +/** + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() + * API, to verify that adding an epoll file inside another + * epoll structure, does not violate the constraints, in + * terms of closed loops, or too deep chains (which can + * result in excessive stack usage). + * + * @priv: Pointer to the epoll file to be currently checked. + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll + * data structure pointer. + * @call_nests: Current dept of the @ep_call_nested() call stack. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) +{ + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock(&ep->mtx); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, + epi->ffd.file->private_data, current); + if (error != 0) + break; + } + } + mutex_unlock(&ep->mtx); + + return error; +} + +/** + * ep_loop_check - Performs a check to verify that adding an epoll file (@file) + * another epoll file (represented by @ep) does not create + * closed loops or too deep chains. + * + * @ep: Pointer to the epoll private data structure. + * @file: Pointer to the epoll file to be checked. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check(struct eventpoll *ep, struct file *file) +{ + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +} + /* * Open an eventpoll file descriptor. */ @@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; + int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; @@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, */ ep = file->private_data; + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are + * better be handled here, than in more critical paths. + * + * We hold epmutex across the loop check and the insert in this case, in + * order to prevent two separate inserts from racing and each doing the + * insert "at the same time" such that ep_loop_check passes on both + * before either one does the insert, thereby creating a cycle. 
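The race described above is why the loop check and the insert sit under a single epmutex hold. From user space the visible effect of the new check is simple: once one epoll fd watches another, adding them the other way round would close a loop and is rejected. A small user-space illustration of the behaviour this change introduces (error handling trimmed; the second add is expected to fail with errno == ELOOP once the check is in place):

#include <errno.h>
#include <stdio.h>
#include <sys/epoll.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int a = epoll_create1(0);
	int b = epoll_create1(0);

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);		/* a watches b: allowed */

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0)	/* would form a cycle  */
		printf("second add failed: %s\n",
		       errno == ELOOP ? "ELOOP, as expected" : "other error");

	return 0;
}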
+ */ + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; + error = -ELOOP; + if (ep_loop_check(ep, tfile) != 0) + goto error_tgt_fput; + } + + mutex_lock(&ep->mtx); /* @@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: + if (unlikely(did_lock_epmutex)) + mutex_unlock(&epmutex); + fput(tfile); error_fput: fput(file); @@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void) EP_ITEM_COST; BUG_ON(max_user_watches < 0); + /* + * Initialize the structure used to perform epoll file descriptor + * inclusion loops checks. + */ + ep_nested_calls_init(&poll_loop_ncalls); + /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0c8d97b56f3..3aa0b72b3b9 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -848,6 +848,7 @@ struct ext4_inode_info { atomic_t i_ioend_count; /* Number of outstanding io_end structs */ /* current io_end structure for async DIO write*/ ext4_io_end_t *cur_aio_dio; + atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */ spinlock_t i_block_reservation_lock; @@ -2119,6 +2120,15 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) +/* For ioend & aio unwritten conversion wait queues */ +#define EXT4_WQ_HASH_SZ 37 +#define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\ + EXT4_WQ_HASH_SZ]) +#define ext4_aio_mutex(v) (&ext4__aio_mutex[((unsigned long)(v)) %\ + EXT4_WQ_HASH_SZ]) +extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; +extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; + #endif /* __KERNEL__ */ #endif /* _EXT4_H */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 63a75810b7c..ccce8a7e94e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3174,9 +3174,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, * that this IO needs to convertion to written when IO is * completed */ - if (io) + if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { io->flag = EXT4_IO_END_UNWRITTEN; - else + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); if (ext4_should_dioread_nolock(inode)) map->m_flags |= EXT4_MAP_UNINIT; @@ -3463,9 +3464,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, * that we need to perform convertion when IO is done. */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { - if (io) + if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { io->flag = EXT4_IO_END_UNWRITTEN; - else + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2e8322c8aa8..7b80d543b89 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -55,11 +55,47 @@ static int ext4_release_file(struct inode *inode, struct file *filp) return 0; } +static void ext4_aiodio_wait(struct inode *inode) +{ + wait_queue_head_t *wq = ext4_ioend_wq(inode); + + wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0)); +} + +/* + * This tests whether the IO in question is block-aligned or not. + * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they + * are converted to written only after the IO is complete. 
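Taken together, the ext4 hunks above implement a simple in-flight counter: extents.c bumps i_aiodio_unwritten when an unwritten-extent DIO is set up, the end-io path (further below, in page-io.c) drops it and wakes the hashed wait queue, and ext4_aiodio_wait() sleeps until it reaches zero. A hedged, self-contained sketch of that counter pattern using invented names (pending_unwritten, demo_wq) rather than the patch's own symbols:

#include <linux/wait.h>
#include <asm/atomic.h>

static atomic_t pending_unwritten = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void unwritten_io_submitted(void)
{
	atomic_inc(&pending_unwritten);		/* like i_aiodio_unwritten++ */
}

static void unwritten_io_converted(void)
{
	/* last conversion wakes anyone serialized behind the AIO mutex */
	if (atomic_dec_and_test(&pending_unwritten) &&
	    waitqueue_active(&demo_wq))
		wake_up_all(&demo_wq);
}

static void wait_for_unwritten(void)
{
	wait_event(demo_wq, atomic_read(&pending_unwritten) == 0);
}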
Until they are + * mapped, these blocks appear as holes, so dio_zero_block() will assume that + * it needs to zero out portions of the start and/or end block. If 2 AIO + * threads are at work on the same unwritten block, they must be synchronized + * or one thread will zero the other's data, causing corruption. + */ +static int +ext4_unaligned_aio(struct inode *inode, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct super_block *sb = inode->i_sb; + int blockmask = sb->s_blocksize - 1; + size_t count = iov_length(iov, nr_segs); + loff_t final_size = pos + count; + + if (pos >= inode->i_size) + return 0; + + if ((pos & blockmask) || (final_size & blockmask)) + return 1; + + return 0; +} + static ssize_t ext4_file_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; + int unaligned_aio = 0; + int ret; /* * If we have encountered a bitmap-format file, the size limit @@ -78,9 +114,31 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, nr_segs = iov_shorten((struct iovec *)iov, nr_segs, sbi->s_bitmap_maxbytes - pos); } + } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) && + !is_sync_kiocb(iocb))) { + unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos); } - return generic_file_aio_write(iocb, iov, nr_segs, pos); + /* Unaligned direct AIO must be serialized; see comment above */ + if (unaligned_aio) { + static unsigned long unaligned_warn_time; + + /* Warn about this once per day */ + if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ)) + ext4_msg(inode->i_sb, KERN_WARNING, + "Unaligned AIO/DIO on inode %ld by %s; " + "performance will be poor.", + inode->i_ino, current->comm); + mutex_lock(ext4_aio_mutex(inode)); + ext4_aiodio_wait(inode); + } + + ret = generic_file_aio_write(iocb, iov, nr_segs, pos); + + if (unaligned_aio) + mutex_unlock(ext4_aio_mutex(inode)); + + return ret; } static const struct vm_operations_struct ext4_file_vm_ops = { diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 851f49b2f9d..d1fe09aea73 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep; /* We create slab caches for groupinfo data structures based on the * superblock block size. 
There will be one per mounted filesystem for * each unique s_blocksize_bits */ -#define NR_GRPINFO_CACHES \ - (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1) +#define NR_GRPINFO_CACHES 8 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; +static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { + "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", + "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", + "ext4_groupinfo_64k", "ext4_groupinfo_128k" +}; + static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, @@ -2414,6 +2419,55 @@ err_freesgi: return -ENOMEM; } +static void ext4_groupinfo_destroy_slabs(void) +{ + int i; + + for (i = 0; i < NR_GRPINFO_CACHES; i++) { + if (ext4_groupinfo_caches[i]) + kmem_cache_destroy(ext4_groupinfo_caches[i]); + ext4_groupinfo_caches[i] = NULL; + } +} + +static int ext4_groupinfo_create_slab(size_t size) +{ + static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); + int slab_size; + int blocksize_bits = order_base_2(size); + int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; + struct kmem_cache *cachep; + + if (cache_index >= NR_GRPINFO_CACHES) + return -EINVAL; + + if (unlikely(cache_index < 0)) + cache_index = 0; + + mutex_lock(&ext4_grpinfo_slab_create_mutex); + if (ext4_groupinfo_caches[cache_index]) { + mutex_unlock(&ext4_grpinfo_slab_create_mutex); + return 0; /* Already created */ + } + + slab_size = offsetof(struct ext4_group_info, + bb_counters[blocksize_bits + 2]); + + cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], + slab_size, 0, SLAB_RECLAIM_ACCOUNT, + NULL); + + mutex_unlock(&ext4_grpinfo_slab_create_mutex); + if (!cachep) { + printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n"); + return -ENOMEM; + } + + ext4_groupinfo_caches[cache_index] = cachep; + + return 0; +} + int ext4_mb_init(struct super_block *sb, int needs_recovery) { struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) unsigned offset; unsigned max; int ret; - int cache_index; - struct kmem_cache *cachep; - char *namep = NULL; i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); @@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) goto out; } - cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; - cachep = ext4_groupinfo_caches[cache_index]; - if (!cachep) { - char name[32]; - int len = offsetof(struct ext4_group_info, - bb_counters[sb->s_blocksize_bits + 2]); - - sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits); - namep = kstrdup(name, GFP_KERNEL); - if (!namep) { - ret = -ENOMEM; - goto out; - } - - /* Need to free the kmem_cache_name() when we - * destroy the slab */ - cachep = kmem_cache_create(namep, len, 0, - SLAB_RECLAIM_ACCOUNT, NULL); - if (!cachep) { - ret = -ENOMEM; - goto out; - } - ext4_groupinfo_caches[cache_index] = cachep; - } + ret = ext4_groupinfo_create_slab(sb->s_blocksize); + if (ret < 0) + goto out; /* order 0 is regular bitmap */ sbi->s_mb_maxs[0] = sb->s_blocksize << 3; @@ -2520,7 +2550,6 @@ out: if (ret) { kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); - kfree(namep); } return ret; } @@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void) void ext4_exit_mballoc(void) { - int i; /* * Wait for completion of call_rcu()'s on ext4_pspace_cachep * before destroying the slab cache. 
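ext4_groupinfo_create_slab() above derives the cache purely from the block size: cache_index = order_base_2(size) - EXT4_MIN_BLOCK_LOG_SIZE, and ext4's minimum block size is 1024 bytes (log2 = 10), so 1k blocks map to index 0, 4k blocks to index 2 ("ext4_groupinfo_4k"), and so on up to 128k. The same arithmetic in plain user-space C, with the constant 10 written out for illustration:

#include <stdio.h>

static const char *names[] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

/* Block size -> cache index, as in ext4_groupinfo_create_slab(). */
static int cache_index(unsigned int block_size)
{
	int log = 0;

	while ((1u << log) < block_size)	/* order_base_2() by hand */
		log++;
	return log - 10;			/* 10 = log2 of the 1k minimum */
}

int main(void)
{
	printf("%u -> %s\n", 1024, names[cache_index(1024)]);	/* index 0 */
	printf("%u -> %s\n", 4096, names[cache_index(4096)]);	/* index 2 */
	return 0;
}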
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void) kmem_cache_destroy(ext4_pspace_cachep); kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_ext_cachep); - - for (i = 0; i < NR_GRPINFO_CACHES; i++) { - struct kmem_cache *cachep = ext4_groupinfo_caches[i]; - if (cachep) { - char *name = (char *)kmem_cache_name(cachep); - kmem_cache_destroy(cachep); - kfree(name); - } - } + ext4_groupinfo_destroy_slabs(); ext4_remove_debugfs_entry(); } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7270dcfca92..955cc309142 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -32,14 +32,8 @@ static struct kmem_cache *io_page_cachep, *io_end_cachep; -#define WQ_HASH_SZ 37 -#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ]) -static wait_queue_head_t ioend_wq[WQ_HASH_SZ]; - int __init ext4_init_pageio(void) { - int i; - io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); if (io_page_cachep == NULL) return -ENOMEM; @@ -48,9 +42,6 @@ int __init ext4_init_pageio(void) kmem_cache_destroy(io_page_cachep); return -ENOMEM; } - for (i = 0; i < WQ_HASH_SZ; i++) - init_waitqueue_head(&ioend_wq[i]); - return 0; } @@ -62,7 +53,7 @@ void ext4_exit_pageio(void) void ext4_ioend_wait(struct inode *inode) { - wait_queue_head_t *wq = to_ioend_wq(inode); + wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); } @@ -87,7 +78,7 @@ void ext4_free_io_end(ext4_io_end_t *io) for (i = 0; i < io->num_io_pages; i++) put_io_page(io->pages[i]); io->num_io_pages = 0; - wq = to_ioend_wq(io->inode); + wq = ext4_ioend_wq(io->inode); if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && waitqueue_active(wq)) wake_up_all(wq); @@ -102,6 +93,7 @@ int ext4_end_io_nolock(ext4_io_end_t *io) struct inode *inode = io->inode; loff_t offset = io->offset; ssize_t size = io->size; + wait_queue_head_t *wq; int ret = 0; ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," @@ -126,7 +118,16 @@ int ext4_end_io_nolock(ext4_io_end_t *io) if (io->iocb) aio_complete(io->iocb, io->result, 0); /* clear the DIO AIO unwritten flag */ - io->flag &= ~EXT4_IO_END_UNWRITTEN; + if (io->flag & EXT4_IO_END_UNWRITTEN) { + io->flag &= ~EXT4_IO_END_UNWRITTEN; + /* Wake up anyone waiting on unwritten extent conversion */ + wq = ext4_ioend_wq(io->inode); + if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) && + waitqueue_active(wq)) { + wake_up_all(wq); + } + } + return ret; } @@ -190,6 +191,7 @@ static void ext4_end_bio(struct bio *bio, int error) struct inode *inode; unsigned long flags; int i; + sector_t bi_sector = bio->bi_sector; BUG_ON(!io_end); bio->bi_private = NULL; @@ -207,9 +209,7 @@ static void ext4_end_bio(struct bio *bio, int error) if (error) SetPageError(page); BUG_ON(!head); - if (head->b_size == PAGE_CACHE_SIZE) - clear_buffer_dirty(head); - else { + if (head->b_size != PAGE_CACHE_SIZE) { loff_t offset; loff_t io_end_offset = io_end->offset + io_end->size; @@ -221,7 +221,6 @@ static void ext4_end_bio(struct bio *bio, int error) if (error) buffer_io_error(bh); - clear_buffer_dirty(bh); } if (buffer_delay(bh)) partial_write = 1; @@ -257,7 +256,7 @@ static void ext4_end_bio(struct bio *bio, int error) (unsigned long long) io_end->offset, (long) io_end->size, (unsigned long long) - bio->bi_sector >> (inode->i_blkbits - 9)); + bi_sector >> (inode->i_blkbits - 9)); } /* Add the io_end to per-inode completed io list*/ @@ -380,6 +379,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, blocksize = 1 << 
inode->i_blkbits; + BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); set_page_writeback(page); ClearPageError(page); @@ -397,12 +397,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io, for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { + block_end = block_start + blocksize; if (block_start >= len) { clear_buffer_dirty(bh); set_buffer_uptodate(bh); continue; } + clear_buffer_dirty(bh); ret = io_submit_add_bh(io, io_page, inode, wbc, bh); if (ret) { /* diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48ce561fafa..f6a318f836b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); +static void ext4_clear_request_list(void); #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext3_fs_type = { @@ -832,6 +833,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); + atomic_set(&ei->i_aiodio_unwritten, 0); return &ei->vfs_inode; } @@ -2716,6 +2718,8 @@ static void ext4_unregister_li_request(struct super_block *sb) mutex_unlock(&ext4_li_info->li_list_mtx); } +static struct task_struct *ext4_lazyinit_task; + /* * This is the function where ext4lazyinit thread lives. It walks * through the request list searching for next scheduled filesystem. @@ -2784,6 +2788,10 @@ cont_thread: if (time_before(jiffies, next_wakeup)) schedule(); finish_wait(&eli->li_wait_daemon, &wait); + if (kthread_should_stop()) { + ext4_clear_request_list(); + goto exit_thread; + } } exit_thread: @@ -2808,6 +2816,7 @@ exit_thread: wake_up(&eli->li_wait_task); kfree(ext4_li_info); + ext4_lazyinit_task = NULL; ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); @@ -2830,11 +2839,10 @@ static void ext4_clear_request_list(void) static int ext4_run_lazyinit_thread(void) { - struct task_struct *t; - - t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); - if (IS_ERR(t)) { - int err = PTR_ERR(t); + ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, + ext4_li_info, "ext4lazyinit"); + if (IS_ERR(ext4_lazyinit_task)) { + int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); del_timer_sync(&ext4_li_info->li_timer); kfree(ext4_li_info); @@ -2985,16 +2993,10 @@ static void ext4_destroy_lazyinit_thread(void) * If thread exited earlier * there's nothing to be done. 
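The lazyinit changes around this point replace the hand-rolled li_wait_daemon/li_wait_task handshake with the stock kthread API: ext4_run_lazyinit_thread() keeps the task_struct returned by kthread_run(), the thread polls kthread_should_stop(), and ext4_destroy_lazyinit_thread() (in the hunk just below) simply calls kthread_stop(). A minimal kernel-module-style sketch of that start/stop pattern; demo_thread, demo_task and the sleep interval are invented for illustration:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
	/* kthread_stop() blocks until this loop notices the stop request */
	while (!kthread_should_stop())
		msleep(100);		/* stand-in for real periodic work */
	return 0;
}

static int demo_start(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo_lazyinit");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void demo_stop(void)
{
	if (demo_task)
		kthread_stop(demo_task);	/* synchronous shutdown */
}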
*/ - if (!ext4_li_info) + if (!ext4_li_info || !ext4_lazyinit_task) return; - ext4_clear_request_list(); - - while (ext4_li_info->li_task) { - wake_up(&ext4_li_info->li_wait_daemon); - wait_event(ext4_li_info->li_wait_task, - ext4_li_info->li_task == NULL); - } + kthread_stop(ext4_lazyinit_task); } static int ext4_fill_super(struct super_block *sb, void *data, int silent) @@ -4768,7 +4770,7 @@ static struct file_system_type ext4_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -int __init ext4_init_feat_adverts(void) +static int __init ext4_init_feat_adverts(void) { struct ext4_features *ef; int ret = -ENOMEM; @@ -4792,23 +4794,44 @@ out: return ret; } +static void ext4_exit_feat_adverts(void) +{ + kobject_put(&ext4_feat->f_kobj); + wait_for_completion(&ext4_feat->f_kobj_unregister); + kfree(ext4_feat); +} + +/* Shared across all ext4 file systems */ +wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; +struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; + static int __init ext4_init_fs(void) { - int err; + int i, err; ext4_check_flag_values(); + + for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { + mutex_init(&ext4__aio_mutex[i]); + init_waitqueue_head(&ext4__ioend_wq[i]); + } + err = ext4_init_pageio(); if (err) return err; err = ext4_init_system_zone(); if (err) - goto out5; + goto out7; ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); if (!ext4_kset) - goto out4; + goto out6; ext4_proc_root = proc_mkdir("fs/ext4", NULL); + if (!ext4_proc_root) + goto out5; err = ext4_init_feat_adverts(); + if (err) + goto out4; err = ext4_init_mballoc(); if (err) @@ -4838,12 +4861,14 @@ out1: out2: ext4_exit_mballoc(); out3: - kfree(ext4_feat); + ext4_exit_feat_adverts(); +out4: remove_proc_entry("fs/ext4", NULL); +out5: kset_unregister(ext4_kset); -out4: +out6: ext4_exit_system_zone(); -out5: +out7: ext4_exit_pageio(); return err; } @@ -4857,6 +4882,7 @@ static void __exit ext4_exit_fs(void) destroy_inodecache(); ext4_exit_xattr(); ext4_exit_mballoc(); + ext4_exit_feat_adverts(); remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); ext4_exit_system_zone(); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bfed8447ed8..83543b5ff94 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, if (err) return err; - if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) - return 0; + if (attr->ia_valid & ATTR_OPEN) { + if (fc->atomic_o_trunc) + return 0; + file = NULL; + } if (attr->ia_valid & ATTR_SIZE) is_truncate = true; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 95da1bc1c82..9e0832dbb1e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) return ff; } +static void fuse_release_async(struct work_struct *work) +{ + struct fuse_req *req; + struct fuse_conn *fc; + struct path path; + + req = container_of(work, struct fuse_req, misc.release.work); + path = req->misc.release.path; + fc = get_fuse_conn(path.dentry->d_inode); + + fuse_put_request(fc, req); + path_put(&path); +} + static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + if (fc->destroy_req) { + /* + * If this is a fuseblk mount, then it's possible that + * releasing the path will result in releasing the + * super block and sending the DESTROY request. If + * the server is single threaded, this would hang. + * For this reason do the path_put() in a separate + * thread. 
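The deferral the fuse comment above calls for is the usual work_struct idiom: package the final put into a small callback and hand it to a kernel worker instead of running it in the current, possibly deadlock-prone, context (the actual patch embeds the work_struct in the existing fuse_req). A generic, hedged sketch of the idiom with invented names (demo_release, demo_release_async):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_release {
	struct work_struct work;
	void *resource;			/* whatever must be put later */
};

static void demo_release_fn(struct work_struct *work)
{
	struct demo_release *rel = container_of(work, struct demo_release, work);

	/* the final put happens in worker context, not the caller's */
	kfree(rel->resource);
	kfree(rel);
}

static void demo_release_async(void *resource)
{
	struct demo_release *rel = kmalloc(sizeof(*rel), GFP_KERNEL);

	if (!rel)
		return;			/* real code would fall back to a sync put */
	rel->resource = resource;
	INIT_WORK(&rel->work, demo_release_fn);
	schedule_work(&rel->work);
}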
+ */ + atomic_inc(&req->count); + INIT_WORK(&req->misc.release.work, fuse_release_async); + schedule_work(&req->misc.release.work); + } else { + path_put(&req->misc.release.path); + } } -static void fuse_file_put(struct fuse_file *ff) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + if (sync) { + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); + fuse_put_request(ff->fc, req); + } else { + req->end = fuse_release_end; + fuse_request_send_background(ff->fc, req); + } kfree(ff); } } @@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. + * + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. */ - fuse_file_put(ff); + fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) @@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) page_cache_release(page); } if (req->ff) - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_send_readpages(struct fuse_req *req, struct file *file) @@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) { __free_page(req->pages[0]); - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ae5744a2f9e..d4286947bc2 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,7 @@ #include <linux/rwsem.h> #include <linux/rbtree.h> #include <linux/poll.h> +#include <linux/workqueue.h> /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -262,7 +263,10 @@ struct fuse_req { /** Data for asynchronous requests */ union { struct { - struct fuse_release_in in; + union { + struct fuse_release_in in; + struct work_struct work; + }; struct path path; } release; struct fuse_init_in init_in; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e..7cd9a5a68d5 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) #endif glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + WQ_HIGHPRI | WQ_FREEZABLE, 0); if (IS_ERR(glock_workqueue)) return PTR_ERR(glock_workqueue); gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (IS_ERR(gfs2_delete_workqueue)) { destroy_workqueue(glock_workqueue); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17..72c31a315d9 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo) struct address_space *mapping = (struct address_space *)(gl + 1); gfs2_init_glock_once(gl); - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - spin_lock_init(&mapping->i_mmap_lock); - INIT_LIST_HEAD(&mapping->private_list); - 
spin_lock_init(&mapping->private_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + address_space_init_once(mapping); } /** @@ -144,7 +137,7 @@ static int __init init_gfs2_fs(void) error = -ENOMEM; gfs_recovery_wq = alloc_workqueue("gfs_recovery", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (!gfs_recovery_wq) goto fail_wq; diff --git a/fs/inode.c b/fs/inode.c index da85e56378f..0647d80accf 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +void address_space_init_once(struct address_space *mapping) +{ + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + spin_lock_init(&mapping->tree_lock); + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + mutex_init(&mapping->unmap_mutex); +} +EXPORT_SYMBOL(address_space_init_once); + /* * These are initializations that only need to be done * once, because the fields are idempotent across use @@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode) INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); - INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - spin_lock_init(&inode->i_data.tree_lock); - spin_lock_init(&inode->i_data.i_mmap_lock); - INIT_LIST_HEAD(&inode->i_data.private_list); - spin_lock_init(&inode->i_data.private_lock); - INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); - INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); + address_space_init_once(&inode->i_data); i_size_ordered_init(inode); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&inode->i_fsnotify_marks); @@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb) /** * invalidate_inodes - attempt to free all inodes on a superblock * @sb: superblock to operate on + * @kill_dirty: flag to guide handling of dirty inodes * * Attempts to free all inodes for a given superblock. If there were any * busy inodes return a non-zero value, else zero. + * If @kill_dirty is set, discard dirty inodes too, otherwise treat + * them as busy. */ -int invalidate_inodes(struct super_block *sb) +int invalidate_inodes(struct super_block *sb, bool kill_dirty) { int busy = 0; struct inode *inode, *next; @@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb) list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) continue; + if (inode->i_state & I_DIRTY && !kill_dirty) { + busy = 1; + continue; + } if (atomic_read(&inode->i_count)) { busy = 1; continue; diff --git a/fs/internal.h b/fs/internal.h index 0663568b124..9b976b57d7f 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); */ extern int get_nr_dirty_inodes(void); extern void evict_inodes(struct super_block *); -extern int invalidate_inodes(struct super_block *); +extern int invalidate_inodes(struct super_block *, bool); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9e4686900f1..97e73469b2c 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -473,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal) } /* - * Called under j_state_lock. Returns true if a transaction commit was started. + * Called with j_state_lock locked for writing. 
+ * Returns true if a transaction commit was started. */ int __jbd2_log_start_commit(journal_t *journal, tid_t target) { @@ -520,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal) { transaction_t *transaction = NULL; tid_t tid; + int need_to_start = 0; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && !current->journal_info) { transaction = journal->j_running_transaction; - __jbd2_log_start_commit(journal, transaction->t_tid); + if (!tid_geq(journal->j_commit_request, transaction->t_tid)) + need_to_start = 1; } else if (journal->j_committing_transaction) transaction = journal->j_committing_transaction; @@ -535,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal) tid = transaction->t_tid; read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); jbd2_log_wait_commit(journal, tid); return 1; } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index faad2bd787c..1d1191050f9 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction) static int start_this_handle(journal_t *journal, handle_t *handle, int gfp_mask) { - transaction_t *transaction; - int needed; - int nblocks = handle->h_buffer_credits; - transaction_t *new_transaction = NULL; + transaction_t *transaction, *new_transaction = NULL; + tid_t tid; + int needed, need_to_start; + int nblocks = handle->h_buffer_credits; if (nblocks > journal->j_max_transaction_buffers) { printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", @@ -222,8 +222,11 @@ repeat: atomic_sub(nblocks, &transaction->t_outstanding_credits); prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); - __jbd2_log_start_commit(journal, transaction->t_tid); + tid = transaction->t_tid; + need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); schedule(); finish_wait(&journal->j_wait_transaction_locked, &wait); goto repeat; @@ -442,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; - int ret; + tid_t tid; + int need_to_start, ret; /* If we've had an abort of any type, don't even think about * actually doing the restart! */ @@ -465,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) spin_unlock(&transaction->t_handle_lock); jbd_debug(2, "restarting handle %p\n", handle); - __jbd2_log_start_commit(journal, transaction->t_tid); + tid = transaction->t_tid; + need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; diff --git a/fs/namei.c b/fs/namei.c index 7d77f24d32a..0087cf9c2c6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; - /* - * It can be possible to revalidate the dentry that we started - * the path walk with. force_reval_path may also revalidate the - * dentry already committed to the nameidata. 
- */ - if (unlikely(parent == dentry)) - return nameidata_drop_rcu(nd); - BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt) { spin_lock(&fs->lock); @@ -561,39 +553,25 @@ static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd) */ void release_open_intent(struct nameidata *nd) { - if (nd->intent.open.file->f_path.dentry == NULL) - put_filp(nd->intent.open.file); - else - fput(nd->intent.open.file); -} - -/* - * Call d_revalidate and handle filesystems that request rcu-walk - * to be dropped. This may be called and return in rcu-walk mode, - * regardless of success or error. If -ECHILD is returned, the caller - * must return -ECHILD back up the path walk stack so path walk may - * be restarted in ref-walk mode. - */ -static int d_revalidate(struct dentry *dentry, struct nameidata *nd) -{ - int status; + struct file *file = nd->intent.open.file; - status = dentry->d_op->d_revalidate(dentry, nd); - if (status == -ECHILD) { - if (nameidata_dentry_drop_rcu(nd, dentry)) - return status; - status = dentry->d_op->d_revalidate(dentry, nd); + if (file && !IS_ERR(file)) { + if (file->f_path.dentry == NULL) + put_filp(file); + else + fput(file); } +} - return status; +static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) +{ + return dentry->d_op->d_revalidate(dentry, nd); } -static inline struct dentry * +static struct dentry * do_revalidate(struct dentry *dentry, struct nameidata *nd) { - int status; - - status = d_revalidate(dentry, nd); + int status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { /* * The dentry failed validation. @@ -602,24 +580,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) * to return a fail status. */ if (status < 0) { - /* If we're in rcu-walk, we don't have a ref */ - if (!(nd->flags & LOOKUP_RCU)) - dput(dentry); + dput(dentry); dentry = ERR_PTR(status); - - } else { - /* Don't d_invalidate in rcu-walk mode */ - if (nameidata_dentry_drop_rcu_maybe(nd, dentry)) - return ERR_PTR(-ECHILD); - if (!d_invalidate(dentry)) { - dput(dentry); - dentry = NULL; - } + } else if (!d_invalidate(dentry)) { + dput(dentry); + dentry = NULL; } } return dentry; } +static inline struct dentry * +do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd) +{ + int status = d_revalidate(dentry, nd); + if (likely(status > 0)) + return dentry; + if (status == -ECHILD) { + if (nameidata_dentry_drop_rcu(nd, dentry)) + return ERR_PTR(-ECHILD); + return do_revalidate(dentry, nd); + } + if (status < 0) + return ERR_PTR(status); + /* Don't d_invalidate in rcu-walk mode */ + if (nameidata_dentry_drop_rcu(nd, dentry)) + return ERR_PTR(-ECHILD); + if (!d_invalidate(dentry)) { + dput(dentry); + dentry = NULL; + } + return dentry; +} + static inline int need_reval_dot(struct dentry *dentry) { if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) @@ -664,9 +657,6 @@ force_reval_path(struct path *path, struct nameidata *nd) return 0; if (!status) { - /* Don't d_invalidate in rcu-walk mode */ - if (nameidata_drop_rcu(nd)) - return -ECHILD; d_invalidate(dentry); status = -ESTALE; } @@ -773,6 +763,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) int error; struct dentry *dentry = link->dentry; + BUG_ON(nd->flags & LOOKUP_RCU); + touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); @@ -803,10 +795,16 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) * Without that kind of total limit, nasty chains of consecutive * symlinks can cause almost arbitrarily long lookups. 
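The nesting and total-link limits mentioned in that comment are observable from user space: a pair of symlinks that point at each other can never resolve, and the path walk bails out with ELOOP. A small illustration (creates and removes two links, loop_a and loop_b, in the current directory):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* two symlinks pointing at each other: resolution can never finish */
	unlink("loop_a"); unlink("loop_b");
	symlink("loop_b", "loop_a");
	symlink("loop_a", "loop_b");

	if (open("loop_a", O_RDONLY) < 0)
		perror("open loop_a");	/* expected: ELOOP */

	unlink("loop_a"); unlink("loop_b");
	return 0;
}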
*/ -static inline int do_follow_link(struct path *path, struct nameidata *nd) +static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd) { void *cookie; int err = -ELOOP; + + /* We drop rcu-walk here */ + if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) + return -ECHILD; + BUG_ON(inode != path->dentry->d_inode); + if (current->link_count >= MAX_NESTED_LINKS) goto loop; if (current->total_link_count >= 40) @@ -1251,9 +1249,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, return -ECHILD; nd->seq = seq; - if (dentry->d_flags & DCACHE_OP_REVALIDATE) - goto need_revalidate; -done2: + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { + dentry = do_revalidate_rcu(dentry, nd); + if (!dentry) + goto need_lookup; + if (IS_ERR(dentry)) + goto fail; + if (!(nd->flags & LOOKUP_RCU)) + goto done; + } path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, false))) @@ -1266,8 +1270,13 @@ done2: if (!dentry) goto need_lookup; found: - if (dentry->d_flags & DCACHE_OP_REVALIDATE) - goto need_revalidate; + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { + dentry = do_revalidate(dentry, nd); + if (!dentry) + goto need_lookup; + if (IS_ERR(dentry)) + goto fail; + } done: path->mnt = mnt; path->dentry = dentry; @@ -1309,16 +1318,6 @@ need_lookup: mutex_unlock(&dir->i_mutex); goto found; -need_revalidate: - dentry = do_revalidate(dentry, nd); - if (!dentry) - goto need_lookup; - if (IS_ERR(dentry)) - goto fail; - if (nd->flags & LOOKUP_RCU) - goto done2; - goto done; - fail: return PTR_ERR(dentry); } @@ -1415,11 +1414,7 @@ exec_again: goto out_dput; if (inode->i_op->follow_link) { - /* We commonly drop rcu-walk here */ - if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) - return -ECHILD; - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; @@ -1463,10 +1458,7 @@ last_component: break; if (inode && unlikely(inode->i_op->follow_link) && (lookup_flags & LOOKUP_FOLLOW)) { - if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) - return -ECHILD; - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; @@ -1500,12 +1492,15 @@ return_reval: * We may need to check the cached dentry for staleness. 
*/ if (need_reval_dot(nd->path.dentry)) { + if (nameidata_drop_rcu_last_maybe(nd)) + return -ECHILD; /* Note: we do not d_invalidate() */ err = d_revalidate(nd->path.dentry, nd); if (!err) err = -ESTALE; if (err < 0) break; + return 0; } return_base: if (nameidata_drop_rcu_last_maybe(nd)) @@ -2265,8 +2260,6 @@ static struct file *finish_open(struct nameidata *nd, return filp; exit: - if (!IS_ERR(nd->intent.open.file)) - release_open_intent(nd); path_put(&nd->path); return ERR_PTR(error); } @@ -2389,8 +2382,6 @@ exit_mutex_unlock: exit_dput: path_put_conditional(path, nd); exit: - if (!IS_ERR(nd->intent.open.file)) - release_open_intent(nd); path_put(&nd->path); return ERR_PTR(error); } @@ -2477,6 +2468,7 @@ struct file *do_filp_open(int dfd, const char *pathname, } audit_inode(pathname, nd.path.dentry); filp = finish_open(&nd, open_flag, acc_mode); + release_open_intent(&nd); return filp; creat: @@ -2553,6 +2545,7 @@ out: path_put(&nd.root); if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) goto reval; + release_open_intent(&nd); return filp; exit_dput: @@ -2560,8 +2553,6 @@ exit_dput: out_path: path_put(&nd.path); out_filp: - if (!IS_ERR(nd.intent.open.file)) - release_open_intent(&nd); filp = ERR_PTR(error); goto out; } diff --git a/fs/namespace.c b/fs/namespace.c index 7b0b9537169..d1edf26025d 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags) */ br_write_lock(vfsmount_lock); if (mnt_get_count(mnt) != 2) { - br_write_lock(vfsmount_lock); + br_write_unlock(vfsmount_lock); return -EBUSY; } br_write_unlock(vfsmount_lock); diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3be975e1891..cde36cb0f34 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr, out: return status; out_default: - return nfs_cb_stat_to_errno(status); + return nfs_cb_stat_to_errno(nfserr); } /* @@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, if (unlikely(status)) goto out; if (unlikely(nfserr != NFS4_OK)) - goto out_default; + status = nfs_cb_stat_to_errno(nfserr); out: return status; -out_default: - return nfs_cb_stat_to_errno(status); } /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d98d0213285..54b60bfceb8 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f dp->dl_client = clp; get_nfs4_file(fp); dp->dl_file = fp; - dp->dl_vfs_file = find_readable_file(fp); - get_file(dp->dl_vfs_file); - dp->dl_flock = NULL; dp->dl_type = type; dp->dl_stateid.si_boot = boot_time; dp->dl_stateid.si_stateownerid = current_delegid++; @@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); dp->dl_time = 0; atomic_set(&dp->dl_count, 1); - list_add(&dp->dl_perfile, &fp->fi_delegations); - list_add(&dp->dl_perclnt, &clp->cl_delegations); INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); return dp; } @@ -253,36 +248,30 @@ nfs4_put_delegation(struct nfs4_delegation *dp) if (atomic_dec_and_test(&dp->dl_count)) { dprintk("NFSD: freeing dp %p\n",dp); put_nfs4_file(dp->dl_file); - fput(dp->dl_vfs_file); kmem_cache_free(deleg_slab, dp); num_delegations--; } } -/* Remove the associated file_lock first, then remove the delegation. 
- * lease_modify() is called to remove the FS_LEASE file_lock from - * the i_flock list, eventually calling nfsd's lock_manager - * fl_release_callback. - */ -static void -nfs4_close_delegation(struct nfs4_delegation *dp) +static void nfs4_put_deleg_lease(struct nfs4_file *fp) { - dprintk("NFSD: close_delegation dp %p\n",dp); - /* XXX: do we even need this check?: */ - if (dp->dl_flock) - vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); + if (atomic_dec_and_test(&fp->fi_delegees)) { + vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); + fp->fi_lease = NULL; + fp->fi_deleg_file = NULL; + } } /* Called under the state lock. */ static void unhash_delegation(struct nfs4_delegation *dp) { - list_del_init(&dp->dl_perfile); list_del_init(&dp->dl_perclnt); spin_lock(&recall_lock); + list_del_init(&dp->dl_perfile); list_del_init(&dp->dl_recall_lru); spin_unlock(&recall_lock); - nfs4_close_delegation(dp); + nfs4_put_deleg_lease(dp->dl_file); nfs4_put_delegation(dp); } @@ -958,8 +947,6 @@ expire_client(struct nfs4_client *clp) spin_lock(&recall_lock); while (!list_empty(&clp->cl_delegations)) { dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); - dprintk("NFSD: expire client. dp %p, fp %p\n", dp, - dp->dl_flock); list_del_init(&dp->dl_perclnt); list_move(&dp->dl_recall_lru, &reaplist); } @@ -2078,6 +2065,7 @@ alloc_init_file(struct inode *ino) fp->fi_inode = igrab(ino); fp->fi_id = current_fileid++; fp->fi_had_conflict = false; + fp->fi_lease = NULL; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); spin_lock(&recall_lock); @@ -2329,23 +2317,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) nfs4_file_put_access(fp, O_RDONLY); } -/* - * Spawn a thread to perform a recall on the delegation represented - * by the lease (file_lock) - * - * Called from break_lease() with lock_flocks() held. - * Note: we assume break_lease will only call this *once* for any given - * lease. - */ -static -void nfsd_break_deleg_cb(struct file_lock *fl) +static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; - - dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl); - if (!dp) - return; - /* We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease * callback (and since the lease code is serialized by the kernel @@ -2353,22 +2326,35 @@ void nfsd_break_deleg_cb(struct file_lock *fl) * it's safe to take a reference: */ atomic_inc(&dp->dl_count); - spin_lock(&recall_lock); list_add_tail(&dp->dl_recall_lru, &del_recall_lru); - spin_unlock(&recall_lock); /* only place dl_time is set. protected by lock_flocks*/ dp->dl_time = get_seconds(); + nfsd4_cb_recall(dp); +} + +/* Called from break_lease() with lock_flocks() held. */ +static void nfsd_break_deleg_cb(struct file_lock *fl) +{ + struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; + struct nfs4_delegation *dp; + + BUG_ON(!fp); + /* We assume break_lease is only called once per lease: */ + BUG_ON(fp->fi_had_conflict); /* * We don't want the locks code to timeout the lease for us; - * we'll remove it ourself if the delegation isn't returned - * in time. 
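Elsewhere in these nfsd hunks a single vfs lease is now shared by every delegation on a file and reference-counted via fi_delegees: nfs4_setlease() installs the lease and starts the count at one, nfs4_set_delegation() bumps it for later delegations, and nfs4_put_deleg_lease() (above) only issues vfs_setlease(F_UNLCK) when the count falls to zero. A rough get/put sketch of that scheme, with invented names and the lease itself reduced to a flag:

#include <linux/spinlock.h>
#include <asm/atomic.h>

/* One shared lease per file, counted like fi_delegees. */
struct demo_file {
	spinlock_t	lock;
	atomic_t	delegees;
	int		lease_held;	/* stand-in for fi_lease != NULL */
};

static void demo_file_init(struct demo_file *f)
{
	spin_lock_init(&f->lock);
	atomic_set(&f->delegees, 0);
	f->lease_held = 0;
}

static void demo_lease_get(struct demo_file *f)
{
	spin_lock(&f->lock);
	if (!f->lease_held) {			/* first delegation installs the lease */
		f->lease_held = 1;
		atomic_set(&f->delegees, 1);
	} else {
		atomic_inc(&f->delegees);	/* later delegations share it */
	}
	spin_unlock(&f->lock);
}

static void demo_lease_put(struct demo_file *f)
{
	/* last delegation tears the lease down, as nfs4_put_deleg_lease() does */
	if (atomic_dec_and_test(&f->delegees))
		f->lease_held = 0;
}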
+ * we'll remove it ourself if a delegation isn't returned + * in time: */ fl->fl_break_time = 0; - dp->dl_file->fi_had_conflict = true; - nfsd4_cb_recall(dp); + spin_lock(&recall_lock); + fp->fi_had_conflict = true; + list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) + nfsd_break_one_deleg(dp); + spin_unlock(&recall_lock); } static @@ -2459,13 +2445,15 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) static struct nfs4_delegation * find_delegation_file(struct nfs4_file *fp, stateid_t *stid) { - struct nfs4_delegation *dp; + struct nfs4_delegation *dp = NULL; + spin_lock(&recall_lock); list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) - return dp; + break; } - return NULL; + spin_unlock(&recall_lock); + return dp; } int share_access_to_flags(u32 share_access) @@ -2641,6 +2629,66 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } +static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) +{ + struct file_lock *fl; + + fl = locks_alloc_lock(); + if (!fl) + return NULL; + locks_init_lock(fl); + fl->fl_lmops = &nfsd_lease_mng_ops; + fl->fl_flags = FL_LEASE; + fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; + fl->fl_end = OFFSET_MAX; + fl->fl_owner = (fl_owner_t)(dp->dl_file); + fl->fl_pid = current->tgid; + return fl; +} + +static int nfs4_setlease(struct nfs4_delegation *dp, int flag) +{ + struct nfs4_file *fp = dp->dl_file; + struct file_lock *fl; + int status; + + fl = nfs4_alloc_init_lease(dp, flag); + if (!fl) + return -ENOMEM; + fl->fl_file = find_readable_file(fp); + list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); + status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); + if (status) { + list_del_init(&dp->dl_perclnt); + locks_free_lock(fl); + return -ENOMEM; + } + fp->fi_lease = fl; + fp->fi_deleg_file = fl->fl_file; + get_file(fp->fi_deleg_file); + atomic_set(&fp->fi_delegees, 1); + list_add(&dp->dl_perfile, &fp->fi_delegations); + return 0; +} + +static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) +{ + struct nfs4_file *fp = dp->dl_file; + + if (!fp->fi_lease) + return nfs4_setlease(dp, flag); + spin_lock(&recall_lock); + if (fp->fi_had_conflict) { + spin_unlock(&recall_lock); + return -EAGAIN; + } + atomic_inc(&fp->fi_delegees); + list_add(&dp->dl_perfile, &fp->fi_delegations); + spin_unlock(&recall_lock); + list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); + return 0; +} + /* * Attempt to hand out a delegation. */ @@ -2650,7 +2698,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta struct nfs4_delegation *dp; struct nfs4_stateowner *sop = stp->st_stateowner; int cb_up; - struct file_lock *fl; int status, flag = 0; cb_up = nfsd4_cb_channel_good(sop->so_client); @@ -2681,36 +2728,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta } dp = alloc_init_deleg(sop->so_client, stp, fh, flag); - if (dp == NULL) { - flag = NFS4_OPEN_DELEGATE_NONE; - goto out; - } - status = -ENOMEM; - fl = locks_alloc_lock(); - if (!fl) - goto out; - locks_init_lock(fl); - fl->fl_lmops = &nfsd_lease_mng_ops; - fl->fl_flags = FL_LEASE; - fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; - fl->fl_end = OFFSET_MAX; - fl->fl_owner = (fl_owner_t)dp; - fl->fl_file = find_readable_file(stp->st_file); - BUG_ON(!fl->fl_file); - fl->fl_pid = current->tgid; - dp->dl_flock = fl; - - /* vfs_setlease checks to see if delegation should be handed out. - * the lock_manager callback fl_change is used - */ - if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { - dprintk("NFSD: setlease failed [%d], no delegation\n", status); - dp->dl_flock = NULL; - locks_free_lock(fl); - unhash_delegation(dp); - flag = NFS4_OPEN_DELEGATE_NONE; - goto out; - } + if (dp == NULL) + goto out_no_deleg; + status = nfs4_set_delegation(dp, flag); + if (status) + goto out_free; memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); @@ -2722,6 +2744,12 @@ out: && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) dprintk("NFSD: WARNING: refusing delegation reclaim\n"); open->op_delegate_type = flag; + return; +out_free: + nfs4_put_delegation(dp); +out_no_deleg: + flag = NFS4_OPEN_DELEGATE_NONE; + goto out; } /* @@ -2916,8 +2944,6 @@ nfs4_laundromat(void) test_val = u; break; } - dprintk("NFSD: purging unused delegation dp %p, fp %p\n", - dp, dp->dl_flock); list_move(&dp->dl_recall_lru, &reaplist); } spin_unlock(&recall_lock); @@ -3128,7 +3154,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, goto out; renew_client(dp->dl_client); if (filpp) { - *filpp = find_readable_file(dp->dl_file); + *filpp = dp->dl_file->fi_deleg_file; BUG_ON(!*filpp); } } else { /* open or lock stateid */ diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc..1275b865507 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) + return status; iattr->ia_valid |= ATTR_UID; } if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { @@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) + return status; iattr->ia_valid |= ATTR_GID; } if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 3074656ba7b..2d31224b07b 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -83,8 +83,6 @@ struct nfs4_delegation { atomic_t dl_count; /* ref count */ struct nfs4_client *dl_client; struct nfs4_file *dl_file; - struct file *dl_vfs_file; - struct file_lock *dl_flock; u32 dl_type; time_t dl_time; /* For recall: */ @@ -379,6 +377,9 @@ struct nfs4_file { */ atomic_t fi_readers; atomic_t fi_writers; + struct file *fi_deleg_file; + struct file_lock *fi_lease; + atomic_t fi_delegees; struct inode *fi_inode; u32 fi_id; /* used with stateowner->so_id * for stateid_hashtbl hash */ diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 641117f2188..da1d9701f8e 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) if (ra->p_count == 0) frap = rap; } - depth = nfsdstats.ra_size*11/10; + depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; @@ -1744,6 +1744,13 @@ 
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, host_err = nfsd_break_lease(odentry->d_inode); if (host_err) goto out_drop_write; + if (ndentry->d_inode) { + host_err = nfsd_break_lease(ndentry->d_inode); + if (host_err) + goto out_drop_write; + } + if (host_err) + goto out_drop_write; host_err = vfs_rename(fdir, odentry, tdir, ndentry); if (!host_err) { host_err = commit_metadata(tfhp); @@ -1812,22 +1819,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); if (host_err) - goto out_nfserr; + goto out_put; host_err = nfsd_break_lease(rdentry->d_inode); if (host_err) - goto out_put; + goto out_drop_write; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); else host_err = vfs_rmdir(dirp, rdentry); -out_put: - dput(rdentry); - if (!host_err) host_err = commit_metadata(fhp); - +out_drop_write: mnt_drop_write(fhp->fh_export->ex_path.mnt); +out_put: + dput(rdentry); + out_nfserr: err = nfserrno(host_err); out: diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f528..85f7baa15f5 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -35,11 +35,6 @@ #include "btnode.h" -void nilfs_btnode_cache_init_once(struct address_space *btnc) -{ - nilfs_mapping_init_once(btnc); -} - static const struct address_space_operations def_btnode_aops = { .sync_page = block_sync_page, }; diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 79037494f1e..1b8ebd888c2 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { struct buffer_head *newbh; }; -void nilfs_btnode_cache_init_once(struct address_space *); void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f6..a0babd2bff6 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct backing_dev_info *bdi = inode->i_sb->s_bdi; INIT_LIST_HEAD(&shadow->frozen_buffers); - nilfs_mapping_init_once(&shadow->frozen_data); + address_space_init_once(&shadow->frozen_data); nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); - nilfs_mapping_init_once(&shadow->frozen_btnodes); + address_space_init_once(&shadow->frozen_btnodes); nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); mi->mi_shadow = shadow; return 0; diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfe..a585b35fd6b 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, return nc; } -void nilfs_mapping_init_once(struct address_space *mapping) -{ - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - INIT_LIST_HEAD(&mapping->private_list); - spin_lock_init(&mapping->private_lock); - - spin_lock_init(&mapping->i_mmap_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); -} - void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops) diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd89..2a00953ebd5 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *); int 
nilfs_copy_dirty_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_clear_dirty_pages(struct address_space *); -void nilfs_mapping_init_once(struct address_space *mapping); void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 58fd707174e..1673b3d9984 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj) #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif - nilfs_btnode_cache_init_once(&ii->i_btnode_cache); + address_space_init_once(&ii->i_btnode_cache); ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 43e56b97f9c..6180da1e37e 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb) ocfs2_quota_trans_credits(sb); } -/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + - * bitmap block for the new bit) dx_root update for free list */ -#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) +/* data block for new dir/symlink, allocation of directory block, dx_root + * update for free list */ +#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1) static inline int ocfs2_add_dir_index_credits(struct super_block *sb) { diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index b5f9160e93e..19ebc5aad39 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, u32 num_clusters, unsigned int e_flags) { int ret, delete, index, credits = 0; - u32 new_bit, new_len; + u32 new_bit, new_len, orig_num_clusters; unsigned int set_len; struct ocfs2_super *osb = OCFS2_SB(sb); handle_t *handle; @@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, goto out; } + orig_num_clusters = num_clusters; + while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, p_cluster, num_clusters, @@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, * in write-back mode. */ if (context->get_clusters == ocfs2_di_get_clusters) { - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + ret = ocfs2_cow_sync_writeback(sb, context, cpos, + orig_num_clusters); if (ret) mlog_errno(ret); } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447..36c423fb063 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb, struct mount_options *mopt, int is_remount) { - int status; + int status, user_stack = 0; char *p; u32 tmp; @@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb, memcpy(mopt->cluster_stack, args[0].from, OCFS2_STACK_LABEL_LEN); mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have + * an osb to pass to + * ocfs2_userspace_stack(). 
+ */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + user_stack = 1; break; case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; @@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb, } } - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; + if (user_stack == 0) { + /* Ensure only one heartbeat mode */ + tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + status = 0; + goto bail; + } } status = 1; diff --git a/fs/open.c b/fs/open.c index e52389e1f05..5a2c6ebc22b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -790,6 +790,8 @@ struct file *nameidata_to_filp(struct nameidata *nd) /* Pick up the filp from the open intent */ filp = nd->intent.open.file; + nd->intent.open.file = NULL; + /* Has the filesystem initialised the file for us? */ if (filp->f_path.dentry == NULL) { path_get(&nd->path); diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 789c625c7aa..b10e3540d5b 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) } vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + vm->vblk_offset = get_unaligned_be32(data + 0x0C); vm->last_vblk_seq = get_unaligned_be32(data + 0x04); diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee7..11f688bd76c 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) int mac_partition(struct parsed_partitions *state) { - int slot = 1; Sector sect; unsigned char *data; - int blk, blocks_in_map; + int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; @@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; /* not a MacOS disk */ } - strlcat(state->pp_buf, " [mac]", PAGE_SIZE); blocks_in_map = be32_to_cpu(part->map_count); - for (blk = 1; blk <= blocks_in_map; ++blk) { - int pos = blk * secsize; + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + strlcat(state->pp_buf, " [mac]", PAGE_SIZE); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, &sect); if (!data) @@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) } if (goodness > found_root_goodness) { - found_root = blk; + found_root = slot; found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ - - ++slot; } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) diff --git a/fs/proc/array.c b/fs/proc/array.c index df2b703b9d0..7c99c1cf7e5 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_cap(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); -#if defined(CONFIG_S390) - task_show_regs(m, task); -#endif task_context_switch_counts(m, task); return 0; } diff --git a/fs/super.c b/fs/super.c index 74e149efed8..7e9dd4cc2c0 100644 --- a/fs/super.c +++ b/fs/super.c @@ -177,6 +177,11 @@ void deactivate_locked_super(struct 
super_block *s) struct file_system_type *fs = s->s_type; if (atomic_dec_and_test(&s->s_active)) { fs->kill_sb(s); + /* + * We need to call rcu_barrier so all the delayed rcu free + * inodes are flushed before we release the fs module. + */ + rcu_barrier(); put_filesystem(fs); put_super(s); } else { diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index 05201ae719e..d61611c8801 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c @@ -152,6 +152,8 @@ xfs_ioc_trim( if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); + if (!blk_queue_discard(q)) + return -XFS_ERROR(EOPNOTSUPP); if (copy_from_user(&range, urange, sizeof(range))) return -XFS_ERROR(EFAULT); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cec89dd5d7d..85668efb3e3 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -53,6 +53,9 @@ xfs_fs_geometry( xfs_fsop_geom_t *geo, int new_version) { + + memset(geo, 0, sizeof(*geo)); + geo->blocksize = mp->m_sb.sb_blocksize; geo->rtextsize = mp->m_sb.sb_rextsize; geo->agblocks = mp->m_sb.sb_agblocks; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 31b6188df22..b4bfe338ea0 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -4,6 +4,8 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU +#include <linux/mm_types.h> + #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 835214761d4..ad5770f2315 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1134,7 +1134,7 @@ struct drm_device { struct usb_device *usbdev; struct drm_sg_mem *sg; /**< Scatter gather memory */ - int num_crtcs; /**< Number of CRTCs on this device */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ void *dev_private; /**< device private data */ void *mm_private; struct address_space *dev_mapping; diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 0039f1f97ad..c4d6dbfa3ff 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -290,6 +290,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_RELAXED_FENCING 12 #define I915_PARAM_HAS_COHERENT_RINGS 13 #define I915_PARAM_HAS_EXEC_CONSTANTS 14 +#define I915_PARAM_HAS_RELAXED_DELTA 15 typedef struct drm_i915_getparam { int param; diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index 68cd248f6d3..66900e3c6eb 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h @@ -101,8 +101,8 @@ struct ieee_pfc { */ struct dcb_app { __u8 selector; - __u32 protocol; __u8 priority; + __u16 protocol; }; struct dcbmsg { diff --git a/include/linux/freezer.h b/include/linux/freezer.h index da7e52b099f..1effc8b56b4 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -109,7 +109,7 @@ static inline void freezer_count(void) } /* - * Check if the task should be counted as freezeable by the freezer + * Check if the task should be counted as freezable by the freezer */ static inline int freezer_should_skip(struct task_struct *p) { diff --git a/include/linux/fs.h b/include/linux/fs.h index bd3215940c3..e38b50a4b9d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -649,6 +649,7 @@ struct address_space { spinlock_t private_lock; /* for use by the address_space */ struct list_head private_list; /* ditto */ struct address_space *assoc_mapping; /* ditto */ + struct mutex unmap_mutex; /* to protect unmapping */ } 
__attribute__((aligned(sizeof(long)))); /* * On most architectures that alignment is already the case; but @@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); -extern int __invalidate_device(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); extern int invalidate_partition(struct gendisk *, int); #endif unsigned long invalidate_mapping_pages(struct address_space *mapping, @@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); extern int inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void address_space_init_once(struct address_space *mapping); extern void ihold(struct inode * inode); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 8e6c8c42bc3..df29c8fde36 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page, (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ ((__vma)->vm_flags & VM_HUGEPAGE))) && \ - !((__vma)->vm_flags & VM_NOHUGEPAGE)) + !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ + !is_vma_temporary_stack(__vma)) #define transparent_hugepage_defrag(__vma) \ ((transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 69747469174..fe7c4b9ae27 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -4,8 +4,8 @@ #include <linux/types.h> #include <linux/input.h> -#define MATRIX_MAX_ROWS 16 -#define MATRIX_MAX_COLS 16 +#define MATRIX_MAX_ROWS 32 +#define MATRIX_MAX_COLS 32 #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ diff --git a/include/linux/klist.h b/include/linux/klist.h index e91a4e59b77..a370ce57cf1 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -22,7 +22,7 @@ struct klist { struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); -} __attribute__ ((aligned (4))); +} __attribute__ ((aligned (sizeof(void *)))); #define KLIST_INIT(_name, _get, _put) \ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ diff --git a/include/linux/list.h b/include/linux/list.h index 9a5f8a71810..3a54266a1e8 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) * in an undefined state. 
*/ #ifndef CONFIG_DEBUG_LIST +static inline void __list_del_entry(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + static inline void list_del(struct list_head *entry) { __list_del(entry->prev, entry->next); @@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry) entry->prev = LIST_POISON2; } #else +extern void __list_del_entry(struct list_head *entry); extern void list_del(struct list_head *entry); #endif @@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old, */ static inline void list_del_init(struct list_head *entry) { - __list_del(entry->prev, entry->next); + __list_del_entry(entry); INIT_LIST_HEAD(entry); } @@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry) */ static inline void list_move(struct list_head *list, struct list_head *head) { - __list_del(list->prev, list->next); + __list_del_entry(list); list_add(list, head); } @@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head) static inline void list_move_tail(struct list_head *list, struct list_head *head) { - __list_del(list->prev, list->next); + __list_del_entry(list); list_add_tail(list, head); } diff --git a/include/linux/module.h b/include/linux/module.h index 9bdf27c7615..5de42043dff 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -62,7 +62,7 @@ struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; -}; +} __attribute__ ((__aligned__(sizeof(void *)))); struct module_kobject { diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 32fb81212fd..1ca64113efe 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -16,6 +16,8 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/init.h> +#include <linux/errno.h> +#include <linux/printk.h> #include <asm/atomic.h> /* Each escaped entry is prefixed by ESCAPE_CODE @@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val); int oprofile_add_data64(struct op_entry *entry, u64 val); int oprofile_write_commit(struct op_entry *entry); -#ifdef CONFIG_PERF_EVENTS +#ifdef CONFIG_HW_PERF_EVENTS int __init oprofile_perf_init(struct oprofile_operations *ops); void oprofile_perf_exit(void); char *op_name_from_perf_id(void); -#endif /* CONFIG_PERF_EVENTS */ +#else +static inline int __init oprofile_perf_init(struct oprofile_operations *ops) +{ + pr_info("oprofile: hardware counters not available\n"); + return -ENODEV; +} +static inline void oprofile_perf_exit(void) { } +#endif /* CONFIG_HW_PERF_EVENTS */ #endif /* OPROFILE_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index dd9c7ab3827..21415cc91cb 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -431,6 +431,8 @@ struct dev_pm_info { struct list_head entry; struct completion completion; struct wakeup_source *wakeup; +#else + unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM_RUNTIME struct timer_list suspend_timer; diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 9cff00dd6b6..03a67db03d0 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } -static inline bool device_may_wakeup(struct device *dev) -{ - return false; -} - static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; @@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct 
wakeup_source *ws) {} static inline int device_wakeup_enable(struct device *dev) { - return -EINVAL; + dev->power.should_wakeup = true; + return 0; } static inline int device_wakeup_disable(struct device *dev) { + dev->power.should_wakeup = false; return 0; } -static inline int device_init_wakeup(struct device *dev, bool val) +static inline int device_set_wakeup_enable(struct device *dev, bool enable) { - dev->power.can_wakeup = val; - return val ? -EINVAL : 0; + dev->power.should_wakeup = enable; + return 0; } +static inline int device_init_wakeup(struct device *dev, bool val) +{ + device_set_wakeup_capable(dev, val); + device_set_wakeup_enable(dev, val); + return 0; +} -static inline int device_set_wakeup_enable(struct device *dev, bool enable) +static inline bool device_may_wakeup(struct device *dev) { - return -EINVAL; + return dev->power.can_wakeup && dev->power.should_wakeup; } static inline void __pm_stay_awake(struct wakeup_source *ws) {} diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index d63dcbaea16..9026b30238f 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -14,10 +14,12 @@ #define LINUX_RIO_REGS_H /* - * In RapidIO, each device has a 2MB configuration space that is + * In RapidIO, each device has a 16MB configuration space that is * accessed via maintenance transactions. Portions of configuration * space are standardized and/or reserved. */ +#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */ + #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index a0b639f8e80..89c3e518299 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -203,6 +203,18 @@ struct rtc_device struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; + + +#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL + struct work_struct uie_task; + struct timer_list uie_timer; + /* Those fields are protected by rtc->irq_lock */ + unsigned int oldsecs; + unsigned int uie_irq_active:1; + unsigned int stop_uie_polling:1; + unsigned int uie_task_active:1; + unsigned int uie_timer_active:1; +#endif }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) @@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); +void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34..777d8a5ed06 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ -#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not 
count it as freezable */ #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ /* diff --git a/include/linux/security.h b/include/linux/security.h index c642bb8b8f5..b2b7f9749f5 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1662,7 +1662,7 @@ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -int security_capable(int cap); +int security_capable(const struct cred *cred, int cap); int security_real_capable(struct task_struct *tsk, int cap); int security_real_capable_noaudit(struct task_struct *tsk, int cap); int security_sysctl(struct ctl_table *table, int op); @@ -1856,9 +1856,9 @@ static inline int security_capset(struct cred *new, return cap_capset(new, old, effective, inheritable, permitted); } -static inline int security_capable(int cap) +static inline int security_capable(const struct cred *cred, int cap) { - return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT); + return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT); } static inline int security_real_capable(struct task_struct *tsk, int cap) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8651556dbd5..d3ec89fb412 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, const struct thermal_cooling_device_ops *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); + +#ifdef CONFIG_NET extern int generate_netlink_event(u32 orig, enum events event); +#else +static inline int generate_netlink_event(u32 orig, enum events event) +{ + return 0; +} +#endif #endif /* __THERMAL_H__ */ diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 5e86dc771da..81a927930bf 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h @@ -89,7 +89,7 @@ struct usb_cdc_acm_descriptor { #define USB_CDC_COMM_FEATURE 0x01 #define USB_CDC_CAP_LINE 0x02 -#define USB_CDC_CAP_BRK 0x04 +#define USB_CDC_CAP_BRK 0x04 #define USB_CDC_CAP_NOTIFY 0x08 /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */ @@ -271,6 +271,11 @@ struct usb_cdc_notification { __le16 wLength; } __attribute__ ((packed)); +struct usb_cdc_speed_change { + __le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */ + __le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */ +} __attribute__ ((packed)); + /*-------------------------------------------------------------------------*/ /* @@ -292,7 +297,7 @@ struct usb_cdc_ncm_ntb_parameters { __le16 wNdpOutDivisor; __le16 wNdpOutPayloadRemainder; __le16 wNdpOutAlignment; - __le16 wPadding2; + __le16 wNtbOutMaxDatagrams; } __attribute__ ((packed)); /* @@ -307,7 +312,7 @@ struct usb_cdc_ncm_nth16 { __le16 wHeaderLength; __le16 wSequence; __le16 wBlockLength; - __le16 wFpIndex; + __le16 wNdpIndex; } __attribute__ ((packed)); struct usb_cdc_ncm_nth32 { @@ -315,7 +320,7 @@ struct usb_cdc_ncm_nth32 { __le16 wHeaderLength; __le16 wSequence; __le32 dwBlockLength; - __le32 dwFpIndex; + __le32 dwNdpIndex; } __attribute__ ((packed)); /* @@ -337,7 +342,7 @@ struct usb_cdc_ncm_dpe16 { struct usb_cdc_ncm_ndp16 { __le32 dwSignature; __le16 wLength; - __le16 wNextFpIndex; + __le16 wNextNdpIndex; struct usb_cdc_ncm_dpe16 dpe16[0]; } __attribute__ ((packed)); @@ -375,6 +380,7 @@ struct usb_cdc_ncm_ndp32 { #define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 
2) #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) #define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) +#define USB_CDC_NCM_NCAP_NTB_INPUT_SIZE (1 << 5) /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ #define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) @@ -392,6 +398,13 @@ struct usb_cdc_ncm_ndp32 { #define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 #define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 +/* NTB Input Size Structure */ +struct usb_cdc_ncm_ndp_input_size { + __le32 dwNtbInMaxSize; + __le16 wNtbInMaxDatagrams; + __le16 wReserved; +} __attribute__ ((packed)); + /* CDC NCM subclass 6.2.11 SetCrcMode */ #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 #define USB_CDC_NCM_CRC_APPENDED 0x01 diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index b92e17349c7..7d1babbff07 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -16,12 +16,8 @@ #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ #define __LINUX_USB_GADGET_MSM72K_UDC_H__ -#ifdef CONFIG_ARCH_MSM7X00A -#define USB_SBUSCFG (MSM_USB_BASE + 0x0090) -#else #define USB_AHBBURST (MSM_USB_BASE + 0x0090) #define USB_AHBMODE (MSM_USB_BASE + 0x0098) -#endif #define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ #define USB_USBCMD (MSM_USB_BASE + 0x0140) diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index a85064db8f9..e4d333543a3 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -7,7 +7,8 @@ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. * - * Copyright (C) Red Hat, Inc., 2009, 2010 + * Copyright (C) Red Hat, Inc., 2009, 2010, 2011 + * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011 */ /* Feature bits */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1ac11586a2f..f7998a3bf02 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } enum { WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ - WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ + WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ @@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, /** * alloc_ordered_workqueue - allocate an ordered workqueue * @name: name of the workqueue - * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. 
They are @@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) #define create_workqueue(name) \ alloc_workqueue((name), WQ_MEM_RECLAIM, 1) -#define create_freezeable_workqueue(name) \ - alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) +#define create_freezable_workqueue(name) \ + alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) #define create_singlethread_workqueue(name) \ alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4a3cd2cd2f5..96e50e0ce3c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -89,6 +89,18 @@ #define IPV6_ADDR_SCOPE_GLOBAL 0x0e /* + * Addr flags + */ +#ifdef __KERNEL__ +#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ + ((a)->s6_addr[1] & 0x10) +#define IPV6_ADDR_MC_FLAG_PREFIX(a) \ + ((a)->s6_addr[1] & 0x20) +#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ + ((a)->s6_addr[1] & 0x40) +#endif + +/* * fragmentation header */ diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index cd85b3bc832..e505358d899 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h @@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, } #endif -static inline void -nf_tproxy_put_sock(struct sock *sk) -{ - /* TIME_WAIT inet sockets have to be handled differently */ - if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT)) - inet_twsk_put(inet_twsk(sk)); - else - sock_put(sk); -} - /* assign a socket to the skb -- consumes sk */ -int +void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 160a407c196..04f8556313d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -199,7 +199,7 @@ struct tcf_proto { struct qdisc_skb_cb { unsigned int pkt_len; - char data[]; + long data[]; }; static inline int qdisc_qlen(struct Qdisc *q) diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 8479b66c067..3fd5064dd43 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h @@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev); #define CONF_ENABLE_ESR 0x0008 #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ * (CONF_ENABLE_IRQ) in use */ +#define CONF_ENABLE_ZVCARD 0x0020 /* flags used by pcmcia_loop_config() autoconfiguration */ #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? 
*/ diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h index b4a0db2307e..1eeebd534f7 100644 --- a/include/sound/wm8903.h +++ b/include/sound/wm8903.h @@ -17,13 +17,9 @@ /* * R6 (0x06) - Mic Bias Control 0 */ -#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */ -#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */ -#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */ -#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */ +#define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */ +#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */ +#define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */ #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07fdfb6b9a9..0828b6c8610 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -8,7 +8,6 @@ #include <scsi/scsi_cmnd.h> #include <net/sock.h> #include <net/tcp.h> -#include "target_core_mib.h" #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) @@ -195,6 +194,21 @@ typedef enum { SAM_TASK_ATTR_EMULATED } t10_task_attr_index_t; +/* + * Used for target SCSI statistics + */ +typedef enum { + SCSI_INST_INDEX, + SCSI_DEVICE_INDEX, + SCSI_AUTH_INTR_INDEX, + SCSI_INDEX_TYPE_MAX +} scsi_index_t; + +struct scsi_index_table { + spinlock_t lock; + u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; +} ____cacheline_aligned; + struct se_cmd; struct t10_alua { @@ -578,8 +592,6 @@ struct se_node_acl { spinlock_t stats_lock; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t acl_pr_ref_count; - /* Used for MIB access */ - atomic_t mib_ref_count; struct se_dev_entry *device_list; struct se_session *nacl_sess; struct se_portal_group *se_tpg; @@ -595,8 +607,6 @@ struct se_node_acl { } ____cacheline_aligned; struct se_session { - /* Used for MIB access */ - atomic_t mib_ref_count; u64 sess_bin_isid; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; @@ -806,7 +816,6 @@ struct se_hba { /* Virtual iSCSI devices attached. */ u32 dev_count; u32 hba_index; - atomic_t dev_mib_access_count; atomic_t load_balance_queue; atomic_t left_queue_depth; /* Maximum queue depth the HBA can handle. 
*/ @@ -845,6 +854,12 @@ struct se_lun { #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) +struct scsi_port_stats { + u64 cmd_pdus; + u64 tx_data_octets; + u64 rx_data_octets; +} ____cacheline_aligned; + struct se_port { /* RELATIVE TARGET PORT IDENTIFER */ u16 sep_rtpi; @@ -867,6 +882,7 @@ struct se_port { } ____cacheline_aligned; struct se_tpg_np { + struct se_portal_group *tpg_np_parent; struct config_group tpg_np_group; } ____cacheline_aligned; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 66f44e56eb8..24694051157 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -111,6 +111,8 @@ struct se_subsystem_api; extern int init_se_global(void); extern void release_se_global(void); +extern void init_scsi_index_table(void); +extern u32 scsi_get_new_index(scsi_index_t); extern void transport_init_queue_obj(struct se_queue_obj *); extern int transport_subsystem_check_init(void); extern int transport_subsystem_register(struct se_subsystem_api *); diff --git a/init/calibrate.c b/init/calibrate.c index 6eb48e53d61..24fe022c55f 100644 --- a/init/calibrate.c +++ b/init/calibrate.c @@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_start = 0; read_current_timer(&start); start_jiffies = jiffies; - while (jiffies <= (start_jiffies + 1)) { + while (time_before_eq(jiffies, start_jiffies + 1)) { pre_start = start; read_current_timer(&start); } @@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_end = 0; end = post_start; - while (jiffies <= - (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) { + while (time_before_eq(jiffies, start_jiffies + 1 + + DELAY_CALIBRATION_TICKS)) { pre_end = end; read_current_timer(&end); } diff --git a/kernel/capability.c b/kernel/capability.c index 2f05303715a..9e9385f132c 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -306,7 +306,7 @@ int capable(int cap) BUG(); } - if (security_capable(cap) == 0) { + if (security_capable(current_cred(), cap) == 0) { current->flags |= PF_SUPERPRIV; return 1; } diff --git a/kernel/cred.c b/kernel/cred.c index 6a1aa004e37..3a9d6dd53a6 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void) #endif atomic_set(&new->usage, 1); +#ifdef CONFIG_DEBUG_CREDENTIALS + new->magic = CRED_MAGIC; +#endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; -#ifdef CONFIG_DEBUG_CREDENTIALS - new->magic = CRED_MAGIC; -#endif return new; error: @@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) validate_creds(old); *new = *old; + atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); @@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; - atomic_set(&new->usage, 1); - set_cred_subscribers(new, 0); put_cred(old); validate_creds(new); return new; @@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred) if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX - if (selinux_is_enabled()) { + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. 
+ */ + if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4571ae7e085..99c3bc8a6fb 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -3,6 +3,12 @@ */ #include <linux/irqdesc.h> +#ifdef CONFIG_SPARSE_IRQ +# define IRQ_BITMAP_BITS (NR_IRQS + 8196) +#else +# define IRQ_BITMAP_BITS NR_IRQS +#endif + extern int noirqdebug; #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 282f20230e6..2039bea31bd 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); static DEFINE_MUTEX(sparse_irq_lock); -static DECLARE_BITMAP(allocated_irqs, NR_IRQS); +static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); #ifdef CONFIG_SPARSE_IRQ @@ -217,6 +217,15 @@ int __init early_irq_init(void) initcnt = arch_probe_nr_irqs(); printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); + if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) + nr_irqs = IRQ_BITMAP_BITS; + + if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) + initcnt = IRQ_BITMAP_BITS; + + if (initcnt > nr_irqs) + nr_irqs = initcnt; + for (i = 0; i < initcnt; i++) { desc = alloc_desc(i, node); set_bit(i, allocated_irqs); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747d..9033c1c7082 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1100,7 +1100,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (retval) kfree(action); -#ifdef CONFIG_DEBUG_SHIRQ +#ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 891115a929a..dc49358b73f 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -23,7 +23,7 @@ #ifdef CONFIG_HARDIRQS_SW_RESEND /* Bitmap to handle software resend of interrupts: */ -static DECLARE_BITMAP(irqs_resend, NR_IRQS); +static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); /* * Run software resends of IRQ's diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 999835b6112..656222fcf76 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -782,6 +782,10 @@ retry: raw_spin_unlock_irq(&ctx->lock); } +#define MAX_INTERRUPTS (~0ULL) + +static void perf_log_throttle(struct perf_event *event, int enable); + static int event_sched_in(struct perf_event *event, struct perf_cpu_context *cpuctx, @@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event, event->state = PERF_EVENT_STATE_ACTIVE; event->oncpu = smp_processor_id(); + + /* + * Unthrottle events, since we scheduled we might have missed several + * ticks already, also for a heavily scheduling task there is little + * guarantee it'll get a tick in a timely manner. 
+ */ + if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { + perf_log_throttle(event, 1); + event->hw.interrupts = 0; + } + /* * The new state must be visible before we turn it on in the hardware: */ @@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task) } } -#define MAX_INTERRUPTS (~0ULL) - -static void perf_log_throttle(struct perf_event *event, int enable); - static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) { u64 frequency = event->attr.sample_freq; diff --git a/kernel/power/main.c b/kernel/power/main.c index 7b5db6a8561..701853042c2 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq); static int __init pm_start_workqueue(void) { - pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); + pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); return pm_wq ? 0 : -ENOMEM; } diff --git a/kernel/power/process.c b/kernel/power/process.c index d6d2a10320e..0cf3a27a6c9 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -22,7 +22,7 @@ */ #define TIMEOUT (20 * HZ) -static inline int freezeable(struct task_struct * p) +static inline int freezable(struct task_struct * p) { if ((p == current) || (p->flags & PF_NOFREEZE) || @@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only) todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { - if (frozen(p) || !freezeable(p)) + if (frozen(p) || !freezable(p)) continue; if (!freeze_task(p, sig_only)) @@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only) read_lock(&tasklist_lock); do_each_thread(g, p) { - if (!freezeable(p)) + if (!freezable(p)) continue; if (nosig_only && should_send_signal(p)) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0dac75ea445..64db648ff91 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1519,11 +1519,8 @@ static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages, unsigned int nr_highmem) { - int error = 0; - if (nr_highmem > 0) { - error = get_highmem_buffer(PG_ANY); - if (error) + if (get_highmem_buffer(PG_ANY)) goto err_out; if (nr_highmem > alloc_highmem) { nr_highmem -= alloc_highmem; @@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, err_out: swsusp_free(); - return error; + return -ENOMEM; } asmlinkage int swsusp_save(void) diff --git a/kernel/printk.c b/kernel/printk.c index 2ddbdc73aad..36231525e22 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -262,25 +262,47 @@ int dmesg_restrict = 1; int dmesg_restrict; #endif +static int syslog_action_restricted(int type) +{ + if (dmesg_restrict) + return 1; + /* Unless restricted, we allow "read all" and "get buffer size" for everybody */ + return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; +} + +static int check_syslog_permissions(int type, bool from_file) +{ + /* + * If this is from /proc/kmsg and we've already opened it, then we've + * already done the capabilities checks at open time. 
+ */ + if (from_file && type != SYSLOG_ACTION_OPEN) + return 0; + + if (syslog_action_restricted(type)) { + if (capable(CAP_SYSLOG)) + return 0; + /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ + if (capable(CAP_SYS_ADMIN)) { + WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " + "but no CAP_SYSLOG (deprecated).\n"); + return 0; + } + return -EPERM; + } + return 0; +} + int do_syslog(int type, char __user *buf, int len, bool from_file) { unsigned i, j, limit, count; int do_clear = 0; char c; - int error = 0; + int error; - /* - * If this is from /proc/kmsg we only do the capabilities checks - * at open time. - */ - if (type == SYSLOG_ACTION_OPEN || !from_file) { - if (dmesg_restrict && !capable(CAP_SYSLOG)) - goto warn; /* switch to return -EPERM after 2.6.39 */ - if ((type != SYSLOG_ACTION_READ_ALL && - type != SYSLOG_ACTION_SIZE_BUFFER) && - !capable(CAP_SYSLOG)) - goto warn; /* switch to return -EPERM after 2.6.39 */ - } + error = check_syslog_permissions(type, from_file); + if (error) + goto out; error = security_syslog(type); if (error) @@ -423,12 +445,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file) } out: return error; -warn: - /* remove after 2.6.39 */ - if (capable(CAP_SYS_ADMIN)) - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " - "but no CAP_SYSLOG (deprecated and denied).\n"); - return -EPERM; } SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 99bbaa3e5b0..1708b1e2972 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) child->exit_code = data; dead = __ptrace_detach(current, child); if (!child->exit_state) - wake_up_process(child); + wake_up_state(child, TASK_TRACED | TASK_STOPPED); } write_unlock_irq(&tasklist_lock); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 48b2761b566..a3b5aff6260 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; } +/* + * Check whether the broadcast device supports oneshot. + */ +bool tick_broadcast_oneshot_available(void) +{ + struct clock_event_device *bc = tick_broadcast_device.evtdev; + + return bc ? 
bc->features & CLOCK_EVT_FEAT_ONESHOT : false; +} + #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 051bc80a0c4..ed228ef6f6b 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); - return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); + if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) + return 0; + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) + return 1; + return tick_broadcast_oneshot_available(); } /* diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefbc1f6..f65d3a723a6 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast(int cpu); +bool tick_broadcast_oneshot_available(void); # else /* BROADCAST */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { @@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } static inline void tick_check_oneshot_broadcast(int cpu) { } +static inline bool tick_broadcast_oneshot_available(void) { return true; } # endif /* !BROADCAST */ #else /* !ONESHOT */ @@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) return 0; } static inline int tick_broadcast_oneshot_active(void) { return 0; } +static inline bool tick_broadcast_oneshot_available(void) { return false; } #endif /* !TICK_ONESHOT */ /* diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 32a19f9397f..3258455549f 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym) char symname[KSYM_NAME_LEN]; if (lookup_symbol_name((unsigned long)sym, symname) < 0) - SEQ_printf(m, "<%p>", sym); + SEQ_printf(m, "<%pK>", sym); else SEQ_printf(m, "%s", symname); } @@ -112,7 +112,7 @@ next_one: static void print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) { - SEQ_printf(m, " .base: %p\n", base); + SEQ_printf(m, " .base: %pK\n", base); SEQ_printf(m, " .index: %d\n", base->index); SEQ_printf(m, " .resolution: %Lu nsecs\n", diff --git a/kernel/timer.c b/kernel/timer.c index d53ce66daea..d6459923d24 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync); * * Synchronization rules: Callers must prevent restarting of the timer, * otherwise this function is meaningless. It must not be called from - * hardirq contexts. The caller must not hold locks which would prevent + * interrupt contexts. The caller must not hold locks which would prevent * completion of the timer's handler. The timer's handler must not call * add_timer_on(). Upon exit the timer is not queued and the handler is * not running on any CPU. 
@@ -971,12 +971,10 @@ int del_timer_sync(struct timer_list *timer) #ifdef CONFIG_LOCKDEP unsigned long flags; - raw_local_irq_save(flags); - local_bh_disable(); + local_irq_save(flags); lock_map_acquire(&timer->lockdep_map); lock_map_release(&timer->lockdep_map); - _local_bh_enable(); - raw_local_irq_restore(flags); + local_irq_restore(flags); #endif /* * don't use it in hardirq context, because it diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 153562d0b93..d95721f3370 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) !blk_tracer_enabled)) return; + /* + * If the BLK_TC_NOTIFY action mask isn't set, don't send any note + * message to the trace. + */ + if (!(bt->act_mask & BLK_TC_NOTIFY)) + return; + local_irq_save(flags); buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); va_start(args, fmt); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f37f974aa81..18bb15776c5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) goto out_save; } - printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", - cpu, PTR_ERR(event)); + + /* vary the KERN level based on the returned errno */ + if (PTR_ERR(event) == -EOPNOTSUPP) + printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); + else if (PTR_ERR(event) == -ENOENT) + printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); + else + printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); return PTR_ERR(event); /* success path */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa681..ee6578b578a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -79,7 +79,9 @@ enum { MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ - MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ + MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, + /* call for help after 10ms + (min two ticks) */ MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ CREATE_COOLDOWN = HZ, /* time to breath after fail */ TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ @@ -2047,6 +2049,15 @@ repeat: move_linked_works(work, scheduled, &n); process_scheduled_works(rescuer); + + /* + * Leave this gcwq. If keep_working() is %true, notify a + * regular worker; otherwise, we end up with 0 concurrency + * and stalling the execution. + */ + if (keep_working(gcwq)) + wake_up_worker(gcwq); + spin_unlock_irq(&gcwq->lock); } @@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, */ spin_lock(&workqueue_lock); - if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) + if (workqueue_freezing && wq->flags & WQ_FREEZABLE) for_each_cwq_cpu(cpu, wq) get_cwq(cpu, wq)->max_active = 0; @@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) spin_lock_irq(&gcwq->lock); - if (!(wq->flags & WQ_FREEZEABLE) || + if (!(wq->flags & WQ_FREEZABLE) || !(gcwq->flags & GCWQ_FREEZING)) get_cwq(gcwq->cpu, wq)->max_active = max_active; @@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) * want to get it over with ASAP - spam rescuers, wake up as * many idlers as necessary and create new ones till the * worklist is empty. 
Note that if the gcwq is frozen, there - * may be frozen works in freezeable cwqs. Don't declare + * may be frozen works in freezable cwqs. Don't declare * completion while frozen. */ while (gcwq->nr_workers != gcwq->nr_idle || @@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); /** * freeze_workqueues_begin - begin freezing workqueues * - * Start freezing workqueues. After this function returns, all - * freezeable workqueues will queue new works to their frozen_works - * list instead of gcwq->worklist. + * Start freezing workqueues. After this function returns, all freezable + * workqueues will queue new works to their frozen_works list instead of + * gcwq->worklist. * * CONTEXT: * Grabs and releases workqueue_lock and gcwq->lock's. @@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (cwq && wq->flags & WQ_FREEZEABLE) + if (cwq && wq->flags & WQ_FREEZABLE) cwq->max_active = 0; } @@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) } /** - * freeze_workqueues_busy - are freezeable workqueues still busy? + * freeze_workqueues_busy - are freezable workqueues still busy? * * Check whether freezing is complete. This function must be called * between freeze_workqueues_begin() and thaw_workqueues(). @@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) * Grabs and releases workqueue_lock. * * RETURNS: - * %true if some freezeable workqueues are still busy. %false if - * freezing is complete. + * %true if some freezable workqueues are still busy. %false if freezing + * is complete. */ bool freeze_workqueues_busy(void) { @@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZABLE)) continue; BUG_ON(cwq->nr_active < 0); @@ -3690,7 +3701,7 @@ void thaw_workqueues(void) list_for_each_entry(wq, &workqueues, list) { struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); - if (!cwq || !(wq->flags & WQ_FREEZEABLE)) + if (!cwq || !(wq->flags & WQ_FREEZABLE)) continue; /* restore max_active and repopulate worklist */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3967c2356e3..2b97418c67e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && \ - (CRIS || M68K || M68KNOMMU || FRV || UML || \ + (CRIS || M68K || FRV || UML || \ AVR32 || SUPERH || BLACKFIN || MN10300) || \ ARCH_WANT_FRAME_POINTERS default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16c..b8029a5583f 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -35,6 +35,31 @@ void __list_add(struct list_head *new, } EXPORT_SYMBOL(__list_add); +void __list_del_entry(struct list_head *entry) +{ + struct list_head *prev, *next; + + prev = entry->prev; + next = entry->next; + + if (WARN(next == LIST_POISON1, + "list_del corruption, %p->next is LIST_POISON1 (%p)\n", + entry, LIST_POISON1) || + WARN(prev == LIST_POISON2, + "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", + entry, LIST_POISON2) || + WARN(prev->next != entry, + "list_del corruption. prev->next should be %p, " + "but was %p\n", entry, prev->next) || + WARN(next->prev != entry, + "list_del corruption. 
next->prev should be %p, " + "but was %p\n", entry, next->prev)) + return; + + __list_del(prev, next); +} +EXPORT_SYMBOL(__list_del_entry); + /** * list_del - deletes entry from list. * @entry: the element to delete from the list. @@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); */ void list_del(struct list_head *entry) { - WARN(entry->next == LIST_POISON1, - "list_del corruption, next is LIST_POISON1 (%p)\n", - LIST_POISON1); - WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, - "list_del corruption, prev is LIST_POISON2 (%p)\n", - LIST_POISON2); - WARN(entry->prev->next != entry, - "list_del corruption. prev->next should be %p, " - "but was %p\n", entry, entry->prev->next); - WARN(entry->next->prev != entry, - "list_del corruption. next->prev should be %p, " - "but was %p\n", entry, entry->next->prev); - __list_del(entry->prev, entry->next); + __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c47bbe11b80..93ca08b8a45 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* * Ensure that the address returned is DMA'ble */ - if (!dma_capable(dev, dev_addr, size)) - panic("map_single: bounce buffer is not DMA'ble"); + if (!dma_capable(dev, dev_addr, size)) { + swiotlb_tbl_unmap_single(dev, map, size, dir); + dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); + } return dev_addr; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b6c1ce3c53b..3e29781ee76 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm, /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ if (!vma->anon_vma || vma->vm_ops || vma->vm_file) goto out; + if (is_vma_temporary_stack(vma)) + goto out; VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); pgd = pgd_offset(mm, address); @@ -1852,7 +1854,6 @@ static void collapse_huge_page(struct mm_struct *mm, set_pmd_at(mm, address, pmd, _pmd); spin_unlock(&mm->page_table_lock); anon_vma_unlock(vma->anon_vma); - mem_cgroup_uncharge_page(new_page); goto out; } @@ -1898,6 +1899,7 @@ out_up_write: return; out: + mem_cgroup_uncharge_page(new_page); #ifdef CONFIG_NUMA put_page(new_page); #endif @@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) { + skip: progress++; continue; } - /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { - khugepaged_scan.address = vma->vm_end; - progress++; - continue; - } + if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + goto skip; + if (is_vma_temporary_stack(vma)) + goto skip; + VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; - if (hstart >= hend) { - progress++; - continue; - } + if (hstart >= hend) + goto skip; + if (khugepaged_scan.address > hend) + goto skip; if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; - if (khugepaged_scan.address > hend) { - khugepaged_scan.address = hend + HPAGE_PMD_SIZE; - progress++; - continue; - } - BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { int ret; @@ -2086,7 +2083,7 @@ breakouterloop: breakouterloop_mmap_sem: 
spin_lock(&khugepaged_mm_lock); - BUG_ON(khugepaged_scan.mm_slot != mm_slot); + VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); /* * Release the current mm_slot if this mm is about to die, or * if we scanned all vmas of this mm. @@ -2241,9 +2238,9 @@ static int khugepaged(void *none) for (;;) { mutex_unlock(&khugepaged_mutex); - BUG_ON(khugepaged_thread != current); + VM_BUG_ON(khugepaged_thread != current); khugepaged_loop(); - BUG_ON(khugepaged_thread != current); + VM_BUG_ON(khugepaged_thread != current); mutex_lock(&khugepaged_mutex); if (!khugepaged_enabled()) diff --git a/mm/memblock.c b/mm/memblock.c index bdba245d8af..4618fda975a 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -137,8 +137,6 @@ static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, BUG_ON(0 == size); - size = memblock_align_up(size, align); - /* Pump up max_addr */ if (end == MEMBLOCK_ALLOC_ACCESSIBLE) end = memblock.current_limit; diff --git a/mm/memory.c b/mm/memory.c index 31250faff39..5823698c2b7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2219,7 +2219,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); - page_cache_release(old_page); goto unlock; } page_cache_release(old_page); @@ -2289,7 +2288,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); - page_cache_release(old_page); goto unlock; } @@ -2367,16 +2365,6 @@ gotten: } __SetPageUptodate(new_page); - /* - * Don't let another task, with possibly unlocked vma, - * keep the mlocked page. - */ - if ((vma->vm_flags & VM_LOCKED) && old_page) { - lock_page(old_page); /* for LRU manipulation */ - clear_page_mlock(old_page); - unlock_page(old_page); - } - if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) goto oom_free_new; @@ -2444,10 +2432,20 @@ gotten: if (new_page) page_cache_release(new_page); - if (old_page) - page_cache_release(old_page); unlock: pte_unmap_unlock(page_table, ptl); + if (old_page) { + /* + * Don't let another task, with possibly unlocked vma, + * keep the mlocked page. + */ + if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) { + lock_page(old_page); /* LRU manipulation */ + munlock_vma_page(old_page); + unlock_page(old_page); + } + page_cache_release(old_page); + } return ret; oom_free_new: page_cache_release(new_page); @@ -2650,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping, details.last_index = ULONG_MAX; details.i_mmap_lock = &mapping->i_mmap_lock; + mutex_lock(&mapping->unmap_mutex); spin_lock(&mapping->i_mmap_lock); /* Protect against endless unmapping loops */ @@ -2666,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping, if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); spin_unlock(&mapping->i_mmap_lock); + mutex_unlock(&mapping->unmap_mutex); } EXPORT_SYMBOL(unmap_mapping_range); @@ -3053,12 +3053,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, goto out; } charged = 1; - /* - * Don't let another task, with possibly unlocked vma, - * keep the mlocked page. 
- */ - if (vma->vm_flags & VM_LOCKED) - clear_page_mlock(vmf.page); copy_user_highpage(page, vmf.page, address, vma); __SetPageUptodate(page); } else { diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 368fc9d2361..49355a970be 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1830,7 +1830,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, if (unlikely(pol->mode == MPOL_INTERLEAVE)) { unsigned nid; - nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); + nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); page = alloc_page_interleave(gfp, order, nid); put_mems_allowed(); diff --git a/mm/migrate.c b/mm/migrate.c index 76611525380..352de555626 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, return -EPERM; /* Find the mm_struct */ - read_lock(&tasklist_lock); + rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); return -ESRCH; } mm = get_task_mm(task); - read_unlock(&tasklist_lock); + rcu_read_unlock(); if (!mm) return -EINVAL; diff --git a/mm/mremap.c b/mm/mremap.c index 9925b6391b8..1de98d492dd 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, */ mapping = vma->vm_file->f_mapping; spin_lock(&mapping->i_mmap_lock); - if (new_vma->vm_truncate_count && - new_vma->vm_truncate_count != vma->vm_truncate_count) - new_vma->vm_truncate_count = 0; + new_vma->vm_truncate_count = 0; } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a873e61e312..cdef1d4b4e4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count) for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { unsigned long check = pfn + iter; - if (!pfn_valid_within(check)) { - iter++; + if (!pfn_valid_within(check)) continue; - } + page = pfn_to_page(check); if (!page_count(page)) { if (PageBuddy(page)) diff --git a/mm/swapfile.c b/mm/swapfile.c index 07a458d72fa..0341c5700e3 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = -EINVAL; if (S_ISBLK(inode->i_mode)) { - bdev = I_BDEV(inode); + bdev = bdgrab(I_BDEV(inode)); error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, sys_swapon); if (error < 0) { diff --git a/mm/truncate.c b/mm/truncate.c index 49feb46e77b..d64296be00d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping, next = start; while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; pgoff_t page_index = page->index; @@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping, unlock_page(page); } pagevec_release(&pvec); + mem_cgroup_uncharge_end(); cond_resched(); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 148c6e630df..6771ea70bfe 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone, if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) return false; - /* - * If we failed to reclaim and have scanned the full list, stop. 
- * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far - * faster but obviously would be less likely to succeed - * allocation. If this is desirable, use GFP_REPEAT to decide - * if both reclaimed and scanned should be checked or just - * reclaimed - */ - if (!nr_reclaimed && !nr_scanned) - return false; + /* Consider stopping depending on scan and reclaim activity */ + if (sc->gfp_mask & __GFP_REPEAT) { + /* + * For __GFP_REPEAT allocations, stop reclaiming if the + * full LRU list has been scanned and we are still failing + * to reclaim pages. This full LRU scan is potentially + * expensive but a __GFP_REPEAT caller really wants to succeed + */ + if (!nr_reclaimed && !nr_scanned) + return false; + } else { + /* + * For non-__GFP_REPEAT allocations which can presumably + * fail without consequence, stop if we failed to reclaim + * any pages from the last SWAP_CLUSTER_MAX number of + * pages that were scanned. This will return to the + * caller faster at the risk reclaim/compaction and + * the resulting allocation attempt fails + */ + if (!nr_reclaimed) + return false; + } /* * If we have not reclaimed enough pages for compaction and the @@ -1882,12 +1894,12 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long nr[NR_LRU_LISTS]; unsigned long nr_to_scan; enum lru_list l; - unsigned long nr_reclaimed; + unsigned long nr_reclaimed, nr_scanned; unsigned long nr_to_reclaim = sc->nr_to_reclaim; - unsigned long nr_scanned = sc->nr_scanned; restart: nr_reclaimed = 0; + nr_scanned = sc->nr_scanned; get_scan_count(zone, sc, nr, priority); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index ee41fef04b2..d1a61132254 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c @@ -50,12 +50,12 @@ static struct sk_buff *frag_merge_packet(struct list_head *head, skb = tfp->skb; } + if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0) + goto err; + skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); - if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) { - /* free buffered skb, skb will be freed later */ - kfree_skb(tfp->skb); - return NULL; - } + if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) + goto err; /* move free entry to end */ tfp->skb = NULL; @@ -70,6 +70,11 @@ static struct sk_buff *frag_merge_packet(struct list_head *head, unicast_packet->packet_type = BAT_UNICAST; return skb; + +err: + /* free buffered skb, skb will be freed later */ + kfree_skb(tfp->skb); + return NULL; } static void frag_create_entry(struct list_head *head, struct sk_buff *skb) diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7550abb0c96..675614e38e1 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; + sk->sk_state = BT_DISCONN; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 2575c2db640..d7b9af4703d 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) break; } + tty_unlock(); schedule(); + tty_lock(); } set_current_state(TASK_RUNNING); remove_wait_queue(&dev->wait, &wait); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6f6d8e1b776..88e4aa9cb1f 100644 --- 
a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) if (is_multicast_ether_addr(dest)) { mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { - if ((mdst && !hlist_unhashed(&mdst->mglist)) || + if ((mdst && mdst->mglist) || br_multicast_is_router(br)) skb2 = skb; br_multicast_forward(mdst, skb, skb2); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f701a21acb3..030a002ff8e 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -37,10 +37,9 @@ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static inline int ipv6_is_local_multicast(const struct in6_addr *addr) +static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) { - if (ipv6_addr_is_multicast(addr) && - IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) + if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) return 1; return 0; } @@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data) if (!netif_running(br->dev) || timer_pending(&mp->timer)) goto out; - if (!hlist_unhashed(&mp->mglist)) - hlist_del_init(&mp->mglist); + mp->mglist = false; if (mp->ports) goto out; @@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br, del_timer(&p->query_timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); - if (!mp->ports && hlist_unhashed(&mp->mglist) && + if (!mp->ports && !mp->mglist && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); @@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); - ipv6_eth_mc_map(group, eth->h_dest); eth->h_proto = htons(ETH_P_IPV6); skb_put(skb, sizeof(*eth)); @@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, ip6h->payload_len = htons(8 + sizeof(*mldq)); ip6h->nexthdr = IPPROTO_HOPOPTS; ip6h->hop_limit = 1; - ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); + ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, + &ip6h->saddr); ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); + ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ @@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data) struct net_bridge *br = mp->br; spin_lock(&br->multicast_lock); - if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || + if (!netif_running(br->dev) || !mp->mglist || mp->queries_sent >= br->multicast_last_member_count) goto out; @@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + mp->mglist = true; mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; } @@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, { struct br_ip br_group; - if (ipv6_is_local_multicast(group)) + if (!ipv6_is_transient_multicast(group)) return 0; ipv6_addr_copy(&br_group.u.ip6, group); - br_group.proto = htons(ETH_P_IP); + br_group.proto = htons(ETH_P_IPV6); return br_multicast_add_group(br, port, &br_group); } @@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, nsrcs = skb_header_pointer(skb, len + offsetof(struct mld2_grec, - grec_mca), + grec_nsrcs), sizeof(_nsrcs), &_nsrcs); if (!nsrcs) return -EINVAL; if (!pskb_may_pull(skb, len + 
sizeof(*grec) + - sizeof(struct in6_addr) * (*nsrcs))) + sizeof(struct in6_addr) * ntohs(*nsrcs))) return -EINVAL; grec = (struct mld2_grec *)(skb->data + len); - len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); + len += sizeof(*grec) + + sizeof(struct in6_addr) * ntohs(*nsrcs); /* We treat these as MLDv1 reports for now. */ switch (grec->grec_type) { @@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, max_delay *= br->multicast_last_member_count; - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) @@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) - mod_timer(&mp->timer, now + max_delay); + mod_timer(&p->timer, now + max_delay); } out: @@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, goto out; max_delay *= br->multicast_last_member_count; - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) @@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) - mod_timer(&mp->timer, now + max_delay); + mod_timer(&p->timer, now + max_delay); } out: @@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br, br->multicast_last_member_interval; if (!port) { - if (!hlist_unhashed(&mp->mglist) && + if (mp->mglist && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { @@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, { struct br_ip br_group; - if (ipv6_is_local_multicast(group)) + if (!ipv6_is_transient_multicast(group)) return; ipv6_addr_copy(&br_group.u.ip6, group); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 84aac7734bf..4e1b620b6be 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -84,13 +84,13 @@ struct net_bridge_port_group { struct net_bridge_mdb_entry { struct hlist_node hlist[2]; - struct hlist_node mglist; struct net_bridge *br; struct net_bridge_port_group __rcu *ports; struct rcu_head rcu; struct timer_list timer; struct timer_list query_timer; struct br_ip addr; + bool mglist; u32 queries_sent; }; @@ -238,7 +238,6 @@ struct net_bridge spinlock_t multicast_lock; struct net_bridge_mdb_htable __rcu *mdb; struct hlist_head router_list; - struct hlist_head mglist; struct timer_list multicast_router_timer; struct timer_list multicast_querier_timer; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index fa9dab372b6..6008d6dc18a 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -394,9 +394,7 @@ static void ipcaif_net_setup(struct net_device *dev) priv->conn_req.sockaddr.u.dgm.connection_id = -1; priv->flowenabled = false; - ASSERT_RTNL(); init_waitqueue_head(&priv->netmgmt_wq); - list_add(&priv->list_field, &chnl_net_list); } @@ -453,6 +451,8 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, ret = register_netdevice(dev); if (ret) pr_warn("device rtml registration failed\n"); + else + list_add(&caifdev->list_field, &chnl_net_list); return ret; } diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 
dff633d62e5..35b36b86d76 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) { struct kvec iov = {buf, len}; struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; - return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); + if (r == -EAGAIN) + r = 0; + return r; } /* @@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, size_t kvlen, size_t len, int more) { struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + int r; if (more) msg.msg_flags |= MSG_MORE; else msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ - return kernel_sendmsg(sock, &msg, iov, kvlen, len); + r = kernel_sendmsg(sock, &msg, iov, kvlen, len); + if (r == -EAGAIN) + r = 0; + return r; } @@ -847,6 +855,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) (msg->pages || msg->pagelist || msg->bio || in_trail)) kunmap(page); + if (ret == -EAGAIN) + ret = 0; if (ret <= 0) goto out; @@ -1737,16 +1747,12 @@ more_kvec: if (con->out_skip) { ret = write_partial_skip(con); if (ret <= 0) - goto done; - if (ret < 0) { - dout("try_write write_partial_skip err %d\n", ret); - goto done; - } + goto out; } if (con->out_kvec_left) { ret = write_partial_kvec(con); if (ret <= 0) - goto done; + goto out; } /* msg pages? */ @@ -1761,11 +1767,11 @@ more_kvec: if (ret == 1) goto more_kvec; /* we need to send the footer, too! */ if (ret == 0) - goto done; + goto out; if (ret < 0) { dout("try_write write_partial_msg_pages err %d\n", ret); - goto done; + goto out; } } @@ -1789,10 +1795,9 @@ do_next: /* Nothing to do! */ clear_bit(WRITE_PENDING, &con->state); dout("try_write nothing else to write.\n"); -done: ret = 0; out: - dout("try_write done on %p\n", con); + dout("try_write done on %p ret %d\n", con, ret); return ret; } @@ -1821,19 +1826,17 @@ more: dout("try_read connecting\n"); ret = read_partial_banner(con); if (ret <= 0) - goto done; - if (process_banner(con) < 0) { - ret = -1; goto out; - } + ret = process_banner(con); + if (ret < 0) + goto out; } ret = read_partial_connect(con); if (ret <= 0) - goto done; - if (process_connect(con) < 0) { - ret = -1; goto out; - } + ret = process_connect(con); + if (ret < 0) + goto out; goto more; } @@ -1848,7 +1851,7 @@ more: dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); ret = ceph_tcp_recvmsg(con->sock, buf, skip); if (ret <= 0) - goto done; + goto out; con->in_base_pos += ret; if (con->in_base_pos) goto more; @@ -1859,7 +1862,7 @@ more: */ ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); if (ret <= 0) - goto done; + goto out; dout("try_read got tag %d\n", (int)con->in_tag); switch (con->in_tag) { case CEPH_MSGR_TAG_MSG: @@ -1870,7 +1873,7 @@ more: break; case CEPH_MSGR_TAG_CLOSE: set_bit(CLOSED, &con->state); /* fixme */ - goto done; + goto out; default: goto bad_tag; } @@ -1882,13 +1885,12 @@ more: case -EBADMSG: con->error_msg = "bad crc"; ret = -EIO; - goto out; + break; case -EIO: con->error_msg = "io error"; - goto out; - default: - goto done; + break; } + goto out; } if (con->in_tag == CEPH_MSGR_TAG_READY) goto more; @@ -1898,15 +1900,13 @@ more: if (con->in_tag == CEPH_MSGR_TAG_ACK) { ret = read_partial_ack(con); if (ret <= 0) - goto done; + goto out; process_ack(con); goto more; } -done: - ret = 0; out: - dout("try_read done on %p\n", con); + dout("try_read done on %p ret %d\n", con, ret); return ret; bad_tag: diff 
--git a/net/core/dev.c b/net/core/dev.c index b6d0bf875a8..8ae6631abcc 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1280,10 +1280,13 @@ static int __dev_close_many(struct list_head *head) static int __dev_close(struct net_device *dev) { + int retval; LIST_HEAD(single); list_add(&dev->unreg_list, &single); - return __dev_close_many(&single); + retval = __dev_close_many(&single); + list_del(&single); + return retval; } int dev_close_many(struct list_head *head) @@ -1325,7 +1328,7 @@ int dev_close(struct net_device *dev) list_add(&dev->unreg_list, &single); dev_close_many(&single); - + list_del(&single); return 0; } EXPORT_SYMBOL(dev_close); @@ -5063,6 +5066,7 @@ static void rollback_registered(struct net_device *dev) list_add(&dev->unreg_list, &single); rollback_registered_many(&single); + list_del(&single); } unsigned long netdev_fix_features(unsigned long features, const char *name) @@ -5660,30 +5664,35 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); + dev->gso_max_size = GSO_MAX_SIZE; + + INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); + dev->ethtool_ntuple_list.count = 0; + INIT_LIST_HEAD(&dev->napi_list); + INIT_LIST_HEAD(&dev->unreg_list); + INIT_LIST_HEAD(&dev->link_watch_list); + dev->priv_flags = IFF_XMIT_DST_RELEASE; + setup(dev); + dev->num_tx_queues = txqs; dev->real_num_tx_queues = txqs; if (netif_alloc_netdev_queues(dev)) - goto free_pcpu; + goto free_all; #ifdef CONFIG_RPS dev->num_rx_queues = rxqs; dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) - goto free_pcpu; + goto free_all; #endif - dev->gso_max_size = GSO_MAX_SIZE; - - INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list); - dev->ethtool_ntuple_list.count = 0; - INIT_LIST_HEAD(&dev->napi_list); - INIT_LIST_HEAD(&dev->unreg_list); - INIT_LIST_HEAD(&dev->link_watch_list); - dev->priv_flags = IFF_XMIT_DST_RELEASE; - setup(dev); strcpy(dev->name, name); return dev; +free_all: + free_netdev(dev); + return NULL; + free_pcpu: free_percpu(dev->pcpu_refcnt); kfree(dev->_tx); @@ -6211,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) } } unregister_netdevice_many(&dev_kill_list); + list_del(&dev_kill_list); rtnl_unlock(); } diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 6b03f561cae..d5074a56728 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, dcb->cmd = DCB_CMD_GAPP; app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); + if (!app_nest) + goto out_cancel; + ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; @@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp); u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) { struct dcb_app_type *itr; + struct dcb_app_type event; + + memcpy(&event.name, dev->name, sizeof(event.name)); + memcpy(&event.app, new, sizeof(event.app)); spin_lock(&dcb_lock); /* Search for existing match and replace */ @@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) } out: spin_unlock(&dcb_lock); - call_dcbevent_notifiers(DCB_APP_EVENT, new); + call_dcbevent_notifiers(DCB_APP_EVENT, &event); return 0; } EXPORT_SYMBOL(dcb_setapp); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 748cb5b337b..df4616fce92 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) return mtu >= 68; } +static void inetdev_send_gratuitous_arp(struct net_device *dev, + struct in_device 
*in_dev) + +{ + struct in_ifaddr *ifa = in_dev->ifa_list; + + if (!ifa) + return; + + arp_send(ARPOP_REQUEST, ETH_P_ARP, + ifa->ifa_address, dev, + ifa->ifa_address, NULL, + dev->dev_addr, NULL); +} + /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, @@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, } ip_mc_up(in_dev); /* fall through */ - case NETDEV_NOTIFY_PEERS: case NETDEV_CHANGEADDR: + if (!IN_DEV_ARP_NOTIFY(in_dev)) + break; + /* fall through */ + case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */ - if (IN_DEV_ARP_NOTIFY(in_dev)) { - struct in_ifaddr *ifa = in_dev->ifa_list; - - if (ifa) - arp_send(ARPOP_REQUEST, ETH_P_ARP, - ifa->ifa_address, dev, - ifa->ifa_address, NULL, - dev->dev_addr, NULL); - } + inetdev_send_gratuitous_arp(dev, in_dev); break; case NETDEV_DOWN: ip_mc_down(in_dev); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c5af909cf70..3c8dfa16614 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -505,7 +505,9 @@ restart: } rcu_read_unlock(); + local_bh_disable(); inet_twsk_deschedule(tw, twdr); + local_bh_enable(); inet_twsk_put(tw); goto restart_rcu; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eb68a0e34e4..6613edfac28 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev .fl4_dst = dst, .fl4_src = tiph->saddr, .fl4_tos = RT_TOS(tos), + .proto = IPPROTO_GRE, .fl_gre_key = tunnel->parms.o_key }; if (ip_route_output_key(dev_net(dev), &rt, &fl)) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 788a3e74834..6ed6603c2f6 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .destroy = ipv4_dst_destroy, .check = ipv4_blackhole_dst_check, .default_mtu = ipv4_blackhole_default_mtu, + .default_advmss = ipv4_default_advmss, .update_pmtu = ipv4_rt_blackhole_update_pmtu, }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb7f82ebf4a..65f6c040624 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, } /* D-SACK for already forgotten data... Do dumb counting. */ - if (dup_sack && + if (dup_sack && tp->undo_marker && tp->undo_retrans && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; @@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { - if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) + if (tp->undo_marker && tp->undo_retrans && + after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 406f320336e..dfa5beb0c1c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) if (!tp->retrans_stamp) tp->retrans_stamp = TCP_SKB_CB(skb)->when; - tp->undo_retrans++; + tp->undo_retrans += tcp_skb_pcount(skb); /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). 
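The tcp_input.c and tcp_output.c hunks above tighten the D-SACK undo accounting: tp->undo_retrans is only decremented while an undo episode is actually active (tp->undo_marker and tp->undo_retrans non-zero), and tcp_retransmit_skb() now charges tcp_skb_pcount(skb) segments per retransmitted skb instead of one. A minimal, self-contained sketch of that bookkeeping follows; the struct and function names here are hypothetical simplifications, not the kernel's struct tcp_sock or its helpers.

/* Hypothetical, simplified model of the undo_retrans accounting after
 * this change; the real fields live in struct tcp_sock. */
#include <stdio.h>

struct tcp_undo_state {
	unsigned int undo_marker;	/* snd_una at start of undo episode, 0 = inactive */
	int undo_retrans;		/* segments retransmitted during the episode */
};

/* On retransmission: count every TSO sub-segment, not just the skb. */
static void on_retransmit(struct tcp_undo_state *tp, int skb_pcount)
{
	tp->undo_retrans += skb_pcount;	/* previously incremented by 1 */
}

/* On a D-SACK reporting a retransmission as spurious. */
static void on_dsack(struct tcp_undo_state *tp)
{
	/* Only credit the D-SACK while an undo episode is active. */
	if (tp->undo_marker && tp->undo_retrans)
		tp->undo_retrans--;
}

int main(void)
{
	struct tcp_undo_state tp = { .undo_marker = 1000, .undo_retrans = 0 };

	on_retransmit(&tp, 3);	/* one TSO skb covering 3 segments */
	on_dsack(&tp);
	on_dsack(&tp);
	printf("undo_retrans = %d\n", tp.undo_retrans);	/* prints 1 */
	return 0;
}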
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 09c88891a75..de338037a73 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -410,7 +410,7 @@ fallback: if (p != NULL) { sb_add(m, "%02x", *p++); for (i = 1; i < len; i++) - sb_add(m, ":%02x", p[i]); + sb_add(m, ":%02x", *p++); } sb_add(m, " "); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1c29f95695d..a998db6e789 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { .destroy = ip6_dst_destroy, .check = ip6_dst_check, .default_mtu = ip6_blackhole_default_mtu, + .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, }; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 4bc8a9250cf..9cd73b11506 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1822,6 +1822,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, *cookie ^= 2; IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; local->hw_roc_skb = skb; + local->hw_roc_skb_for_status = skb; mutex_unlock(&local->mtx); return 0; @@ -1875,6 +1876,7 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, if (ret == 0) { kfree_skb(local->hw_roc_skb); local->hw_roc_skb = NULL; + local->hw_roc_skb_for_status = NULL; } mutex_unlock(&local->mtx); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c47d7c0e48a..533fd32f49f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -953,7 +953,7 @@ struct ieee80211_local { struct ieee80211_channel *hw_roc_channel; struct net_device *hw_roc_dev; - struct sk_buff *hw_roc_skb; + struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status; struct work_struct hw_roc_start, hw_roc_done; enum nl80211_channel_type hw_roc_channel_type; unsigned int hw_roc_duration; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8acba456744..7a10a8d1b2d 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); + list_del(&unreg_list); } static u32 ieee80211_idle_off(struct ieee80211_local *local, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 45fbb9e3374..c9ceb4d57ab 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, if (is_multicast_ether_addr(hdr->addr1)) return; + /* + * In case we receive frames after disassociation. 
+ */ + if (!sdata->u.mgd.associated) + return; + ieee80211_sta_reset_conn_monitor(sdata); } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 38a797217a9..071ac95c4aa 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -323,6 +323,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { struct ieee80211_work *wk; + u64 cookie = (unsigned long)skb; rcu_read_lock(); list_for_each_entry_rcu(wk, &local->work_list, list) { @@ -334,8 +335,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) break; } rcu_read_unlock(); + if (local->hw_roc_skb_for_status == skb) { + cookie = local->hw_roc_cookie ^ 2; + local->hw_roc_skb_for_status = NULL; + } cfg80211_mgmt_tx_status( - skb->dev, (unsigned long) skb, skb->data, skb->len, + skb->dev, cookie, skb->data, skb->len, !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b64b42bc774..b0beaa58246 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1547,7 +1547,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local, skb_orphan(skb); } - if (skb_header_cloned(skb)) + if (skb_cloned(skb)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cf68700abff..d036597aabb 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: changed |= BSS_CHANGED_ASSOC; + mutex_lock(&sdata->u.mgd.mtx); ieee80211_bss_info_change_notify(sdata, changed); + mutex_unlock(&sdata->u.mgd.mtx); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 32fcbe290c0..4aa614b8a96 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head, /* Optimization: we don't need to hold module reference here, since function can't sleep. --RR */ +repeat: verdict = elem->hook(hook, skb, indev, outdev, okfn); if (verdict != NF_ACCEPT) { #ifdef CONFIG_NETFILTER_DEBUG @@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head, #endif if (verdict != NF_REPEAT) return verdict; - *i = (*i)->prev; + goto repeat; } } return NF_ACCEPT; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index e61511929c6..84f4fcc5884 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -942,8 +942,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_REPLY, ct); out: - if (tmpl) - nf_ct_put(tmpl); + if (tmpl) { + /* Special case: we have to repeat this hook, assign the + * template again to this packet. We assume that this packet + * has no conntrack assigned. This is used by nf_ct_tcp. 
*/ + if (ret == NF_REPEAT) + skb->nfct = (struct nf_conntrack *)tmpl; + else + nf_ct_put(tmpl); + } return ret; } diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c index 4d87befb04c..474d621cbc2 100644 --- a/net/netfilter/nf_tproxy_core.c +++ b/net/netfilter/nf_tproxy_core.c @@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb) skb->destructor = NULL; if (sk) - nf_tproxy_put_sock(sk); + sock_put(sk); } /* consumes sk */ -int +void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { - bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? - inet_twsk(sk)->tw_transparent : - inet_sk(sk)->transparent; - - if (transparent) { - skb_orphan(skb); - skb->sk = sk; - skb->destructor = nf_tproxy_destructor; - return 1; - } else - nf_tproxy_put_sock(sk); - - return 0; + /* assigning tw sockets complicates things; most + * skb->sk->X checks would have to test sk->sk_state first */ + if (sk->sk_state == TCP_TIME_WAIT) { + inet_twsk_put(inet_twsk(sk)); + return; + } + + skb_orphan(skb); + skb->sk = sk; + skb->destructor = nf_tproxy_destructor; } EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 640678f47a2..dcfd57eb9d0 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -33,6 +33,20 @@ #include <net/netfilter/nf_tproxy_core.h> #include <linux/netfilter/xt_TPROXY.h> +static bool tproxy_sk_is_transparent(struct sock *sk) +{ + if (sk->sk_state != TCP_TIME_WAIT) { + if (inet_sk(sk)->transparent) + return true; + sock_put(sk); + } else { + if (inet_twsk(sk)->tw_transparent) + return true; + inet_twsk_put(inet_twsk(sk)); + } + return false; +} + static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { @@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ - if (sk && nf_tproxy_assign_sock(skb, sk)) { + if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; @@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); + + nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } @@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ - if (sk && nf_tproxy_assign_sock(skb, sk)) { + if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; @@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); + + nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 00d6ae83830..9cc46356b57 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -35,6 +35,15 @@ #include <net/netfilter/nf_conntrack.h> #endif +static void +xt_socket_put_sk(struct sock *sk) +{ + if (sk->sk_state == TCP_TIME_WAIT) + inet_twsk_put(inet_twsk(sk)); + else + sock_put(sk); +} + static 
int extract_icmp4_fields(const struct sk_buff *skb, u8 *protocol, @@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, (sk->sk_state == TCP_TIME_WAIT && inet_twsk(sk)->tw_transparent)); - nf_tproxy_put_sock(sk); + xt_socket_put_sk(sk); if (wildcard || !transparent) sk = NULL; @@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) (sk->sk_state == TCP_TIME_WAIT && inet_twsk(sk)->tw_transparent)); - nf_tproxy_put_sock(sk); + xt_socket_put_sk(sk); if (wildcard || !transparent) sk = NULL; diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353f..d763793d39d 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, return ret; plen -= sizeof(*token); - token = kmalloc(sizeof(*token), GFP_KERNEL); + token = kzalloc(sizeof(*token), GFP_KERNEL); if (!token) return -ENOMEM; - token->kad = kmalloc(plen, GFP_KERNEL); + token->kad = kzalloc(plen, GFP_KERNEL); if (!token->kad) { kfree(token); return -ENOMEM; @@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) goto error; ret = -ENOMEM; - token = kmalloc(sizeof(*token), GFP_KERNEL); + token = kzalloc(sizeof(*token), GFP_KERNEL); if (!token) goto error; - token->kad = kmalloc(plen, GFP_KERNEL); + token->kad = kzalloc(plen, GFP_KERNEL); if (!token->kad) goto error_free; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 34dc598440a..1bc698039ae 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev) list_add(&dev->unreg_list, &single); dev_deactivate_many(&single); + list_del(&single); } static void dev_init_scheduler_queue(struct net_device *dev, diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2cc46f0962c..b23428f3c0d 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { - sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, - WORD_ROUND(ntohs(param.p->length))); - sctp_addto_chunk_fixed(*errp, - WORD_ROUND(ntohs(param.p->length)), - param.v); + if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, + WORD_ROUND(ntohs(param.p->length)))) + sctp_addto_chunk_fixed(*errp, + WORD_ROUND(ntohs(param.p->length)), + param.v); } else { /* If there is no memory for generating the ERROR * report as specified, an ABORT will be triggered diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd..d112f038edf 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev, return freq; if (freq == 0) return -EINVAL; - wdev_lock(wdev); mutex_lock(&rdev->devlist_mtx); + wdev_lock(wdev); err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); - mutex_unlock(&rdev->devlist_mtx); wdev_unlock(wdev); + mutex_unlock(&rdev->devlist_mtx); return err; default: return -EOPNOTSUPP; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 55187c8f642..406207515b5 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -27,9 +27,19 @@ #include <net/sock.h> #include <net/x25.h> -/* - * Parse a set of facilities into the facilities structures. Unrecognised - * facilities are written to the debug log file. 
+/** + * x25_parse_facilities - Parse facilities from skb into the facilities structs + * + * @skb: sk_buff to parse + * @facilities: Regular facilites, updated as facilities are found + * @dte_facs: ITU DTE facilities, updated as DTE facilities are found + * @vc_fac_mask: mask is updated with all facilities found + * + * Return codes: + * -1 - Parsing error, caller should drop call and clean up + * 0 - Parse OK, this skb has no facilities + * >0 - Parse OK, returns the length of the facilities header + * */ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) @@ -62,7 +72,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: if (len < 2) - return 0; + return -1; switch (*p) { case X25_FAC_REVERSE: if((p[1] & 0x81) == 0x81) { @@ -107,7 +117,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, break; case X25_FAC_CLASS_B: if (len < 3) - return 0; + return -1; switch (*p) { case X25_FAC_PACKET_SIZE: facilities->pacsize_in = p[1]; @@ -130,7 +140,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, break; case X25_FAC_CLASS_C: if (len < 4) - return 0; + return -1; printk(KERN_DEBUG "X.25: unknown facility %02X, " "values %02X, %02X, %02X\n", p[0], p[1], p[2], p[3]); @@ -139,18 +149,18 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, break; case X25_FAC_CLASS_D: if (len < p[1] + 2) - return 0; + return -1; switch (*p) { case X25_FAC_CALLING_AE: if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) - return 0; + return -1; dte_facs->calling_len = p[2]; memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLING_AE; break; case X25_FAC_CALLED_AE: if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) - return 0; + return -1; dte_facs->called_len = p[2]; memcpy(dte_facs->called_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLED_AE; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index f729f022be6..15de65f0471 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -91,10 +91,10 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp { struct x25_address source_addr, dest_addr; int len; + struct x25_sock *x25 = x25_sk(sk); switch (frametype) { case X25_CALL_ACCEPTED: { - struct x25_sock *x25 = x25_sk(sk); x25_stop_timer(sk); x25->condition = 0x00; @@ -113,14 +113,16 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp &dest_addr); if (len > 0) skb_pull(skb, len); + else if (len < 0) + goto out_clear; len = x25_parse_facilities(skb, &x25->facilities, &x25->dte_facilities, &x25->vc_facil_mask); if (len > 0) skb_pull(skb, len); - else - return -1; + else if (len < 0) + goto out_clear; /* * Copy any Call User Data. 
*/ @@ -144,6 +146,12 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp } return 0; + +out_clear: + x25_write_internal(sk, X25_CLEAR_REQUEST); + x25->state = X25_STATE_2; + x25_start_t23timer(sk); + return 0; } /* diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 4cbc942f762..21306928d47 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -396,9 +396,12 @@ void __exit x25_link_free(void) write_lock_bh(&x25_neigh_list_lock); list_for_each_safe(entry, tmp, &x25_neigh_list) { + struct net_device *dev; + nb = list_entry(entry, struct x25_neigh, node); + dev = nb->dev; __x25_remove_neigh(nb); - dev_put(nb->dev); + dev_put(dev); } write_unlock_bh(&x25_neigh_list_lock); } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8b3ef404c79..6459588befc 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) default: BUG(); } - xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); + xdst = dst_alloc(dst_ops); xfrm_policy_put_afinfo(afinfo); - xdst->flo.ops = &xfrm_bundle_fc_ops; + if (likely(xdst)) + xdst->flo.ops = &xfrm_bundle_fc_ops; + else + xdst = ERR_PTR(-ENOBUFS); return xdst; } diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index c9a16abacab..6c94c6ce292 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c @@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len) char *end = m + len; char *p; char s[PATH_MAX]; + int first; p = strchr(m, ':'); if (!p) { @@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len) clear_config(); + first = 1; while (m < end) { while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) m++; @@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len) if (strrcmp(s, "include/generated/autoconf.h") && strrcmp(s, "arch/um/include/uml-config.h") && strrcmp(s, ".ver")) { - printf(" %s \\\n", s); + /* + * Do not output the first dependency (the + * source file), so that kbuild is not confused + * if a .c file is rewritten into .S or vice + * versa. + */ + if (!first) + printf(" %s \\\n", s); do_config_file(s); } + first = 0; m = p + 1; } printf("\n%s: $(deps_%s)\n\n", target, target); diff --git a/scripts/package/builddeb b/scripts/package/builddeb index b0b2357aef4..f6cbc3ddb68 100644 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -238,12 +238,12 @@ EOF fi # Build header package -find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$ -find arch/x86/include include scripts -type f >> /tmp/files$$ +(cd $srctree; find . 
-name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$) +(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$) (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$) destdir=$kernel_headers_dir/usr/src/linux-headers-$version mkdir -p "$destdir" -tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -) +(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -) (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -) rm -f /tmp/files$$ /tmp/objfiles$$ arch=$(dpkg --print-architecture) diff --git a/security/security.c b/security/security.c index 739e40362f4..7b7308ace8c 100644 --- a/security/security.c +++ b/security/security.c @@ -154,10 +154,9 @@ int security_capset(struct cred *new, const struct cred *old, effective, inheritable, permitted); } -int security_capable(int cap) +int security_capable(const struct cred *cred, int cap) { - return security_ops->capable(current, current_cred(), cap, - SECURITY_CAP_AUDIT); + return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT); } int security_real_capable(struct task_struct *tsk, int cap) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e276eb46853..c8d69927068 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred) { struct task_security_struct *tsec = cred->security; - BUG_ON((unsigned long) cred->security < PAGE_SIZE); + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. + */ + BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); cred->security = (void *) 0x7UL; kfree(tsec); } diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index 24d3013c023..7c1fc64cb53 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -50,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97) if (v & SLFR_1RXV) readl(aaci->base + AACI_SL1RX); - writel(maincr, aaci->base + AACI_MAINCR); + if (maincr != readl(aaci->base + AACI_MAINCR)) { + writel(maincr, aaci->base + AACI_MAINCR); + readl(aaci->base + AACI_MAINCR); + udelay(1); + } } /* @@ -993,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci) * disabling the channel doesn't clear the FIFO. 
*/ writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR); + readl(aaci->base + AACI_MAINCR); + udelay(1); writel(aaci->maincr, aaci->base + AACI_MAINCR); /* diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c index 7730575bfad..b8b31c433d6 100644 --- a/sound/core/hrtimer.c +++ b/sound/core/hrtimer.c @@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); struct snd_timer *t = stime->timer; + unsigned long oruns; if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; - hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); - snd_timer_interrupt(stime->timer, t->sticks); + oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); + snd_timer_interrupt(stime->timer, t->sticks * oruns); if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; @@ -104,7 +105,7 @@ static int snd_hrtimer_stop(struct snd_timer *t) } static struct snd_timer_hardware hrtimer_hw = { - .flags = SNDRV_TIMER_HW_AUTO, + .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET, .open = snd_hrtimer_open, .close = snd_hrtimer_close, .start = snd_hrtimer_start, diff --git a/sound/core/jack.c b/sound/core/jack.c index 4902ae56873..53b53e97c89 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, fail_input: input_free_device(jack->input_dev); + kfree(jack->id); kfree(jack); return err; } diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 23f49f356e0..16c0bdfbb16 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; - int temp; + int temp, page, delta; temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); - temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); - return temp; + page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; + if (dma->nr_periods >= 4) + delta = (page - dma->period_real) & 3; + else { + delta = (page - dma->period_real); + if (delta < 0) + delta += dma->nr_periods; + } + return (dma->period_virt + delta) * dma->period_bytes + + (temp & (dma->period_bytes - 1)); } static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2e91a991eb1..fcedad9a5fe 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), @@ -2703,7 +2704,7 @@ static int __devinit azx_probe(struct pci_dev *pci, if (err < 0) goto out_free; #ifdef CONFIG_SND_HDA_PATCH_LOADER - if (patch[dev]) { + if (patch[dev] && *patch[dev]) { snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n", patch[dev]); err = snd_hda_load_patch(chip->bus, patch[dev]); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 
fbe97d32140..4d5004e693f 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), @@ -3410,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec) } } spec->multiout.dac_nids = spec->private_dac_nids; - spec->multiout.max_channels = nums * 2; + spec->multiout.max_channels = spec->multiout.num_dacs * 2; if (cfg->hp_outs > 0) spec->auto_mute = 1; @@ -3729,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec) return 0; } -static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, +static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename, const char *dir, int cidx, - hda_nid_t nid, int hda_dir) + hda_nid_t nid, int hda_dir, int amp_idx) { static char name[32]; static struct snd_kcontrol_new knew[] = { @@ -3743,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, for (i = 0; i < 2; i++) { struct snd_kcontrol *kctl; - knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); + knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx, + hda_dir); knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; knew[i].index = cidx; snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); @@ -3759,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, return 0; } +#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \ + cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0) + #define cx_auto_add_pb_volume(codec, nid, str, idx) \ cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) @@ -3808,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec) struct conexant_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; static const char *prev_label; - int i, err, cidx; + int i, err, cidx, conn_len; + hda_nid_t conn[HDA_MAX_CONNECTIONS]; + + int multi_adc_volume = 0; /* If the ADC nid has several input volumes */ + int adc_nid = spec->adc_nids[0]; + + conn_len = snd_hda_get_connections(codec, adc_nid, conn, + HDA_MAX_CONNECTIONS); + if (conn_len < 0) + return conn_len; + + multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1; + if (!multi_adc_volume) { + err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid, + HDA_INPUT); + if (err < 0) + return err; + } - err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0], - HDA_INPUT); - if (err < 0) - return err; prev_label = NULL; cidx = 0; for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; const char *label; - if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) + int j; + int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP; + if (!pin_amp && !multi_adc_volume) continue; + label = hda_get_autocfg_input_label(codec, cfg, i); if (label == prev_label) cidx++; else cidx = 0; prev_label = label; - err = cx_auto_add_volume(codec, label, " Capture", cidx, - nid, HDA_INPUT); - if (err < 0) - return err; + + if (pin_amp) { + err = 
cx_auto_add_volume(codec, label, " Boost", cidx, + nid, HDA_INPUT); + if (err < 0) + return err; + } + + if (!multi_adc_volume) + continue; + for (j = 0; j < conn_len; j++) { + if (conn[j] == nid) { + err = cx_auto_add_volume_idx(codec, label, + " Capture", cidx, adc_nid, HDA_INPUT, j); + if (err < 0) + return err; + break; + } + } } return 0; } @@ -3902,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = { .patch = patch_cxt5066 }, { .id = 0x14f15069, .name = "CX20585", .patch = patch_cxt5066 }, + { .id = 0x14f1506e, .name = "CX20590", + .patch = patch_cxt5066 }, { .id = 0x14f15097, .name = "CX20631", .patch = patch_conexant_auto }, { .id = 0x14f15098, .name = "CX20632", @@ -3928,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066"); MODULE_ALIAS("snd-hda-codec-id:14f15067"); MODULE_ALIAS("snd-hda-codec-id:14f15068"); MODULE_ALIAS("snd-hda-codec-id:14f15069"); +MODULE_ALIAS("snd-hda-codec-id:14f1506e"); MODULE_ALIAS("snd-hda-codec-id:14f15097"); MODULE_ALIAS("snd-hda-codec-id:14f15098"); MODULE_ALIAS("snd-hda-codec-id:14f150a1"); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2d5b83fa8d2..a5876773672 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -642,6 +642,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid, hdmi_ai->ver = 0x01; hdmi_ai->len = 0x0a; hdmi_ai->CC02_CT47 = channels - 1; + hdmi_ai->CA = ca; hdmi_checksum_audio_infoframe(hdmi_ai); } else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */ struct dp_audio_infoframe *dp_ai; @@ -651,6 +652,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid, dp_ai->len = 0x1b; dp_ai->ver = 0x11 << 2; dp_ai->CC02_CT47 = channels - 1; + dp_ai->CA = ca; } else { snd_printd("HDMI: unknown connection type at pin %d\n", pin_nid); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2fa9ed99c32..3328a259a24 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2290,6 +2290,29 @@ static struct snd_kcontrol_new alc888_base_mixer[] = { { } /* end */ }; +static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = { + HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), + HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT), + HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0, + HDA_OUTPUT), + HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT), + HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT), + HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT), + HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), + HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), + HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT), + HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + { } /* end */ +}; + + static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), @@ 
-10359,7 +10382,7 @@ static struct alc_config_preset alc882_presets[] = { .init_hook = alc_automute_amp, }, [ALC888_ACER_ASPIRE_4930G] = { - .mixers = { alc888_base_mixer, + .mixers = { alc888_acer_aspire_4930g_mixer, alc883_chmode_mixer }, .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs, alc888_acer_aspire_4930g_verbs }, @@ -18802,6 +18825,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = { ALC662_3ST_6ch_DIG), SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x", ALC663_ASUS_H13), + SND_PCI_QUIRK(0x1991, 0x5628, "Ordissimo EVE", ALC662_LENOVO_101E), {} }; @@ -19494,6 +19518,7 @@ static const struct alc_fixup alc662_fixups[] = { }; static struct snd_pci_quirk alc662_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9ea48b425d0..bd7b123f644 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = { 0x0f, 0x10, 0x11, 0x1f, 0x20, }; -static hda_nid_t stac92hd88xxx_pin_nids[10] = { +static hda_nid_t stac92hd87xxx_pin_nids[6] = { + 0x0a, 0x0b, 0x0c, 0x0d, + 0x0f, 0x11, +}; + +static hda_nid_t stac92hd88xxx_pin_nids[8] = { 0x0a, 0x0b, 0x0c, 0x0d, 0x0f, 0x11, 0x1f, 0x20, }; @@ -5430,12 +5435,13 @@ again: switch (codec->vendor_id) { case 0x111d76d1: case 0x111d76d9: + case 0x111d76e5: spec->dmic_nids = stac92hd87b_dmic_nids; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd87b_dmic_nids, STAC92HD87B_NUM_DMICS); - spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); - spec->pin_nids = stac92hd88xxx_pin_nids; + spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids); + spec->pin_nids = stac92hd87xxx_pin_nids; spec->mono_nid = 0; spec->num_pwrs = 0; break; @@ -5443,6 +5449,7 @@ again: case 0x111d7667: case 0x111d7668: case 0x111d7669: + case 0x111d76e3: spec->num_dmics = stac92xx_connected_ports(codec, stac92hd88xxx_dmic_nids, STAC92HD88XXX_NUM_DMICS); @@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, + { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, + { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, {} /* terminator */ }; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a76c3260d94..63b0054200a 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec) hda_nid_t nid = cfg->inputs[i].pin; if (spec->smart51_enabled && is_smart51_pins(spec, nid)) ctl = PIN_OUT; - else if (i == AUTO_PIN_MIC) + else if (cfg->inputs[i].type == AUTO_PIN_MIC) ctl = PIN_VREF50; else ctl = PIN_IN; diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index bb4bf65b9e7..0bb424af956 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c @@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec) return 0; } -static const u8 cx20442_reg = 
CX20442_TELOUT | CX20442_MIC; +static const u8 cx20442_reg; static struct snd_soc_codec_driver cx20442_codec_dev = { .probe = cx20442_codec_probe, diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 987476a5895..017d99ceb42 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, irq_mask); - if (det && shrt) { + if (det || shrt) { /* Enable mic detection, this may not have been set through * platform data (eg, if the defaults are OK). */ snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h index e8490f3edd0..e3ec2433b21 100644 --- a/sound/soc/codecs/wm8903.h +++ b/sound/soc/codecs/wm8903.h @@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, #define WM8903_VMID_RES_50K 2 #define WM8903_VMID_RES_250K 3 -#define WM8903_VMID_RES_5K 4 +#define WM8903_VMID_RES_5K 6 /* * R8 (0x08) - Analogue DAC 0 diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 3351f77607b..ebaee5ca743 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -107,6 +107,9 @@ struct wm8994_priv { int revision; struct wm8994_pdata *pdata; + + unsigned int aif1clk_enable:1; + unsigned int aif2clk_enable:1; }; static int wm8994_readable(unsigned int reg) @@ -1004,6 +1007,93 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) } } +static int late_enable_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + if (wm8994->aif1clk_enable) + snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, + WM8994_AIF1CLK_ENA_MASK, + WM8994_AIF1CLK_ENA); + if (wm8994->aif2clk_enable) + snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, + WM8994_AIF2CLK_ENA_MASK, + WM8994_AIF2CLK_ENA); + break; + } + + return 0; +} + +static int late_disable_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_POST_PMD: + if (wm8994->aif1clk_enable) { + snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, + WM8994_AIF1CLK_ENA_MASK, 0); + wm8994->aif1clk_enable = 0; + } + if (wm8994->aif2clk_enable) { + snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, + WM8994_AIF2CLK_ENA_MASK, 0); + wm8994->aif2clk_enable = 0; + } + break; + } + + return 0; +} + +static int aif1clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif1clk_enable = 1; + break; + } + + return 0; +} + +static int aif2clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif2clk_enable = 1; + break; + } + + return 0; +} + +static int dac_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + unsigned int mask = 1 << w->shift; + + snd_soc_update_bits(codec, 
WM8994_POWER_MANAGEMENT_5, + mask, mask); + return 0; +} + static const char *hp_mux_text[] = { "Mixer", "DAC", @@ -1272,6 +1362,47 @@ static const struct soc_enum aif2dacr_src_enum = static const struct snd_kcontrol_new aif2dacr_src_mux = SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); +static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = { +SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), +SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + +SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, + late_enable_ev, SND_SOC_DAPM_PRE_PMU), + +SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) +}; + +static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { +SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0) +}; + +static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = { +SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0, + dac_ev, SND_SOC_DAPM_PRE_PMU), +}; + +static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = { +SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), +SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), +SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), +SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), +}; + static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { SND_SOC_DAPM_INPUT("DMIC1DAT"), SND_SOC_DAPM_INPUT("DMIC2DAT"), @@ -1284,12 +1415,9 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0), - -SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", +SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, 0, WM8994_POWER_MANAGEMENT_4, 9, 0), -SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", +SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, 0, WM8994_POWER_MANAGEMENT_4, 8, 0), SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0, WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev, @@ -1298,9 +1426,9 @@ SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0, WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), -SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture", +SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL, 0, WM8994_POWER_MANAGEMENT_4, 11, 0), -SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture", +SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL, 0, WM8994_POWER_MANAGEMENT_4, 10, 0), SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0, WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev, @@ -1345,6 +1473,7 @@ SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0, 
SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0), +SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux), @@ -1371,11 +1500,6 @@ SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), -SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), -SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), -SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), -SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), - SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), @@ -1515,14 +1639,12 @@ static const struct snd_soc_dapm_route intercon[] = { { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, /* DAC1 inputs */ - { "DAC1L", NULL, "DAC1L Mixer" }, { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC1R", NULL, "DAC1R Mixer" }, { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, @@ -1531,7 +1653,6 @@ static const struct snd_soc_dapm_route intercon[] = { /* DAC2/AIF2 outputs */ { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, - { "DAC2L", NULL, "AIF2DAC2L Mixer" }, { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, @@ -1539,13 +1660,17 @@ static const struct snd_soc_dapm_route intercon[] = { { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, - { "DAC2R", NULL, "AIF2DAC2R Mixer" }, { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, { "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" }, { "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" }, + { "AIF1ADCDAT", NULL, "AIF1ADC1L" }, + { "AIF1ADCDAT", NULL, "AIF1ADC1R" }, + { "AIF1ADCDAT", NULL, "AIF1ADC2L" }, + { "AIF1ADCDAT", NULL, "AIF1ADC2R" }, + { "AIF2ADCDAT", NULL, "AIF2ADC Mux" }, /* AIF3 output */ @@ -1578,6 +1703,31 @@ static const struct snd_soc_dapm_route intercon[] = { { "Right Headphone Mux", "DAC", "DAC1R" }, }; +static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = { + { "DAC1L", NULL, "Late DAC1L Enable PGA" }, + { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" }, + { "DAC1R", NULL, "Late DAC1R Enable PGA" }, + { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" }, + { "DAC2L", NULL, "Late DAC2L Enable PGA" }, + { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" }, + { "DAC2R", NULL, "Late DAC2R Enable PGA" }, + { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" } +}; + +static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = { + { "DAC1L", NULL, "DAC1L Mixer" }, + { "DAC1R", NULL, "DAC1R Mixer" }, + { "DAC2L", NULL, "AIF2DAC2L Mixer" }, + { "DAC2R", NULL, "AIF2DAC2R Mixer" }, +}; + +static const struct 
snd_soc_dapm_route wm8994_revd_intercon[] = { + { "AIF1DACDAT", NULL, "AIF2DACDAT" }, + { "AIF2DACDAT", NULL, "AIF1DACDAT" }, + { "AIF1ADCDAT", NULL, "AIF2ADCDAT" }, + { "AIF2ADCDAT", NULL, "AIF1ADCDAT" }, +}; + static const struct snd_soc_dapm_route wm8994_intercon[] = { { "AIF2DACL", NULL, "AIF2DAC Mux" }, { "AIF2DACR", NULL, "AIF2DAC Mux" }, @@ -2501,6 +2651,22 @@ static int wm8994_resume(struct snd_soc_codec *codec) { struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); int i, ret; + unsigned int val, mask; + + if (wm8994->revision < 4) { + /* force a HW read */ + val = wm8994_reg_read(codec->control_data, + WM8994_POWER_MANAGEMENT_5); + + /* modify the cache only */ + codec->cache_only = 1; + mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA | + WM8994_DAC2R_ENA | WM8994_DAC2L_ENA; + val &= mask; + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + mask, val); + codec->cache_only = 0; + } /* Restore the registers */ ret = snd_soc_cache_sync(codec); @@ -2834,11 +3000,10 @@ static void wm8958_default_micdet(u16 status, void *data) report |= SND_JACK_BTN_5; done: - snd_soc_jack_report(wm8994->micdet[0].jack, + snd_soc_jack_report(wm8994->micdet[0].jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | - SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, - report); + SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT); } /** @@ -3112,6 +3277,17 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) case WM8994: snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, ARRAY_SIZE(wm8994_specific_dapm_widgets)); + if (wm8994->revision < 4) { + snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, + ARRAY_SIZE(wm8994_lateclk_revd_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, + ARRAY_SIZE(wm8994_dac_revd_widgets)); + } else { + snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, + ARRAY_SIZE(wm8994_lateclk_widgets)); + snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, + ARRAY_SIZE(wm8994_dac_widgets)); + } break; case WM8958: snd_soc_add_controls(codec, wm8958_snd_controls, @@ -3129,6 +3305,16 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) case WM8994: snd_soc_dapm_add_routes(dapm, wm8994_intercon, ARRAY_SIZE(wm8994_intercon)); + + if (wm8994->revision < 4) { + snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, + ARRAY_SIZE(wm8994_revd_intercon)); + snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, + ARRAY_SIZE(wm8994_lateclk_revd_intercon)); + } else { + snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, + ARRAY_SIZE(wm8994_lateclk_intercon)); + } break; case WM8958: snd_soc_dapm_add_routes(dapm, wm8958_intercon, diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 613df5db0b3..51689270606 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"), }; static const struct snd_soc_dapm_route analogue_routes[] = { + { "MICBIAS1", NULL, "CLK_SYS" }, + { "MICBIAS2", NULL, "CLK_SYS" }, + { "IN1L PGA", "IN1LP Switch", "IN1LP" }, { "IN1L PGA", "IN1LN Switch", "IN1LN" }, diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c index b36f0b39b09..fe7984221eb 100644 --- a/sound/soc/davinci/davinci-evm.c +++ b/sound/soc/davinci/davinci-evm.c @@ -218,7 +218,19 @@ static struct snd_soc_dai_link dm6467_evm_dai[] = { .ops = &evm_spdif_ops, }, }; -static struct snd_soc_dai_link da8xx_evm_dai = { + +static struct snd_soc_dai_link da830_evm_dai = { + .name = "TLV320AIC3X", + 
.stream_name = "AIC3X", + .cpu_dai_name = "davinci-mcasp.1", + .codec_dai_name = "tlv320aic3x-hifi", + .codec_name = "tlv320aic3x-codec.1-0018", + .platform_name = "davinci-pcm-audio", + .init = evm_aic3x_init, + .ops = &evm_ops, +}; + +static struct snd_soc_dai_link da850_evm_dai = { .name = "TLV320AIC3X", .stream_name = "AIC3X", .cpu_dai_name= "davinci-mcasp.0", @@ -259,13 +271,13 @@ static struct snd_soc_card dm6467_snd_soc_card_evm = { static struct snd_soc_card da830_snd_soc_card = { .name = "DA830/OMAP-L137 EVM", - .dai_link = &da8xx_evm_dai, + .dai_link = &da830_evm_dai, .num_links = 1, }; static struct snd_soc_card da850_snd_soc_card = { .name = "DA850/OMAP-L138 EVM", - .dai_link = &da8xx_evm_dai, + .dai_link = &da850_evm_dai, .num_links = 1, }; diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index e20c9e1457c..1e9bccae4e8 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c @@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = { .name = "tlv320aic23", .stream_name = "TLV320AIC23", .codec_dai_name = "tlv320aic23-hifi", - .platform_name = "imx-pcm-audio.0", + .platform_name = "imx-fiq-pcm-audio.0", .codec_name = "tlv320aic23-codec.0-001a", .cpu_dai_name = "imx-ssi.0", .ops = &eukrea_tlv320_snd_ops, diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c index 28333e7d9c5..dc65650a6fa 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c @@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", @@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c index 01bf31675c5..51897fcd911 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c @@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9705-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", @@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9705-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9705-codec", diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index c6a37c6ef23..053ed208e59 100644 --- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c @@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c index 
fc22e6eefc9..b13a4252812 100644 --- a/sound/soc/pxa/em-x270.c +++ b/sound/soc/pxa/em-x270.c @@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index 0d70fc8c12b..38ca6759907 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c @@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9713-hifi", .codec_name = "wm9713-codec", .init = mioa701_wm9713_init, @@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name ="wm9713-aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index 857db96d4a4..504e4004f00 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c @@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { { .name = "AC97 HiFi", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .codec_name = "wm9712-codec", .platform_name = "pxa-pcm-audio", @@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9712-aux", .codec_name = "wm9712-codec", .platform_name = "pxa-pcm-audio", diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index f75804ef089..4b6e5d608b4 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c @@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_dai_name = "wm9712-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", @@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = { { .name = "AC97 Aux", .stream_name = "AC97 Aux", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_dai_name = "wm9712-aux", .platform_name = "pxa-pcm-audio", .codec_name = "wm9712-codec", diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b222a7d7202..25bba108fea 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c @@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { .stream_name = "AC97 HiFi", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", - .cpu_dai_name = "pxa-ac97.0", + .cpu_dai_name = "pxa2xx-ac97", .codec_name = "wm9713-hifi", .init = zylonite_wm9713_init, }, @@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { .stream_name = "AC97 Aux", .codec_name = "wm9713-codec", .platform_name = "pxa-pcm-audio", - .cpu_dai_name = "pxa-ac97.1", + .cpu_dai_name = "pxa2xx-ac97-aux", .codec_name = "wm9713-aux", }, { diff --git 
a/sound/soc/soc-core.c b/sound/soc/soc-core.c index c4b60610beb..c3f6f1e7279 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1449,6 +1449,7 @@ static int soc_post_component_init(struct snd_soc_card *card, rtd = &card->rtd_aux[num]; name = aux_dev->name; } + rtd->card = card; /* machine controls, routes and widgets are not prefixed */ temp = codec->name_prefix; @@ -1471,7 +1472,6 @@ static int soc_post_component_init(struct snd_soc_card *card, /* register the rtd device */ rtd->codec = codec; - rtd->card = card; rtd->dev.parent = card->dev; rtd->dev.release = rtd_release; rtd->dev.init_name = name; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 8194f150bab..25e54230cc6 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) !path->connected(path->source, path->sink)) continue; - if (path->sink && path->sink->power_check && + if (!path->sink) + continue; + + if (path->sink->force) { + power = 1; + break; + } + + if (path->sink->power_check && path->sink->power_check(path->sink)) { power = 1; break; @@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) { struct snd_soc_dapm_widget *w; + unsigned int val; list_for_each_entry(w, &dapm->card->widgets, list) { @@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) case snd_soc_dapm_post: break; } + + /* Read the initial power state from the device */ + if (w->reg >= 0) { + val = snd_soc_read(w->codec, w->reg); + val &= 1 << w->shift; + if (w->invert) + val = !val; + + if (val) + w->power = 1; + } + w->new = 1; } diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 68b97477577..66eabafb1c2 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) } dev->pcm->private_data = dev; - strcpy(dev->pcm->name, dev->product_name); + strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c77fff..a1a47088fd0 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c @@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) if (ret < 0) return ret; - strcpy(rmidi->name, device->product_name); + strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = device; diff --git a/sound/usb/card.c b/sound/usb/card.c index 800f7cb4f25..c0f8270bc19 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, return -ENOMEM; } + mutex_init(&chip->shutdown_mutex); chip->index = idx; chip->dev = dev; chip->card = card; @@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) chip = ptr; card = chip->card; mutex_lock(®ister_mutex); + mutex_lock(&chip->shutdown_mutex); chip->shutdown = 1; chip->num_interfaces--; if (chip->num_interfaces <= 0) { @@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; + mutex_unlock(&chip->shutdown_mutex); mutex_unlock(®ister_mutex); snd_card_free_when_closed(card); } else { + 
mutex_unlock(&chip->shutdown_mutex); mutex_unlock(®ister_mutex); } } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 7df89b3d7de..85af6051b52 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -95,7 +95,7 @@ enum { }; -/*E-mu 0202(0404) eXtension Unit(XU) control*/ +/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ enum { USB_XU_CLOCK_RATE = 0xe301, USB_XU_CLOCK_SOURCE = 0xe302, @@ -1566,7 +1566,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw cval->initialized = 1; } else { if (type == USB_XU_CLOCK_RATE) { - /* E-Mu USB 0404/0202/TrackerPre + /* E-Mu USB 0404/0202/TrackerPre/0204 * samplerate control quirk */ cval->min = 0; diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 4132522ac90..e3f680526cb 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, } if (changed) { + mutex_lock(&subs->stream->chip->shutdown_mutex); /* format changed */ snd_usb_release_substream_urbs(subs, 0); /* influenced: period_bytes, channels, rate, format, */ @@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, params_rate(hw_params), snd_pcm_format_physical_width(params_format(hw_params)) * params_channels(hw_params)); + mutex_unlock(&subs->stream->chip->shutdown_mutex); } return ret; @@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; - if (!subs->stream->chip->shutdown) - snd_usb_release_substream_urbs(subs, 0); + mutex_lock(&subs->stream->chip->shutdown_mutex); + snd_usb_release_substream_urbs(subs, 0); + mutex_unlock(&subs->stream->chip->shutdown_mutex); return snd_pcm_lib_free_vmalloc_buffer(substream); } diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 35999874d30..921a86fd988 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -79,6 +79,13 @@ .idProduct = 0x3f0a, .bInterfaceClass = USB_CLASS_AUDIO, }, +{ + /* E-Mu 0204 USB */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x041e, + .idProduct = 0x3f19, + .bInterfaceClass = USB_CLASS_AUDIO, +}, /* * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index cf8bf088394..e314cdb8500 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -532,7 +532,7 @@ int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat } /* - * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device, + * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device, * not for interface. 
*/ @@ -589,6 +589,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */ case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */ case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */ + case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ set_format_emu_quirk(subs, fmt); break; } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index db3eb21627e..6e66fffe87f 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -36,6 +36,7 @@ struct snd_usb_audio { struct snd_card *card; u32 usb_id; int shutdown; + struct mutex shutdown_mutex; unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ int num_interfaces; int num_suspended_intf; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b2f729fdb31..60cac6f92e8 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -759,8 +759,8 @@ static int __cmd_record(int argc, const char **argv) perf_session__process_machines(session, event__synthesize_guest_os); if (!system_wide) - event__synthesize_thread(target_tid, process_synthesized_event, - session); + event__synthesize_thread_map(threads, process_synthesized_event, + session); else event__synthesize_threads(process_synthesized_event, session); diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 746cf03cb05..0ace786e83e 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) c->start_time = start; if (p->start_time == 0 || p->start_time > start) p->start_time = start; - - if (cpu > numcpus) - numcpus = cpu; } #define MAX_CPUS 4096 @@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used, if (!event_str) return 0; + if (sample->cpu > numcpus) + numcpus = sample->cpu; + if (strcmp(event_str, "power:cpu_idle") == 0) { struct power_processor_entry *ppe = (void *)te; if (ppe->state == (u32)PWR_EVENT_EXIT) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b6998e05576..5a29d9cd948 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1306,7 +1306,7 @@ static int __cmd_top(void) return -ENOMEM; if (target_tid != -1) - event__synthesize_thread(target_tid, event__process, session); + event__synthesize_thread_map(threads, event__process, session); else event__synthesize_threads(event__process, session); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1478ab4ee22..50d0a931497 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -263,11 +263,12 @@ static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event, process, session); } -int event__synthesize_thread(pid_t pid, event__handler_t process, - struct perf_session *session) +int event__synthesize_thread_map(struct thread_map *threads, + event__handler_t process, + struct perf_session *session) { event_t *comm_event, *mmap_event; - int err = -1; + int err = -1, thread; comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); if (comm_event == NULL) @@ -277,8 +278,15 @@ int event__synthesize_thread(pid_t pid, event__handler_t process, if (mmap_event == NULL) goto out_free_comm; - err = __event__synthesize_thread(comm_event, mmap_event, pid, - process, session); + err = 0; + for (thread = 0; thread < threads->nr; ++thread) { + if (__event__synthesize_thread(comm_event, mmap_event, + threads->map[thread], + process, session)) { + err = -1; + break; + } + } free(mmap_event); 
out_free_comm: free(comm_event); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2b7e91902f1..cc7b52f9b49 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -135,14 +135,16 @@ typedef union event_union { void event__print_totals(void); struct perf_session; +struct thread_map; typedef int (*event__handler_synth_t)(event_t *event, struct perf_session *session); typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, struct perf_session *session); -int event__synthesize_thread(pid_t pid, event__handler_t process, - struct perf_session *session); +int event__synthesize_thread_map(struct thread_map *threads, + event__handler_t process, + struct perf_session *session); int event__synthesize_threads(event__handler_t process, struct perf_session *session); int event__synthesize_kernel_mmap(event__handler_t process, diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 32f4f1f2f6e..df51560f16f 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, { struct sort_entry *se; u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; + u64 nr_events; const char *sep = symbol_conf.field_sep; int ret; @@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, if (pair_hists) { period = self->pair ? self->pair->period : 0; + nr_events = self->pair ? self->pair->nr_events : 0; total = pair_hists->stats.total_period; period_sys = self->pair ? self->pair->period_sys : 0; period_us = self->pair ? self->pair->period_us : 0; @@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, period_guest_us = self->pair ? self->pair->period_guest_us : 0; } else { period = self->period; + nr_events = self->nr_events; total = session_total; period_sys = self->period_sys; period_us = self->period_us; @@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, if (symbol_conf.show_nr_samples) { if (sep) - ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); + ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); else - ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); + ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); } if (pair_hists) { diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index fb737fe9be9..96c866045d6 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -456,9 +456,9 @@ void svg_legenda(void) return; svg_legenda_box(0, "Running", "sample"); - svg_legenda_box(100, "Idle","rect.c1"); - svg_legenda_box(200, "Deeper Idle", "rect.c3"); - svg_legenda_box(350, "Deepest Idle", "rect.c6"); + svg_legenda_box(100, "Idle","c1"); + svg_legenda_box(200, "Deeper Idle", "c3"); + svg_legenda_box(350, "Deepest Idle", "c6"); svg_legenda_box(550, "Sleeping", "process2"); svg_legenda_box(650, "Waiting for cpu", "waiting"); svg_legenda_box(800, "Blocked on IO", "blocked"); diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4c6983de6fd..362a0cb448d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -72,7 +72,7 @@ int need_reinitialize; int num_cpus; -typedef struct per_cpu_counters { +struct counters { unsigned long long tsc; /* per thread */ unsigned long long aperf; /* per thread */ unsigned long long mperf; /* per thread */ @@ -88,13 +88,13 @@ typedef 
struct per_cpu_counters { int pkg; int core; int cpu; - struct per_cpu_counters *next; -} PCC; + struct counters *next; +}; -PCC *pcc_even; -PCC *pcc_odd; -PCC *pcc_delta; -PCC *pcc_average; +struct counters *cnt_even; +struct counters *cnt_odd; +struct counters *cnt_delta; +struct counters *cnt_average; struct timeval tv_even; struct timeval tv_odd; struct timeval tv_delta; @@ -125,7 +125,7 @@ unsigned long long get_msr(int cpu, off_t offset) return msr; } -void print_header() +void print_header(void) { if (show_pkg) fprintf(stderr, "pkg "); @@ -160,39 +160,39 @@ void print_header() putc('\n', stderr); } -void dump_pcc(PCC *pcc) +void dump_cnt(struct counters *cnt) { - fprintf(stderr, "package: %d ", pcc->pkg); - fprintf(stderr, "core:: %d ", pcc->core); - fprintf(stderr, "CPU: %d ", pcc->cpu); - fprintf(stderr, "TSC: %016llX\n", pcc->tsc); - fprintf(stderr, "c3: %016llX\n", pcc->c3); - fprintf(stderr, "c6: %016llX\n", pcc->c6); - fprintf(stderr, "c7: %016llX\n", pcc->c7); - fprintf(stderr, "aperf: %016llX\n", pcc->aperf); - fprintf(stderr, "pc2: %016llX\n", pcc->pc2); - fprintf(stderr, "pc3: %016llX\n", pcc->pc3); - fprintf(stderr, "pc6: %016llX\n", pcc->pc6); - fprintf(stderr, "pc7: %016llX\n", pcc->pc7); - fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr); + fprintf(stderr, "package: %d ", cnt->pkg); + fprintf(stderr, "core:: %d ", cnt->core); + fprintf(stderr, "CPU: %d ", cnt->cpu); + fprintf(stderr, "TSC: %016llX\n", cnt->tsc); + fprintf(stderr, "c3: %016llX\n", cnt->c3); + fprintf(stderr, "c6: %016llX\n", cnt->c6); + fprintf(stderr, "c7: %016llX\n", cnt->c7); + fprintf(stderr, "aperf: %016llX\n", cnt->aperf); + fprintf(stderr, "pc2: %016llX\n", cnt->pc2); + fprintf(stderr, "pc3: %016llX\n", cnt->pc3); + fprintf(stderr, "pc6: %016llX\n", cnt->pc6); + fprintf(stderr, "pc7: %016llX\n", cnt->pc7); + fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); } -void dump_list(PCC *pcc) +void dump_list(struct counters *cnt) { - printf("dump_list 0x%p\n", pcc); + printf("dump_list 0x%p\n", cnt); - for (; pcc; pcc = pcc->next) - dump_pcc(pcc); + for (; cnt; cnt = cnt->next) + dump_cnt(cnt); } -void print_pcc(PCC *p) +void print_cnt(struct counters *p) { double interval_float; interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; /* topology columns, print blanks on 1st (average) line */ - if (p == pcc_average) { + if (p == cnt_average) { if (show_pkg) fprintf(stderr, " "); if (show_core) @@ -262,24 +262,24 @@ void print_pcc(PCC *p) putc('\n', stderr); } -void print_counters(PCC *cnt) +void print_counters(struct counters *counters) { - PCC *pcc; + struct counters *cnt; print_header(); if (num_cpus > 1) - print_pcc(pcc_average); + print_cnt(cnt_average); - for (pcc = cnt; pcc != NULL; pcc = pcc->next) - print_pcc(pcc); + for (cnt = counters; cnt != NULL; cnt = cnt->next) + print_cnt(cnt); } #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) - -int compute_delta(PCC *after, PCC *before, PCC *delta) +int compute_delta(struct counters *after, + struct counters *before, struct counters *delta) { int errors = 0; int perf_err = 0; @@ -391,20 +391,20 @@ int compute_delta(PCC *after, PCC *before, PCC *delta) delta->extra_msr = after->extra_msr; if (errors) { fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); - dump_pcc(before); + dump_cnt(before); fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); - dump_pcc(after); + dump_cnt(after); errors = 0; } } return 0; } -void compute_average(PCC *delta, PCC *avg) 
+void compute_average(struct counters *delta, struct counters *avg) { - PCC *sum; + struct counters *sum; - sum = calloc(1, sizeof(PCC)); + sum = calloc(1, sizeof(struct counters)); if (sum == NULL) { perror("calloc sum"); exit(1); @@ -438,35 +438,34 @@ void compute_average(PCC *delta, PCC *avg) free(sum); } -void get_counters(PCC *pcc) +void get_counters(struct counters *cnt) { - for ( ; pcc; pcc = pcc->next) { - pcc->tsc = get_msr(pcc->cpu, MSR_TSC); + for ( ; cnt; cnt = cnt->next) { + cnt->tsc = get_msr(cnt->cpu, MSR_TSC); if (do_nhm_cstates) - pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY); + cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY); if (do_nhm_cstates) - pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY); + cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY); if (do_snb_cstates) - pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY); + cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY); if (has_aperf) - pcc->aperf = get_msr(pcc->cpu, MSR_APERF); + cnt->aperf = get_msr(cnt->cpu, MSR_APERF); if (has_aperf) - pcc->mperf = get_msr(pcc->cpu, MSR_MPERF); + cnt->mperf = get_msr(cnt->cpu, MSR_MPERF); if (do_snb_cstates) - pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY); + cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY); if (do_nhm_cstates) - pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY); + cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY); if (do_nhm_cstates) - pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY); + cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY); if (do_snb_cstates) - pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY); + cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY); if (extra_msr_offset) - pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset); + cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset); } } - -void print_nehalem_info() +void print_nehalem_info(void) { unsigned long long msr; unsigned int ratio; @@ -514,38 +513,38 @@ void print_nehalem_info() } -void free_counter_list(PCC *list) +void free_counter_list(struct counters *list) { - PCC *p; + struct counters *p; for (p = list; p; ) { - PCC *free_me; + struct counters *free_me; free_me = p; p = p->next; free(free_me); } - return; } void free_all_counters(void) { - free_counter_list(pcc_even); - pcc_even = NULL; + free_counter_list(cnt_even); + cnt_even = NULL; - free_counter_list(pcc_odd); - pcc_odd = NULL; + free_counter_list(cnt_odd); + cnt_odd = NULL; - free_counter_list(pcc_delta); - pcc_delta = NULL; + free_counter_list(cnt_delta); + cnt_delta = NULL; - free_counter_list(pcc_average); - pcc_average = NULL; + free_counter_list(cnt_average); + cnt_average = NULL; } -void insert_cpu_counters(PCC **list, PCC *new) +void insert_counters(struct counters **list, + struct counters *new) { - PCC *prev; + struct counters *prev; /* * list was empty @@ -594,18 +593,16 @@ void insert_cpu_counters(PCC **list, PCC *new) */ new->next = prev->next; prev->next = new; - - return; } -void alloc_new_cpu_counters(int pkg, int core, int cpu) +void alloc_new_counters(int pkg, int core, int cpu) { - PCC *new; + struct counters *new; if (verbose > 1) printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); - new = (PCC *)calloc(1, sizeof(PCC)); + new = (struct counters *)calloc(1, sizeof(struct counters)); if (new == NULL) { perror("calloc"); exit(1); @@ -613,9 +610,10 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) new->pkg = pkg; new->core = core; new->cpu = cpu; - insert_cpu_counters(&pcc_odd, new); + insert_counters(&cnt_odd, new); - new = (PCC *)calloc(1, sizeof(PCC)); + new = (struct counters 
*)calloc(1, + sizeof(struct counters)); if (new == NULL) { perror("calloc"); exit(1); @@ -623,9 +621,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) new->pkg = pkg; new->core = core; new->cpu = cpu; - insert_cpu_counters(&pcc_even, new); + insert_counters(&cnt_even, new); - new = (PCC *)calloc(1, sizeof(PCC)); + new = (struct counters *)calloc(1, sizeof(struct counters)); if (new == NULL) { perror("calloc"); exit(1); @@ -633,9 +631,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) new->pkg = pkg; new->core = core; new->cpu = cpu; - insert_cpu_counters(&pcc_delta, new); + insert_counters(&cnt_delta, new); - new = (PCC *)calloc(1, sizeof(PCC)); + new = (struct counters *)calloc(1, sizeof(struct counters)); if (new == NULL) { perror("calloc"); exit(1); @@ -643,7 +641,7 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) new->pkg = pkg; new->core = core; new->cpu = cpu; - pcc_average = new; + cnt_average = new; } int get_physical_package_id(int cpu) @@ -719,7 +717,7 @@ void re_initialize(void) { printf("turbostat: topology changed, re-initializing.\n"); free_all_counters(); - num_cpus = for_all_cpus(alloc_new_cpu_counters); + num_cpus = for_all_cpus(alloc_new_counters); need_reinitialize = 0; printf("num_cpus is now %d\n", num_cpus); } @@ -728,7 +726,7 @@ void dummy(int pkg, int core, int cpu) { return; } /* * check to see if a cpu came on-line */ -void verify_num_cpus() +void verify_num_cpus(void) { int new_num_cpus; @@ -740,14 +738,12 @@ void verify_num_cpus() num_cpus, new_num_cpus); need_reinitialize = 1; } - - return; } void turbostat_loop() { restart: - get_counters(pcc_even); + get_counters(cnt_even); gettimeofday(&tv_even, (struct timezone *)NULL); while (1) { @@ -757,24 +753,24 @@ restart: goto restart; } sleep(interval_sec); - get_counters(pcc_odd); + get_counters(cnt_odd); gettimeofday(&tv_odd, (struct timezone *)NULL); - compute_delta(pcc_odd, pcc_even, pcc_delta); + compute_delta(cnt_odd, cnt_even, cnt_delta); timersub(&tv_odd, &tv_even, &tv_delta); - compute_average(pcc_delta, pcc_average); - print_counters(pcc_delta); + compute_average(cnt_delta, cnt_average); + print_counters(cnt_delta); if (need_reinitialize) { re_initialize(); goto restart; } sleep(interval_sec); - get_counters(pcc_even); + get_counters(cnt_even); gettimeofday(&tv_even, (struct timezone *)NULL); - compute_delta(pcc_even, pcc_odd, pcc_delta); + compute_delta(cnt_even, cnt_odd, cnt_delta); timersub(&tv_even, &tv_odd, &tv_delta); - compute_average(pcc_delta, pcc_average); - print_counters(pcc_delta); + compute_average(cnt_delta, cnt_average); + print_counters(cnt_delta); } } @@ -892,7 +888,7 @@ void check_cpuid() * this check is valid for both Intel and AMD */ asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); - has_invariant_tsc = edx && (1 << 8); + has_invariant_tsc = edx & (1 << 8); if (!has_invariant_tsc) { fprintf(stderr, "No invariant TSC\n"); @@ -905,7 +901,7 @@ void check_cpuid() */ asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); - has_aperf = ecx && (1 << 0); + has_aperf = ecx & (1 << 0); if (!has_aperf) { fprintf(stderr, "No APERF MSR\n"); exit(1); @@ -952,7 +948,7 @@ void turbostat_init() check_dev_msr(); check_super_user(); - num_cpus = for_all_cpus(alloc_new_cpu_counters); + num_cpus = for_all_cpus(alloc_new_counters); if (verbose) print_nehalem_info(); @@ -962,7 +958,7 @@ int fork_it(char **argv) { int retval; pid_t child_pid; - get_counters(pcc_even); + get_counters(cnt_even); gettimeofday(&tv_even, (struct 
timezone *)NULL); child_pid = fork(); @@ -985,14 +981,14 @@ int fork_it(char **argv) exit(1); } } - get_counters(pcc_odd); + get_counters(cnt_odd); gettimeofday(&tv_odd, (struct timezone *)NULL); - retval = compute_delta(pcc_odd, pcc_even, pcc_delta); + retval = compute_delta(cnt_odd, cnt_even, cnt_delta); timersub(&tv_odd, &tv_even, &tv_delta); - compute_average(pcc_delta, pcc_average); + compute_average(cnt_delta, cnt_average); if (!retval) - print_counters(pcc_delta); + print_counters(cnt_delta); fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);;
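
The sound/usb/caiaq hunks above replace strcpy() with strlcpy() when copying the device-supplied product name into the fixed-size pcm->name and rmidi->name fields, so an over-long name is truncated instead of overrunning the buffer. A minimal user-space sketch of the difference follows; it is illustrative only, not driver code, and bounded_copy() is a hypothetical helper with strlcpy() semantics (glibc historically does not provide strlcpy()).

#include <stdio.h>
#include <string.h>

/* Hypothetical helper with strlcpy() semantics: copy at most size-1 bytes
 * and always NUL-terminate the destination. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len < size - 1) ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;	/* like strlcpy(), report the length it tried to copy */
}

int main(void)
{
	char name[8];	/* stands in for the fixed-size pcm->name / rmidi->name */
	const char *product_name = "a deliberately over-long USB product string";

	/* strcpy(name, product_name) would write past the 8-byte buffer;
	 * the bounded copy truncates instead. */
	bounded_copy(name, product_name, sizeof(name));
	printf("%s\n", name);
	return 0;
}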
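
The sound/usb changes above also add chip->shutdown_mutex so that USB disconnect and stream teardown cannot interleave: the mutex is initialized in snd_usb_audio_create(), held in snd_usb_audio_disconnect() while the device is flagged as shut down, and held in snd_usb_hw_params()/snd_usb_hw_free() around snd_usb_release_substream_urbs(). The sketch below shows only that locking shape in user space, using pthreads in place of the kernel's struct mutex; all names are stand-ins, not the driver's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct snd_usb_audio; the real driver uses the kernel's
 * struct mutex and mutex_lock(), not pthreads. */
struct chip {
	pthread_mutex_t shutdown_mutex;
	bool shutdown;
};

/* Analogue of snd_usb_hw_free(): release stream resources under the mutex
 * so this step cannot overlap with a disconnect in progress. */
static void hw_free(struct chip *c)
{
	pthread_mutex_lock(&c->shutdown_mutex);
	printf("releasing substream URBs (device %s)\n",
	       c->shutdown ? "already gone" : "present");
	pthread_mutex_unlock(&c->shutdown_mutex);
}

/* Analogue of snd_usb_audio_disconnect(): flag the device as gone under the
 * same mutex before anything is torn down. */
static void disconnect(struct chip *c)
{
	pthread_mutex_lock(&c->shutdown_mutex);
	c->shutdown = true;
	pthread_mutex_unlock(&c->shutdown_mutex);
}

int main(void)
{
	struct chip c = { .shutdown = false };

	pthread_mutex_init(&c.shutdown_mutex, NULL);	/* cf. mutex_init() in snd_usb_audio_create() */
	hw_free(&c);
	disconnect(&c);
	hw_free(&c);
	pthread_mutex_destroy(&c.shutdown_mutex);
	return 0;
}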
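
In the turbostat hunks, two CPUID feature checks change from '&&' to '&'. With the logical '&&', any nonzero register value made has_invariant_tsc and has_aperf appear set regardless of the bit being probed; the bitwise '&' actually masks EDX bit 8 (invariant TSC) and ECX bit 0 (APERF/MPERF). A small self-contained example of the difference, using a hypothetical register value rather than turbostat code:

#include <stdio.h>

int main(void)
{
	/* Hypothetical CPUID result: some feature bits set, but bit 8 clear. */
	unsigned int edx = 0x00000013;

	/* Buggy test: '&&' only asks "are both operands nonzero?", so any
	 * nonzero EDX reports the invariant TSC as present. */
	int has_invariant_tsc_wrong = edx && (1 << 8);

	/* Fixed test: '&' masks the register, so only bit 8 matters. */
	int has_invariant_tsc_right = (edx & (1 << 8)) != 0;

	printf("&& gives %d, & gives %d\n",
	       has_invariant_tsc_wrong, has_invariant_tsc_right);
	return 0;
}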