-rw-r--r-- Documentation/hwmon/max16064 | 62
-rw-r--r-- Documentation/hwmon/max34440 | 79
-rw-r--r-- Documentation/hwmon/max8688 | 69
-rw-r--r-- Documentation/hwmon/pmbus | 34
-rw-r--r-- Documentation/hwmon/smm665 | 8
-rw-r--r-- Documentation/hwmon/submitting-patches | 109
-rw-r--r-- Documentation/md.txt | 10
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- arch/arm/include/asm/cputype.h | 1
-rw-r--r-- arch/arm/include/asm/unistd.h | 4
-rw-r--r-- arch/arm/kernel/calls.S | 4
-rw-r--r-- arch/x86/include/asm/numa.h | 2
-rw-r--r-- arch/x86/kernel/smpboot.c | 23
-rw-r--r-- arch/x86/mm/numa.c | 31
-rw-r--r-- arch/x86/mm/numa_emulation.c | 20
-rw-r--r-- arch/x86/xen/mmu.c | 13
-rw-r--r-- arch/x86/xen/setup.c | 2
-rw-r--r-- block/blk-core.c | 13
-rw-r--r-- block/blk-sysfs.c | 8
-rw-r--r-- block/blk.h | 1
-rw-r--r-- block/cfq-iosched.c | 20
-rw-r--r-- block/elevator.c | 3
-rw-r--r-- block/genhd.c | 8
-rw-r--r-- drivers/char/agp/generic.c | 19
-rw-r--r-- drivers/char/virtio_console.c | 11
-rw-r--r-- drivers/connector/connector.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 73
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_object.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_instmem.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_vm.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_vm.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 91
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 29
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 6
-rw-r--r-- drivers/hwmon/pmbus_core.c | 1
-rw-r--r-- drivers/ide/ide-cd.c | 1
-rw-r--r-- drivers/ide/ide-cd_ioctl.c | 6
-rw-r--r-- drivers/ide/ide-gd.c | 7
-rw-r--r-- drivers/md/md.c | 1
-rw-r--r-- drivers/md/raid5.c | 5
-rw-r--r-- drivers/net/bna/bfa_ioc.c | 31
-rw-r--r-- drivers/net/bna/bfa_ioc.h | 1
-rw-r--r-- drivers/net/bna/bfa_ioc_ct.c | 28
-rw-r--r-- drivers/net/bna/bfi.h | 6
-rw-r--r-- drivers/net/bna/bnad.c | 1
-rw-r--r-- drivers/net/bnx2x/bnx2x_ethtool.c | 9
-rw-r--r-- drivers/net/bonding/bond_alb.c | 6
-rw-r--r-- drivers/net/bonding/bond_alb.h | 4
-rw-r--r-- drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r-- drivers/net/loopback.c | 3
-rw-r--r-- drivers/net/natsemi.c | 3
-rw-r--r-- drivers/net/netxen/netxen_nic.h | 4
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 17
-rw-r--r-- drivers/net/qlcnic/qlcnic.h | 1
-rw-r--r-- drivers/net/qlcnic/qlcnic_main.c | 14
-rw-r--r-- drivers/net/sfc/efx.c | 6
-rw-r--r-- drivers/net/sfc/io.h | 2
-rw-r--r-- drivers/net/sfc/net_driver.h | 2
-rw-r--r-- drivers/net/sfc/nic.c | 22
-rw-r--r-- drivers/net/sfc/nic.h | 1
-rw-r--r-- drivers/net/sfc/selftest.c | 25
-rw-r--r-- drivers/net/sfc/tx.c | 3
-rw-r--r-- drivers/net/sis900.c | 23
-rw-r--r-- drivers/net/stmmac/dwmac_lib.c | 28
-rw-r--r-- drivers/net/stmmac/stmmac_main.c | 49
-rw-r--r-- drivers/net/tokenring/3c359.c | 4
-rw-r--r-- drivers/net/tokenring/lanstreamer.c | 2
-rw-r--r-- drivers/net/tokenring/olympic.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hif_usb.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 6
-rw-r--r-- drivers/net/wireless/ath/regd_common.h | 1
-rw-r--r-- drivers/net/wireless/iwlegacy/Kconfig | 9
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl-3945-hw.h | 2
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl-4965-hw.h | 3
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl-core.c | 17
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl-eeprom.c | 7
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl3945-base.c | 4
-rw-r--r-- drivers/net/wireless/iwlegacy/iwl4965-base.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 3
-rw-r--r-- drivers/net/wireless/mwl8k.c | 9
-rw-r--r-- drivers/net/wireless/p54/txrx.c | 2
-rw-r--r-- drivers/pci/intel-iommu.c | 55
-rw-r--r-- drivers/scsi/scsi_lib.c | 17
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 19
-rw-r--r-- drivers/virtio/virtio_pci.c | 15
-rw-r--r-- drivers/virtio/virtio_ring.c | 1
-rw-r--r-- fs/nfsd/nfs4state.c | 3
-rw-r--r-- fs/nfsd/vfs.c | 9
-rw-r--r-- fs/xattr.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_message.c | 4
-rw-r--r-- include/linux/blkdev.h | 26
-rw-r--r-- include/linux/usb/usbnet.h | 4
-rw-r--r-- net/bridge/br_netfilter.c | 6
-rw-r--r-- net/caif/cfdgml.c | 6
-rw-r--r-- net/caif/cfmuxl.c | 4
-rw-r--r-- net/core/dev.c | 10
-rw-r--r-- net/ieee802154/Makefile | 2
-rw-r--r-- net/ipv4/inet_connection_sock.c | 5
-rw-r--r-- net/ipv4/inetpeer.c | 13
-rw-r--r-- net/ipv4/ip_options.c | 6
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 3
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/irda/af_irda.c | 3
-rw-r--r-- net/llc/llc_input.c | 3
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 4
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 18
-rw-r--r-- net/netfilter/xt_set.c | 18
-rw-r--r-- net/sctp/associola.c | 4
123 files changed, 996 insertions, 543 deletions
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
new file mode 100644
index 00000000000..41728999e14
--- /dev/null
+++ b/Documentation/hwmon/max16064
@@ -0,0 +1,62 @@
+Kernel driver max16064
+======================
+
+Supported chips:
+ * Maxim MAX16064
+ Prefix: 'max16064'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
+Controller with Active-Voltage Output Control and PMBus Interface.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-4]_label "vout[1-4]"
+in[1-4]_input Measured voltage. From READ_VOUT register.
+in[1-4]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-4]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
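
The instantiation mentioned under "Usage Notes" above is typically done from
board setup code. A minimal illustrative sketch, assuming the chip sits at
address 0x40 on I2C bus 0 (both values are placeholders, not taken from the
datasheet):

    #include <linux/i2c.h>

    static struct i2c_board_info max16064_board_info[] __initdata = {
        {
            I2C_BOARD_INFO("max16064", 0x40),   /* assumed slave address */
        },
    };

    /* Call from early board setup, before the adapter for bus 0 registers. */
    i2c_register_board_info(0, max16064_board_info,
                            ARRAY_SIZE(max16064_board_info));

Alternatively, devices can be instantiated at run time through sysfs, as
described in Documentation/i2c/instantiating-devices.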
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
new file mode 100644
index 00000000000..6c525dd07d5
--- /dev/null
+++ b/Documentation/hwmon/max34440
@@ -0,0 +1,79 @@
+Kernel driver max34440
+======================
+
+Supported chips:
+ * Maxim MAX34440
+ Prefixes: 'max34440'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ * Maxim MAX34441
+ PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+ Prefixes: 'max34441'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
+Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager
+and Intelligent Fan Controller.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-6]_label "vout[1-6]".
+in[1-6]_input Measured voltage. From READ_VOUT register.
+in[1-6]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-6]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr[1-6]_label "iout[1-6]".
+curr[1-6]_input Measured current. From READ_IOUT register.
+curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
+curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+ in6 and curr6 attributes only exist for MAX34440.
+
+temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
+ temp1 is the chip's internal temperature. temp2..temp5
+ are remote I2C temperature sensors. For MAX34441, temp6
+ is a remote thermal-diode sensor. For MAX34440, temp6..8
+ are remote I2C temperature sensors.
+temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register.
+temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp[1-8]_max_alarm Temperature high alarm.
+temp[1-8]_crit_alarm Temperature critical high alarm.
+
+ temp7 and temp8 attributes only exist for MAX34440.
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
new file mode 100644
index 00000000000..0ddd3a41203
--- /dev/null
+++ b/Documentation/hwmon/max8688
@@ -0,0 +1,69 @@
+Kernel driver max8688
+=====================
+
+Supported chips:
+ * Maxim MAX8688
+ Prefix: 'max8688'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
+Controller/Monitor with PMBus Interface.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in1_label "vout1"
+in1_input Measured voltage. From READ_VOUT register.
+in1_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in1_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr1_label "iout1"
+curr1_input Measured current. From READ_IOUT register.
+curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_max_alarm Current high alarm. From IOUT_OC_WARNING status.
+curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
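
The three Maxim drivers documented above share a common structure: they
describe the chip in a struct pmbus_driver_info and delegate probing to the
PMBus core, which creates the sysfs attributes. A rough, hypothetical
client-driver sketch (the driver name "foo" and the functionality flags are
illustrative, not the actual max8688 configuration):

    #include <linux/i2c.h>
    #include "pmbus.h"

    static struct pmbus_driver_info foo_info = {
        .pages = 1,
        .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
                   PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
                   PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
    };

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
        /* The PMBus core handles register access and sysfs attributes. */
        return pmbus_do_probe(client, id, &foo_info);
    }

    static int foo_remove(struct i2c_client *client)
    {
        return pmbus_do_remove(client);
    }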
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index dc4933e9634..5e462fc7f99 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -13,26 +13,6 @@ Supported chips:
Prefix: 'ltc2978'
Addresses scanned: -
Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
- * Maxim MAX16064
- Quad Power-Supply Controller
- Prefix: 'max16064'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
- * Maxim MAX34440
- PMBus 6-Channel Power-Supply Manager
- Prefixes: 'max34440'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
- * Maxim MAX34441
- PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
- Prefixes: 'max34441'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
- * Maxim MAX8688
- Digital Power-Supply Controller/Monitor
- Prefix: 'max8688'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
* Generic PMBus devices
Prefix: 'pmbus'
Addresses scanned: -
@@ -175,11 +155,13 @@ currX_crit Critical maximum current.
From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
currX_alarm Current high alarm.
From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_max_alarm Current high alarm.
+ From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status.
currX_lcrit_alarm Output current critical low alarm.
From IOUT_UC_FAULT status.
currX_crit_alarm Current critical high alarm.
From IIN_OC_FAULT or IOUT_OC_FAULT status.
-currX_label "iin" or "vinY"
+currX_label "iin" or "ioutY"
powerX_input Measured power. From READ_PIN or READ_POUT register.
powerX_cap Output power cap. From POUT_MAX register.
@@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm.
From POUT_OP_FAULT status.
powerX_label "pin" or "poutY"
-tempX_input Measured tempererature.
+tempX_input Measured temperature.
From READ_TEMPERATURE_X register.
-tempX_min Mimimum tempererature. From UT_WARN_LIMIT register.
-tempX_max Maximum tempererature. From OT_WARN_LIMIT register.
-tempX_lcrit Critical low tempererature.
+tempX_min Minimum temperature. From UT_WARN_LIMIT register.
+tempX_max Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit Critical low temperature.
From UT_FAULT_LIMIT register.
-tempX_crit Critical high tempererature.
+tempX_crit Critical high temperature.
From OT_FAULT_LIMIT register.
tempX_min_alarm Chip temperature low alarm. Set by comparing
READ_TEMPERATURE_X with UT_WARN_LIMIT if
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665
index 3820fc9ca52..59e31614054 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665
@@ -150,8 +150,8 @@ in8_crit_alarm Channel F critical alarm
in9_crit_alarm AIN1 critical alarm
in10_crit_alarm AIN2 critical alarm
-temp1_input Chip tempererature
-temp1_min Mimimum chip tempererature
-temp1_max Maximum chip tempererature
-temp1_crit Critical chip tempererature
+temp1_input Chip temperature
+temp1_min Minimum chip temperature
+temp1_max Maximum chip temperature
+temp1_crit Critical chip temperature
temp1_crit_alarm Temperature critical alarm
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
new file mode 100644
index 00000000000..86f42e8e9e4
--- /dev/null
+++ b/Documentation/hwmon/submitting-patches
@@ -0,0 +1,109 @@
+ How to Get Your Patch Accepted Into the Hwmon Subsystem
+ -------------------------------------------------------
+
+This text is a collection of suggestions for people writing patches or
+drivers for the hwmon subsystem. Following these suggestions will greatly
+increase the chances of your change being accepted.
+
+
+1. General
+----------
+
+* It should be unnecessary to mention, but please read and follow
+ Documentation/SubmitChecklist
+ Documentation/SubmittingDrivers
+ Documentation/SubmittingPatches
+ Documentation/CodingStyle
+
+* If your patch generates checkpatch warnings, please refrain from explanations
+ such as "I don't like that coding style". Keep in mind that each unnecessary
+ warning helps hide a real problem. If you don't like the kernel coding
+ style, don't write kernel drivers.
+
+* Please test your patch thoroughly. We are not your test group.
+ Sometimes a patch cannot be tested, or cannot be tested completely, because
+ the necessary hardware is missing. In such cases, you should at least
+ test-build the code on one architecture. If run-time testing was not done,
+ say so explicitly below the patch header.
+
+* If your patch (or the driver) is affected by configuration options such as
+ CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration
+ variants.
+
+
+2. Adding functionality to existing drivers
+-------------------------------------------
+
+* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+ date.
+
+* Make sure the information in Kconfig is up to date.
+
+* If the added functionality requires some cleanup or structural changes, split
+ your patch into a cleanup part and the actual addition. This makes it easier
+ to review your changes, and to bisect any resulting problems.
+
+* Never mix bug fixes, cleanup, and functional enhancements in a single patch.
+
+
+3. New drivers
+--------------
+
+* Running your patch or driver file(s) through checkpatch does not mean its
+ formatting is clean. If unsure about formatting in your new driver, run it
+ through Lindent. Lindent is not perfect, and you may have to do some minor
+ cleanup, but it is a good start.
+
+* Consider adding yourself to MAINTAINERS.
+
+* Document the driver in Documentation/hwmon/<driver_name>.
+
+* Add the driver to Kconfig and Makefile in alphabetical order.
+
+* Make sure that all dependencies are listed in Kconfig. For new drivers, it
+ is most likely prudent to add a dependency on EXPERIMENTAL.
+
+* Avoid forward declarations if you can. Rearrange the code if necessary.
+
+* Avoid calculations in macros and macro-generated functions. While such macros
+ may save a line or so in the source, they obfuscate the code and make code
+ review more difficult. They may also result in code which is more complicated
+ than necessary. Use inline functions or just regular functions instead.
+
+* If the driver has a detect function, make sure it is silent. Debug messages
+ and messages printed after a successful detection are acceptable, but it
+ must not print messages such as "Chip XXX not found/supported".
+
+ Keep in mind that the detect function will run for all drivers supporting an
+ address if a chip is detected on that address. Unnecessary messages will just
+ pollute the kernel log and not provide any value.
+
+* Provide a detect function if and only if a chip can be detected reliably.
+
+* Avoid writing to chip registers in the detect function. If you have to write,
+ only do it after you have already gathered enough data to be certain that the
+ detection is going to be successful.
+
+ Keep in mind that the chip might not be what your driver believes it is, and
+ writing to it might cause a bad misconfiguration.
+
+* Make sure there are no race conditions in the probe function. Specifically,
+ completely initialize your chip first, then create sysfs entries and register
+ with the hwmon subsystem.
+
+* Do not provide support for deprecated sysfs attributes.
+
+* Do not create non-standard attributes unless really needed. If you have to use
+ non-standard attributes, or you believe you do, discuss it on the mailing list
+ first. In either case, provide a detailed explanation of why you need the
+ non-standard attribute(s).
+ Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+
+* When deciding which sysfs attributes to support, look at the chip's
+ capabilities. While we do not expect your driver to support everything the
+ chip may offer, it should at least support all limits and alarms.
+
+* Last but not least, please check if a driver for your chip already exists
+ before starting to write a new driver. Especially for temperature sensors,
+ new chips are often variants of previously released chips. In some cases,
+ a presumably new chip may simply have been relabeled.
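
To illustrate the probe-ordering rule above (initialize the chip completely,
then create sysfs entries, then register with hwmon), here is a hypothetical
sketch; struct foo_data, foo_group and foo_init_chip() are placeholders and
error handling is abbreviated:

    #include <linux/err.h>
    #include <linux/hwmon.h>
    #include <linux/i2c.h>
    #include <linux/slab.h>
    #include <linux/sysfs.h>

    struct foo_data {
        struct device *hwmon_dev;
    };

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
        struct foo_data *data;
        int err;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
            return -ENOMEM;
        i2c_set_clientdata(client, data);

        err = foo_init_chip(client);            /* 1: chip fully initialized */
        if (err)
            goto err_free;

        err = sysfs_create_group(&client->dev.kobj, &foo_group);  /* 2: sysfs */
        if (err)
            goto err_free;

        data->hwmon_dev = hwmon_device_register(&client->dev);    /* 3: hwmon */
        if (IS_ERR(data->hwmon_dev)) {
            err = PTR_ERR(data->hwmon_dev);
            sysfs_remove_group(&client->dev.kobj, &foo_group);
            goto err_free;
        }
        return 0;

    err_free:
        kfree(data);
        return err;
    }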
diff --git a/Documentation/md.txt b/Documentation/md.txt
index a81c7b4790f..2366b1c8cf1 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -552,6 +552,16 @@ also have
within the array where IO will be blocked. This is currently
only supported for raid4/5/6.
+ sync_min
+ sync_max
+ The two values, given as numbers of sectors, indicate a range
+ within the array where 'check'/'repair' will operate. Must be
+ a multiple of chunk_size. When the resync reaches "sync_max" it
+ will pause, rather than complete.
+ You can use 'select' or 'poll' on "sync_completed" to wait for
+ that number to reach sync_max. Then you can either increase
+ "sync_max", or write 'idle' to "sync_action".
+
Each active md device may also have attributes specific to the
personality module that manages it.
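
A minimal user-space sketch of the sync_min/sync_max usage described in the
md.txt addition above; the device name (md0) and the sector count are
assumptions:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void write_attr(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);
        if (fd >= 0) {
            write(fd, val, strlen(val));
            close(fd);
        }
    }

    int main(void)
    {
        struct pollfd pfd;
        char buf[64];
        ssize_t n;
        int fd;

        write_attr("/sys/block/md0/md/sync_min", "0");
        write_attr("/sys/block/md0/md/sync_max", "2097152");  /* sectors */
        write_attr("/sys/block/md0/md/sync_action", "check");

        fd = open("/sys/block/md0/md/sync_completed", O_RDONLY);
        if (fd < 0)
            return 1;
        pfd.fd = fd;
        pfd.events = POLLPRI;

        pread(fd, buf, sizeof(buf) - 1, 0);   /* read once before polling */
        poll(&pfd, 1, -1);                    /* block until the value changes;
                                                 a real tool loops until it
                                                 reaches sync_max */
        n = pread(fd, buf, sizeof(buf) - 1, 0);
        if (n < 0)
            n = 0;
        buf[n] = '\0';
        printf("sync_completed: %s\n", buf);

        close(fd);
        return 0;
    }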
diff --git a/MAINTAINERS b/MAINTAINERS
index ec360030628..1e2724e55cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -151,6 +151,7 @@ S: Maintained
F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ed5bc9e05a4..cd4458f6417 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_CPUTYPE_H
#include <linux/stringify.h>
+#include <linux/kernel.h>
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c891eb76c0e..87dbe3e2197 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -396,6 +396,10 @@
#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
+#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
+#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
+#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
+#define __NR_syncfs (__NR_SYSCALL_BASE+373)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5c26eccef99..7fbf28c35bb 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -379,6 +379,10 @@
CALL(sys_fanotify_init)
CALL(sys_fanotify_mark)
CALL(sys_prlimit64)
+/* 370 */ CALL(sys_name_to_handle_at)
+ CALL(sys_open_by_handle_at)
+ CALL(sys_clock_adjtime)
+ CALL(sys_syncfs)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 3d4dab43c99..a50fc9f493b 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
#endif
#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8ed8908cc9f..c2871d3c71b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -312,26 +312,6 @@ void __cpuinit smp_store_cpu_info(int id)
identify_secondary_cpu(c);
}
-static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
-{
- int node1 = early_cpu_to_node(cpu1);
- int node2 = early_cpu_to_node(cpu2);
-
- /*
- * Our CPU scheduler assumes all logical cpus in the same physical cpu
- * share the same node. But, buggy ACPI or NUMA emulation might assign
- * them to different node. Fix it.
- */
- if (node1 != node2) {
- pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
- cpu1, node1, cpu2, node2, node2);
-
- numa_remove_cpu(cpu1);
- numa_set_node(cpu1, node2);
- numa_add_cpu(cpu1);
- }
-}
-
static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
{
cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -340,7 +320,6 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
- check_cpu_siblings_on_same_node(cpu1, cpu2);
}
@@ -382,12 +361,10 @@ void __cpuinit set_cpu_sibling_map(int cpu)
per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
- check_cpu_siblings_on_same_node(cpu, i);
}
if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
cpumask_set_cpu(i, cpu_core_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(i));
- check_cpu_siblings_on_same_node(cpu, i);
/*
* Does this new cpu bringup a new core?
*/
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9559d360fde..745258dfc4d 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu)
return per_cpu(x86_cpu_to_node_map, cpu);
}
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
- int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
- return NULL;
+ return;
}
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
- return NULL;
+ return;
}
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
- return mask;
+ return;
}
# ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
-
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index ad091e4cff1..de84cc14037 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
- int nid, physnid, i;
+ int nid, physnid;
nid = early_cpu_to_node(cpu);
if (nid == NUMA_NO_NODE) {
@@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
physnid = emu_nid_to_phys[nid];
- for_each_online_node(i) {
+ for_each_online_node(nid) {
if (emu_nid_to_phys[nid] != physnid)
continue;
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, nid, enable);
}
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a991b57f91f..aef7af92b28 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
+#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
- unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
/* If there's an existing pte, then don't allow _PAGE_RW to be set */
if (pte_val_ma(*ptep) & _PAGE_PRESENT)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
-#endif
+
+ return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
/*
* If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
return pte;
}
+#endif /* CONFIG_X86_64 */
/* Init-time set_pte while constructing initial pagetables, which
doesn't allow RO pagetable pages to be remapped RW */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index fa0269a9937..90bac0aac3a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,7 +227,7 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
+ xen_extra_mem_start = max((1ULL << 32), mem_end);
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
diff --git a/block/blk-core.c b/block/blk-core.c
index 5fa3dd2705c..a2e58eeb354 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;
- /*
- * Only recurse once to avoid overrunning the stack, let the unplug
- * handling reinvoke the handler shortly if we already got there.
- */
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else
- queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
if (likely(!blk_queue_stopped(q)))
queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
+EXPORT_SYMBOL(blk_run_queue_async);
/**
* blk_run_queue - run a single device queue
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
local_irq_restore(flags);
}
-EXPORT_SYMBOL(blk_flush_plug_list);
void blk_finish_plug(struct blk_plug *plug)
{
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6d735122bc5..bd236313f35 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_SYNC);
- } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_ASYNC);
- } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
return ret;
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
- if (ret < 0)
+ if (ret < 0) {
+ blk_trace_remove_sysfs(dev);
return ret;
+ }
kobject_uevent(&q->kobj, KOBJ_ADD);
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c99..61263463e38 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
/*
* Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 46b0a1d1d92..5b52011e3a4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
}
/*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
*/
static void
-__call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+ void (*func)(struct io_context *, struct cfq_io_context *))
{
struct cfq_io_context *cic;
struct hlist_node *n;
+ rcu_read_lock();
+
hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
func(ioc, cic);
-}
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
-{
- rcu_read_lock();
- __call_for_each_cic(ioc, func);
rcu_read_unlock();
}
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
* should be ok to iterate over the known list, we will see all cic's
* since no new ones are added.
*/
- __call_for_each_cic(ioc, cic_free_func);
+ call_for_each_cic(ioc, cic_free_func);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
diff --git a/block/elevator.c b/block/elevator.c
index 6f6abc08bb5..45ca1e34f58 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
q->boundary_rq = rq;
}
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
- where == ELEVATOR_INSERT_SORT)
+ (where == ELEVATOR_INSERT_SORT ||
+ where == ELEVATOR_INSERT_SORT_MERGE))
where = ELEVATOR_INSERT_BACK;
switch (where) {
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a1..2dd988723d7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
spin_unlock_irq(&ev->lock);
- /* tell userland about new events */
+ /*
+ * Tell userland about new events. Only the events listed in
+ * @disk->events are reported. Unlisted events are processed the
+ * same internally but never get reported to userland.
+ */
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & (1 << i))
+ if (events & disk->events & (1 << i))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 012cba0d6d9..b072648dc3f 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
struct agp_memory *new;
unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+ return NULL;
+
new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
if (new == NULL)
return NULL;
@@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
int scratch_pages;
struct agp_memory *new;
size_t i;
+ int cur_memory;
if (!bridge)
return NULL;
- if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+ cur_memory = atomic_read(&bridge->current_memory_agp);
+ if ((cur_memory + page_count > bridge->max_memory_agp) ||
+ (cur_memory + page_count < page_count))
return NULL;
if (type >= AGP_USER_TYPES) {
@@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
return -EINVAL;
}
- /* AK: could wrap */
- if ((pg_start + mem->page_count) > num_entries)
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
return -EINVAL;
j = pg_start;
@@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
struct agp_bridge_data *bridge;
- int mask_type;
+ int mask_type, num_entries;
bridge = mem->bridge;
if (!bridge)
@@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
if (type != mem->type)
return -EINVAL;
+ num_entries = agp_num_entries();
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 84b164d1eb2..838568a7dbf 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
-#if 0
- /*
- * hvc_remove() not called as removing one hvc port
- * results in other hvc ports getting frozen.
- *
- * Once this is resolved in hvc, this functionality
- * will be enabled. Till that is done, the -EPIPE
- * return from get_chars() above will help
- * hvc_console.c to clean up on ports we remove here.
- */
hvc_remove(port->cons.hvc);
-#endif
}
/* Remove unused data this port might have received. */
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d77005849af..219d88a0eea 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
+ err = 0;
}
return err;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 432fc04c6bf..e522c702b04 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
return false;
+ }
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
@@ -6215,36 +6218,6 @@ cleanup_work:
return ret;
}
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
- */
- intel_crtc->dpms_mode = -1;
-}
-
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
- .gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = intel_crtc_page_flip,
-};
-
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
@@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
intel_disable_pipe(dev_priv, pipe);
}
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+
+ /* We need to fix up any BIOS configuration that conflicts with
+ * our expectations.
+ */
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
-
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4256b8ef394..6b22c1dcc01 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
int pipeconf_reg = PIPECONF(pipe);
- int dspcntr_reg = DSPCNTR(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(pipe);
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
@@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (type < 0)
return connector_status_disconnected;
+ intel_tv->type = type;
intel_tv_find_better_format(connector);
+
return connector_status_connected;
}
@@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev)
*
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
- connector->polled =
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index ce38e97b942..568caedd721 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 856d56a98d1..a76514a209b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
/* For PFIFO and PGRAPH. */
spinlock_t context_switch_lock;
+ /* VM/PRAMIN flush, legacy PRAMIN aperture */
+ spinlock_t vm_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramfc;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 889c4454682..39aee6d4daf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
OUT_RING (chan, 0);
}
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
FIRE_RING(chan);
mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
ret = 0;
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 78f467fe30b..5045f8b921d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
dma_bits = 40;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 &&
+ dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
dma_bits = 39;
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 7ba3fc0b30c..5b39718ae1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
- uint32_t flags;
+ uint32_t flags, ttmpl;
int ret;
- if (nouveau_vram_notify)
+ if (nouveau_vram_notify) {
flags = NOUVEAU_GEM_DOMAIN_VRAM;
- else
+ ttmpl = TTM_PL_FLAG_VRAM;
+ } else {
flags = NOUVEAU_GEM_DOMAIN_GART;
+ ttmpl = TTM_PL_FLAG_TT;
+ }
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, flags);
+ ret = nouveau_bo_pin(ntfy, ttmpl);
if (ret)
goto out_err;
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 4f00c87ed86..67a16e01ffa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
u32 val;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return val;
}
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a33fe401928..4bce801bc58 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
be->func->clear(be);
return -EFAULT;
}
+ nvbe->ttm_alloced[nvbe->nr_pages] = false;
}
nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+ if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
align = 512 * 1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 6e2b1a6caa2..a30adec5bea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
spin_lock_init(&dev_priv->channels.lock);
spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
+ spin_lock_init(&dev_priv->vm_lock);
/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a6f8aa651fc..4f95a1e5822 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -404,23 +404,25 @@ void
nv50_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 4fd3432b5b8..6c269449074 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -174,10 +174,11 @@ void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a0a2a0277f7..a179e6c55af 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
+ unsigned long flags;
u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc to more it decreases
@@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm)
nv_rd32(dev, 0x100c80), engine);
}
}
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d71d375149f..7bd74568909 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9d516a8c4df..529a3a70473 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
- if ((rdev->family == CHIP_R600) ||
- (rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV630) ||
- (rdev->family == CHIP_RV670))
+ if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
@@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3453910ee0f..e9bc135d918 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -353,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp = 0;
+ u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -363,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* first display controller
* 0 - first half of lb (3840 * 2)
* 1 - first 3/4 of lb (5760 * 2)
- * 2 - whole lb (7680 * 2)
+ * 2 - whole lb (7680 * 2), other crtc must be disabled
* 3 - first 1/4 of lb (1920 * 2)
* second display controller
* 4 - second half of lb (3840 * 2)
* 5 - second 3/4 of lb (5760 * 2)
- * 6 - whole lb (7680 * 2)
+ * 6 - whole lb (7680 * 2), other crtc must be disabled
* 7 - last 1/4 of lb (1920 * 2)
*/
- if (mode && other_mode) {
- if (mode->hdisplay > other_mode->hdisplay) {
- if (mode->hdisplay > 2560)
- tmp = 1; /* 3/4 */
- else
- tmp = 0; /* 1/2 */
- } else if (other_mode->hdisplay > mode->hdisplay) {
- if (other_mode->hdisplay > 2560)
- tmp = 3; /* 1/4 */
- else
- tmp = 0; /* 1/2 */
- } else
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode)
tmp = 0; /* 1/2 */
- } else if (mode)
- tmp = 2; /* whole */
- else if (other_mode)
- tmp = 3; /* 1/4 */
+ else
+ tmp = 2; /* whole */
+ } else
+ tmp = 0;
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
- switch (tmp) {
- case 0:
- case 4:
- default:
- if (ASIC_IS_DCE5(rdev))
- return 4096 * 2;
- else
- return 3840 * 2;
- case 1:
- case 5:
- if (ASIC_IS_DCE5(rdev))
- return 6144 * 2;
- else
- return 5760 * 2;
- case 2:
- case 6:
- if (ASIC_IS_DCE5(rdev))
- return 8192 * 2;
- else
- return 7680 * 2;
- case 3:
- case 7:
- if (ASIC_IS_DCE5(rdev))
- return 2048 * 2;
- else
- return 1920 * 2;
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
+ case 1:
+ case 5:
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
+ case 2:
+ case 6:
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
+ case 3:
+ case 7:
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
+ }
}
+
+ /* controller not enabled, so no lb used */
+ return 0;
}
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
@@ -2581,7 +2580,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
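The write-back slot read above is filled by the GPU in little-endian byte order, so fetching it through le32_to_cpu() keeps the interrupt-ring write pointer correct on big-endian hosts as well as little-endian ones. A small userspace model of the same conversion, with le32toh() standing in for the kernel's le32_to_cpu() (illustrative only, not part of the patch):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend this word was DMA-written by the device in LE order. */
	uint32_t raw_le = htole32(0x00001234u);

	/* Convert to host order before using it as a ring write pointer. */
	uint32_t wptr = le32toh(raw_le);

	printf("wptr = 0x%08x\n", (unsigned int)wptr);	/* 0x00001234 on any host */
	return 0;
}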
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 15d58292677..6f27593901c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3231,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2ef6d513506..5f45fa12bb8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
- goto failed;
+ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_legacy_backlight_init(radeon_encoder, connector);
}
}
- return;
-
-failed:
- drm_connector_cleanup(connector);
- kfree(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ccbabf734a6..983cbac75af 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.ddc_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.cd_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
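Because the connector is no longer torn down when radeon_i2c_lookup() fails, router_bus may legitimately be NULL at runtime, so the router-select helpers above now return early instead of dereferencing it. A toy model of that defensive pattern, with hypothetical structures in place of the radeon ones (not part of the patch):

#include <stdio.h>

struct bus { int addr; };
struct connector { struct bus *router_bus; int ddc_valid; };

static void select_ddc_port(struct connector *c)
{
	if (!c->ddc_valid)
		return;
	if (!c->router_bus)	/* bus lookup failed at probe time: nothing to program */
		return;
	printf("programming router at %d\n", c->router_bus->addr);
}

int main(void)
{
	struct connector ok = { &(struct bus){ 0x4a }, 1 };
	struct connector broken = { NULL, 1 };

	select_ddc_port(&ok);
	select_ddc_port(&broken);	/* silently skipped instead of crashing */
	return 0;
}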
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index edfb92e4173..196ffafafd8 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -139,7 +139,6 @@ struct pmbus_data {
* A single status register covers multiple attributes,
* so we keep them all together.
*/
- u8 status_bits;
u8 status[PB_NUM_STATUS_REG];
u8 currpage;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fd1e1179913..a5ec5a7cb38 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 2a6bc50e8a4..02caa7dd51c 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DRIVE_NOT_READY;
}
+/*
+ * ide-cd always generates a media changed event if media is missing, which
+ * makes it impossible to use for proper event reporting, so disk->events
+ * is cleared to 0 and the following function is used only to trigger
+ * revalidation; its result is never propagated to userland.
+ */
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
unsigned int clearing, int slot_nr)
{
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c4ffd488893..70ea8763567 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
return 0;
}
+ /*
+ * The following is used to force revalidation on the first open on
+	 * removable devices, and never gets reported to userland as
+	 * genhd->events is 0. This is intended because a removable ide disk
+	 * can't really detect MEDIA_CHANGE events.
+ */
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
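With g->events left at 0 in both ide-cd and ide-gd, whatever the check_events callbacks return is consumed only by the block layer's internal revalidation logic and is never surfaced as a media-change event to userland. A compact standalone model of that split, using made-up names for the disk and drive structures (illustrative only, not part of the patch):

#include <stdio.h>

#define DISK_EVENT_MEDIA_CHANGE 0x1u

struct fake_disk  { unsigned int events; };	/* stand-in for struct gendisk */
struct fake_drive { unsigned int media_changed; };

/* Reports MEDIA_CHANGE whenever the flag is set, purely to force
 * revalidation on the next open. */
static unsigned int fake_check_events(struct fake_drive *drive)
{
	unsigned int ret = drive->media_changed ? DISK_EVENT_MEDIA_CHANGE : 0;

	drive->media_changed = 0;
	return ret;
}

int main(void)
{
	struct fake_disk disk = { .events = 0 };	/* nothing advertised to userland */
	struct fake_drive drive = { .media_changed = 1 };
	unsigned int pending = fake_check_events(&drive);

	/* Only events listed in disk.events would be propagated out. */
	printf("internal: 0x%x, reported to userland: 0x%x\n",
	       pending, pending & disk.events);	/* 0x1, 0x0 */
	return 0;
}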
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6e853c61d87..7d6f7f18a92 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3170,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f301e6ae220..49bf5f89143 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5151,7 +5151,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -5679,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
{
struct raid0_private_data *raid0_priv = mddev->private;
+ sector_t sectors;
/* for raid0 takeover only one zone is supported */
if (raid0_priv->nr_strip_zones > 1) {
@@ -5687,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
return ERR_PTR(-EINVAL);
}
+ sectors = raid0_priv->strip_zone[0].zone_end;
+ sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+ mddev->dev_sectors = sectors;
mddev->new_level = level;
mddev->new_layout = ALGORITHM_PARITY_N;
mddev->new_chunk_sectors = mddev->chunk_sectors;
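The takeover path above sizes each member device from the single raid0 zone: the zone's end sector divided by the number of devices in the stripe. A self-contained version of that arithmetic; sector_div() does the 64-bit division in place in the kernel, modelled here with a plain division (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Single raid0 zone: 4 devices, zone ends at 8 TiB worth of sectors. */
	uint64_t zone_end = 4ull * 4294967296ull;	/* total sectors in the zone */
	unsigned int nb_dev = 4;

	uint64_t dev_sectors = zone_end / nb_dev;	/* kernel: sector_div(sectors, nb_dev) */

	printf("dev_sectors = %llu\n", (unsigned long long)dev_sectors);
	return 0;
}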
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e3de0b8625c..7581518ecfa 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -38,6 +38,8 @@
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
* execution context (driver/bios) must match.
*/
static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (fwhdr.exec != drv_fwhdr->exec)
+ if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
+ u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ boot_env = BFI_BOOT_LOADER_OS;
+
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
- false : bfa_ioc_fwver_valid(ioc);
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
- u32 boot_param)
+ u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type and boot param at the end.
*/
- writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
- writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_PARAM_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_LOADER_OFF)));
}
static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
* as the entry vector.
*/
static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
- if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_param);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index e4974bc24ef..bd48abee781 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
bool msix);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_start) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 469997c4ffd..87aecdf22cf 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
/**
* Synchronized IOC failure processing routines
*/
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+/**
+ * Synchronized IOC failure processing routines
+ */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
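The new ioc_sync_start hook distinguishes a fresh driver load from the ordinary failure-sync path: if the sync-required bit for this PCI function is still set from an unclean exit, whichever function arrives first wipes the shared state and proceeds. A toy model of that bit check, with a plain variable standing in for the shared ioc_fail_sync register (not part of the patch):

#include <stdio.h>

#define SYNC_POS(fn)	(1u << (fn))	/* per-PCI-function "sync required" bit */

static unsigned int ioc_fail_sync;	/* models the shared sync register */

/* Returns 1 when this function may proceed straight to h/w init. */
static int sync_start(unsigned int pci_fn)
{
	if (ioc_fail_sync & SYNC_POS(pci_fn)) {
		/* Stale bit from an unclean exit in a previous incarnation:
		 * whoever gets here first wipes the shared state. */
		ioc_fail_sync = 0;
		return 1;
	}
	return 0;	/* fall back to the normal sync_complete check */
}

int main(void)
{
	ioc_fail_sync = SYNC_POS(1);
	printf("fn1: %d, fn0: %d\n", sync_start(1), sync_start(0));	/* 1, 0 */
	return 0;
}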
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index a9739681105..6050379526f 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -184,12 +184,14 @@ enum bfi_mclass {
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_PARAM_OFF 12
+#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
+#define BFI_BOOT_LOADER_OS 0
+
#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 9f356d5d0f3..8e6ceab9f4d 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b..89cb977898c 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, SPEED_1000);
+ LED_MODE_ON, SPEED_1000);
else
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OFF, 0);
+ LED_MODE_FRONT_PANEL_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
break;
}
- if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
- bp->link_vars.line_speed);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER, bp->link_vars.line_speed);
return 0;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9bc5de3e04a..ba715826e2a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
- tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+ tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
*/
rlb_choose_channel(skb, bond);
- /* The ARP relpy packets must be delayed so that
+ /* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 86861f08b24..8ca7158b2dd 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -75,7 +75,7 @@ struct tlb_client_info {
* gave this entry index.
*/
u32 tx_bytes; /* Each Client accumulates the BytesTx that
- * were tranmitted to it, and after each
+ * were transmitted to it, and after each
* CallBack the LoadHistory is divided
* by the balance interval
*/
@@ -122,7 +122,6 @@ struct tlb_slave_info {
};
struct alb_bond_info {
- struct timer_list alb_timer;
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
struct slave *next_rx_slave;/* next slave to be assigned
* to a new rx client for
*/
- u32 rlb_interval_counter;
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c0a1bc5b143..bd1d811c204 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
if (!ofdev->dev.of_match)
return -EINVAL;
- data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
base = of_iomap(np, 0);
if (!base) {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ea0dc451da9..d70fb76edb7 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL;
+ | NETIF_F_NETNS_LOCAL
+ | NETIF_F_VLAN_CHALLENGED;
dev->ethtool_ops = &loopback_ethtool_ops;
dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index aa2813e06d0..1074231f0a0 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
prev_eedata = eedata;
}
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
dev->base_addr = (unsigned long __force) ioaddr;
dev->irq = irq;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d7299f1a494..679dc8519c5 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -174,7 +174,7 @@
#define MAX_NUM_CARDS 4
-#define MAX_BUFFERS_PER_CMD 32
+#define NETXEN_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
*/
struct netxen_cmd_buffer {
struct sk_buff *skb;
- struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 83348dc4b18..e8a4b665599 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += frag->size;
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
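Non-TSO packets are limited to 14 fragments by the hardware, so when an skb carries more, the driver sums the sizes of the leading excess fragments and hands that byte count to __pskb_pull_tail(), folding them into the linear area until the remaining count fits; qlcnic below receives the same fix. A standalone model of the delta calculation, with an ordinary array in place of skb fragments (illustrative only, not part of the patch):

#include <stdio.h>

#define MAX_FRAGS_PER_TX 14	/* hardware limit for non-TSO packets */

int main(void)
{
	/* Model: an skb with 20 page fragments of 1 KiB each plus linear data. */
	unsigned int frag_size[20], nr_frags = 20, i, delta = 0;

	for (i = 0; i < nr_frags; i++)
		frag_size[i] = 1024;

	if (nr_frags + 1 > MAX_FRAGS_PER_TX) {
		/* Bytes that must move into the linear area so the remaining
		 * fragments fit the descriptor limit; the driver passes this
		 * value to __pskb_pull_tail(skb, delta). */
		for (i = 0; i < (nr_frags + 1) - MAX_FRAGS_PER_TX; i++)
			delta += frag_size[i];
		nr_frags -= (nr_frags + 1) - MAX_FRAGS_PER_TX;
	}
	printf("pull %u bytes, %u frags left (+1 linear)\n", delta, nr_frags);
	return 0;
}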
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f..b0dead00b2d 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -99,6 +99,7 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa..cb1a1ef36c0 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
+ int delta = 0;
int i, k;
u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4..a3c2aab53de 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
+ if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef107..cc978803d48 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
+ rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a..191a311da2d 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf..10f1cb79c14 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.addr)) + index;
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
}
/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ++channel->magic_count;
+ ; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1060,6 +1061,13 @@ out:
return spent;
}
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
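eventq_read_ptr is now a free-running counter: efx_event() and the RPTR doorbell mask it with eventq_mask (the ring size minus one) at the point of use, and the unmasked value doubles as a cheap progress indicator for the self-test. A small standalone model of a free-running index over a power-of-two ring (not part of the patch):

#include <stdio.h>

#define RING_SIZE 8u	/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static int ring[RING_SIZE];

static int *ring_entry(unsigned int index)
{
	return &ring[index & RING_MASK];	/* mask only at the point of use */
}

int main(void)
{
	unsigned int read_ptr = 0;

	for (; read_ptr < 20; read_ptr++)	/* free-running, never wrapped by hand */
		*ring_entry(read_ptr) = (int)read_ptr;

	/* read_ptr keeps counting, so "has it moved?" is a simple compare. */
	printf("read_ptr=%u, last slot=%u\n", read_ptr, read_ptr & RING_MASK);
	return 0;
}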
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d4..a42db6e35be 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d6..50ad3bcaf68 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_channel *channel;
-
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx->last_irq_cpu = -1;
smp_wmb();
- /* ACK each interrupting event queue. Receiving an interrupt due to
- * traffic before a test event is raised is considered a pass */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- if (efx->last_irq_cpu >= 0)
- goto success;
- }
-
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
- unsigned int magic_count, count;
+ unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- magic_count = channel->magic_count;
+ read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
do {
schedule_timeout_uninterruptible(HZ / 100);
- if (channel->work_pending)
- efx_process_channel_now(channel);
-
- if (channel->magic_count != magic_count)
+ if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
}
/* Check to see if event was received even if interrupt wasn't */
- efx_process_channel_now(channel);
- if (channel->magic_count != magic_count) {
+ if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
+ netif_tx_wake_all_queues(efx->net_dev);
+
return rc_test;
}
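The event-queue test above now waits for eventq_read_ptr to move instead of counting magic test events, reading it through ACCESS_ONCE() so the compiler reloads the value on every pass of the wait loop. A userspace sketch of that polling idiom, with the kernel's volatile-cast definition of ACCESS_ONCE reproduced locally (illustrative only, not part of the patch):

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int read_ptr;	/* advanced by NAPI polling in the driver */

static void simulate_event_processing(void) { read_ptr++; }

int main(void)
{
	unsigned int snapshot = ACCESS_ONCE(read_ptr);
	int tries;

	for (tries = 0; tries < 2; tries++) {
		simulate_event_processing();	/* stands in for the interrupt path */
		if (ACCESS_ONCE(read_ptr) != snapshot) {
			printf("event observed after %d poll(s)\n", tries + 1);
			return 0;
		}
	}
	printf("no event seen\n");
	return 1;
}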
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 13980190821..d2c85dfdf3b 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
* queue state. */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled)) {
+ likely(efx->port_enabled) &&
+ likely(!efx->port_inhibited)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cb317cd069f..484f795a779 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
return 1;
}
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
outb(0x09 + i, 0x70);
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr.
+ * @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
}
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
/* enable packet filtering */
outl(rfcrSave | RFEN, rfcr + ioaddr);
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
outl(EEDONE, ee_addr);
return 1;
} else {
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d65fab1ba79..e25093510b0 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -26,9 +26,9 @@
#undef DWMAC_DMA_DEBUG
#ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
#else
-#define DBG(fmt, args...) do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
- DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
#ifdef DWMAC_DMA_DEBUG
/* It displays the DMA process states (CSR5 register) */
show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
#endif
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(INFO, "transmit underflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
ret = tx_hard_error_bump_tc;
x->tx_undeflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(INFO, "transmit jabber\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
x->tx_jabber_irq++;
}
if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(INFO, "recv overflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
x->rx_overflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(INFO, "receive buffer unavailable\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
x->rx_buf_unav_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(INFO, "receive process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
x->rx_process_stopped_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(INFO, "receive watchdog\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
x->rx_watchdog_irq++;
}
if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(INFO, "transmit early interrupt\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
x->tx_early_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(INFO, "transmit process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
x->tx_process_stopped_irq++;
ret = tx_hard_error;
}
if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(INFO, "fatal bus error\n");
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
x->fatal_bus_error_irq++;
ret = tx_hard_error;
}
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
- DBG(INFO, "\n\n");
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
return ret;
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b5..cc973fc3840 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
- stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
}
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
stmmac_verify_args();
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- return ret;
- }
-
- /* Request the IRQ lines */
- ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
- return ret;
- }
-
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
} else
priv->tm->enable = 1;
#endif
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ goto open_error;
+ }
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
- priv->dma_tx_phy,
- priv->dma_rx_phy) < 0)) {
-
+ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+ priv->dma_tx_phy, priv->dma_rx_phy);
+ if (ret < 0) {
pr_err("%s: DMA initialization failed\n", __func__);
- return -1;
+ goto open_error;
}
/* Copy the MAC addr into the HW */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_enable_mac(priv->ioaddr);
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
+
return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+ kfree(priv->tm);
+#endif
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ return ret;
}
/**
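stmmac_open() now takes the PHY and the IRQ only after the timer and DMA setup they depend on, and every failure funnels through one open_error label that releases whatever was already acquired. A generic standalone sketch of that acquire-in-order, unwind-in-one-place shape, using hypothetical helpers rather than the stmmac API (not part of the patch):

#include <stdio.h>
#include <stdlib.h>

static void *get_timer(void)    { return malloc(1); }
static int   attach_phy(void)   { return 0; }	/* 0 on success */
static int   init_dma(void)     { return 0; }
static int   request_line(void) { return -1; }	/* simulate a late failure */

static int open_device(void)
{
	void *timer = get_timer();
	int phy_attached = 0, ret;

	if (!timer)
		return -1;

	ret = attach_phy();
	if (ret)
		goto open_error;
	phy_attached = 1;

	ret = init_dma();
	if (ret)
		goto open_error;

	ret = request_line();		/* last step; everything above is undone on failure */
	if (ret)
		goto open_error;

	return 0;

open_error:
	if (phy_attached)
		printf("disconnecting phy\n");
	free(timer);
	return ret;
}

int main(void)
{
	printf("open_device() = %d\n", open_device());
	return 0;
}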
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 8a3b191b195..ff32befd844 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The NIC has told us that a packet has been downloaded onto the card, we must
* find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
*/
static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 5bd14070453..9354ca9da57 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1675,7 +1675,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3d2fbe60b46..2684003b8ab 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1500,7 +1500,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f1b8af64569..2d10239ce82 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
}
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
- &hif_dev->udev->dev, hif_dev->device_id,
+ &interface->dev, hif_dev->device_id,
hif_dev->udev->product, id->driver_info);
if (ret) {
ret = -EINVAL;
@@ -1158,7 +1158,7 @@ fail_resume:
#endif
static struct usb_driver ath9k_hif_usb_driver = {
- .name = "ath9k_hif_usb",
+ .name = KBUILD_MODNAME,
.probe = ath9k_hif_usb_probe,
.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1ec9bcd6b28..c95bc5cc1a1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
- ath9k_hw_abortpcurecv(ah);
- if (!ath9k_hw_stopdmarecv(ah)) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to stop receive dma\n");
- bChannelChange = false;
- }
- }
-
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 562257ac52c..edc1cbbfeca 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
#define AH_RX_TIME_QUANTUM 100 /* usec */
struct ath_common *common = ath9k_hw_common(ah);
+ u32 mac_status, last_mac_status = 0;
int i;
+ /* Enable access to the DMA observation bus */
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
REG_WRITE(ah, AR_CR, AR_CR_RXD);
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
break;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+ *reset = true;
+ break;
+ }
+
+ last_mac_status = mac_status;
+ }
+
udelay(AH_TIME_QUANTUM);
}
if (i == 0) {
ath_err(common,
- "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
- REG_READ(ah, AR_DIAG_SW));
+ REG_READ(ah, AR_DIAG_SW),
+ REG_READ(ah, AR_DMADBG_7));
return false;
} else {
return true;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b2b2ff852c3..c2a59386fb9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dddb85de622..17d04ff8d67 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ath9k_calculate_iter_data(hw, vif, &iter_data);
- ath9k_ps_wakeup(sc);
/* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
}
ath9k_hw_set_interrupts(ah, ah->imask);
- ath9k_ps_restore(sc);
/* Set up ANI */
if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
int ret = 0;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
/* See if new interface type is valid. */
if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
return ret;
}
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
txq = sc->tx.txq_map[queue];
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
int slottime;
int error;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a9c3f4672aa..dcd19bc337d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -486,12 +486,12 @@ start_recv:
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- bool stopped;
+ bool stopped, reset = false;
spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
- stopped = ath9k_hw_stopdmarecv(ah);
+ stopped = ath9k_hw_stopdmarecv(ah, &reset);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
"confusing the DMA engine when we start RX up\n");
ATH_DBG_WARN_ON_ONCE(!stopped);
}
- return stopped;
+ return stopped || reset;
}
void ath_flushrecv(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 248c670fdfb..5c2cfe69415 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
+ {APL7_FCCA, CTL_FCC, CTL_FCC},
{APL1_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_APLD, CTL_FCC, NO_CTL},
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index 2a45dd44cc1..aef65cd4766 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,6 +1,5 @@
config IWLWIFI_LEGACY
- tristate "Intel Wireless Wifi legacy devices"
- depends on PCI && MAC80211
+ tristate
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
This option enables support for
@@ -92,7 +92,8 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 779d3cb86e2..5c3a68d3af1 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -74,8 +74,6 @@
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
-#define IWL_DEFAULT_TX_POWER 0x0F
-
/*
* EEPROM related constants, enums, and structures.
*/
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 08b189c8472..fc6fa2886d9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
#define IWL4965_DEFAULT_TX_RETRY 15
-/* Limit range of txpower output target to be between these values */
-#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
-
/* EEPROM */
#define IWL4965_FIRST_AMPDU_QUEUE 10
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 7007d61bb6b..c1511b14b23 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
struct ieee80211_channel *geo_ch;
struct ieee80211_rate *rates;
int i = 0;
+ s8 max_tx_power = 0;
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags |= ch->ht40_extension_channel;
- if (ch->max_power_avg > priv->tx_power_device_lmt)
- priv->tx_power_device_lmt = ch->max_power_avg;
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
} else {
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
}
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags);
}
+ priv->tx_power_device_lmt = max_tx_power;
+ priv->tx_power_user_lmt = max_tx_power;
+ priv->tx_power_next = max_tx_power;
+
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & IWL_SKU_A) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (!priv->cfg->ops->lib->send_tx_power)
return -EOPNOTSUPP;
- if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+ /* 0 dBm mean 1 milliwatt */
+ if (tx_power < 0) {
IWL_WARN(priv,
- "Requested user TXPOWER %d below lower limit %d.\n",
- tx_power,
- IWL4965_TX_POWER_TARGET_POWER_MIN);
+ "Requested user TXPOWER %d below 1 mW.\n",
+ tx_power);
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index 04c5648027d..cb346d1a9ff 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
flags & EEPROM_CHANNEL_RADAR))
? "" : "not ");
- /* Set the tx_power_user_lmt to the highest power
- * supported by any channel */
- if (eeprom_ch_info[ch].max_power_avg >
- priv->tx_power_user_lmt)
- priv->tx_power_user_lmt =
- eeprom_ch_info[ch].max_power_avg;
-
ch_info++;
}
}
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 28eb3d885ba..cc7ebcee60e 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
- priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
eeprom->version);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 91b3d8b9d7a..d484c367816 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
iwl_legacy_init_scan_params(priv);
- /* Set the tx_power_user_lmt to the lowest power level
- * this value will get overwritten by channel max power avg
- * from eeprom */
- priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
- priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
-
ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3ea31b659d1..22e045b5bce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
+ /* at least EEPROM 0x11A has wrong info */
+ .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
+ .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 36952274950..c1ceb4b2397 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
struct mwl8k_priv {
struct ieee80211_hw *hw;
struct pci_dev *pdev;
+ int irq;
struct mwl8k_device_info *device_info;
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
+ priv->irq = -1;
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
+ priv->irq = priv->pdev->irq;
/* Enable TX reclaim and RX tasklets. */
tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
}
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- free_irq(priv->pdev->irq, hw);
+ if (priv->irq != -1) {
+ free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
+ }
/* Stop finalize join worker */
cancel_work_sync(&priv->finalize_join_worker);
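
The mwl8k hunks above all implement one idea: the driver records in its private data whether request_irq() actually succeeded, so the stop path only frees an interrupt line that was really registered. A minimal userspace sketch of that bookkeeping pattern follows; request_line(), release_line() and struct dev_priv are invented stand-ins, not mwl8k code.

#include <stdio.h>

/* Invented stand-ins for request_irq()/free_irq(). */
static int request_line(int line)
{
	return line >= 0 ? 0 : -1;	/* "fail" for invalid lines */
}

static void release_line(int line)
{
	printf("released line %d\n", line);
}

struct dev_priv {
	int line;			/* -1 means nothing registered */
};

static int dev_start(struct dev_priv *p, int line)
{
	if (request_line(line)) {
		p->line = -1;		/* record the failure */
		return -1;
	}
	p->line = line;			/* remember what we now own */
	return 0;
}

static void dev_stop(struct dev_priv *p)
{
	if (p->line != -1) {		/* free only what was registered */
		release_line(p->line);
		p->line = -1;
	}
}

int main(void)
{
	struct dev_priv p = { .line = -1 };

	dev_start(&p, 5);
	dev_stop(&p);
	dev_stop(&p);			/* a second stop is now harmless */
	return 0;
}
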
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 7834c26c295..042842e704d 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_info *p54info;
struct p54_hdr *hdr;
struct p54_tx_data *txhdr;
- unsigned int padding, len, extra_len;
+ unsigned int padding, len, extra_len = 0;
int i, j, ridx;
u16 hdr_flags = 0, aid = 0;
u8 rate, queue = 0, crypt_offset = 0;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 505c1c7075f..d552d2c7784 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
{
struct pci_dev *pdev = NULL;
struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve IOAPIC range failed\n");
+ return -ENODEV;
+ }
/* Reserve all PCI MMIO to avoid peer-to-peer access */
for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
iova = reserve_iova(&reserved_iova_list,
IOVA_PFN(r->start),
IOVA_PFN(r->end));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve iova failed\n");
+ return -ENODEV;
+ }
}
}
-
+ return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
ret = iommu_attach_domain(domain, iommu);
if (ret) {
- domain_exit(domain);
+ free_domain_mem(domain);
goto error;
}
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
* enable translation
*/
for_each_drhd_unit(drhd) {
- if (drhd->ignored)
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(drhd->iommu);
continue;
+ }
iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb,
if (!domain)
return 0;
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
domain_remove_one_dev_info(domain, pdev);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ }
+
return 0;
}
@@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void)
if (no_iommu || dmar_disabled)
return -ENODEV;
- iommu_init_mempool();
- dmar_init_reserved_ranges();
+ if (iommu_init_mempool()) {
+ if (force_on)
+ panic("tboot: Failed to initialize iommu memory\n");
+ return -ENODEV;
+ }
+
+ if (dmar_init_reserved_ranges()) {
+ if (force_on)
+ panic("tboot: Failed to reserve iommu ranges\n");
+ return -ENODEV;
+ }
init_no_remapping_devices();
- ret = init_dmars();
+ ret = init_dmars(force_on);
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain->iommu_count--;
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
- free_pgtable_page(dmar_domain->pgd);
dmar_domain->pgd = (struct dma_pte *)
phys_to_virt(dma_pte_addr(pte));
+ free_pgtable_page(pte);
}
dmar_domain->agaw--;
}
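
The intel-iommu changes share a theme: setup helpers such as dmar_init_reserved_ranges() now return an error instead of printing and carrying on, and intel_iommu_init() either backs out cleanly or, when tboot forces the IOMMU on, panics rather than running half-initialized. A rough sketch of that error-propagation shape, with made-up init_pool()/init_ranges() helpers standing in for the real init steps:

#include <stdio.h>

/* Made-up init steps standing in for iommu_init_mempool() and
 * dmar_init_reserved_ranges(). */
static int init_pool(void)
{
	return 0;
}

static int init_ranges(void)
{
	return -1;			/* pretend this step fails */
}

static int subsystem_init(int force_on)
{
	if (init_pool()) {
		if (force_on)
			fprintf(stderr, "fatal: pool init failed\n");
		return -1;		/* propagate, don't limp on */
	}
	if (init_ranges()) {
		if (force_on)
			fprintf(stderr, "fatal: range init failed\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return subsystem_init(1) ? 1 : 0;
}
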
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce..e9901b8f844 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
- int flagset;
-
/*
* As long as shost is accepting commands and we have
* starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
continue;
}
- spin_unlock(shost->host_lock);
-
- spin_lock(sdev->request_queue->queue_lock);
- flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER,
- &sdev->request_queue->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
- spin_unlock(sdev->request_queue->queue_lock);
-
- spin_lock(shost->host_lock);
+ blk_run_queue_async(sdev->request_queue);
}
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 28c33506e4a..815069d13f9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- int flagset;
- unsigned long flags;
-
if (!rport->rqst_q)
return;
+ /*
+ * This get/put dance makes no sense
+ */
get_device(&rport->dev);
-
- spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
- flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+ blk_run_queue_async(rport->rqst_q);
put_device(&rport->dev);
}
-
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
* @q: rport request queue
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4fb5b2bf234..4bcc8b82640 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -590,15 +590,10 @@ static struct virtio_config_ops virtio_pci_config_ops = {
static void virtio_pci_release_dev(struct device *_d)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ struct virtio_device *dev = container_of(_d, struct virtio_device,
+ dev);
struct virtio_pci_device *vp_dev = to_vp_device(dev);
- struct pci_dev *pci_dev = vp_dev->pci_dev;
- vp_del_vqs(dev);
- pci_set_drvdata(pci_dev, NULL);
- pci_iounmap(pci_dev, vp_dev->ioaddr);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
kfree(vp_dev);
}
@@ -681,6 +676,12 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
+
+ vp_del_vqs(&vp_dev->vdev);
+ pci_set_drvdata(pci_dev, NULL);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cc2f73e0347..b0043fb26a4 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
+ vq->vring.avail->idx--;
END_USE(vq);
return buf;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index aa309aa93fe..4cf04e11c66 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
if (atomic_dec_and_test(&fp->fi_delegees)) {
vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
fp->fi_lease = NULL;
+ fput(fp->fi_deleg_file);
fp->fi_deleg_file = NULL;
}
}
@@ -402,8 +403,8 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
if (stp->st_access_bmap) {
oflag = nfs4_access_bmap_to_omode(stp);
nfs4_file_put_access(stp->st_file, oflag);
- put_nfs4_file(stp->st_file);
}
+ put_nfs4_file(stp->st_file);
kmem_cache_free(stateid_slab, stp);
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2e1cebde90d..129f3c9f62d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
goto out;
@@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dchild))
goto out_nfserr;
+ /* If file doesn't exist, check for permissions to create one */
+ if (!dchild->d_inode) {
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+ }
+
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
diff --git a/fs/xattr.c b/fs/xattr.c
index a19acdb81cd..f1ef94974de 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(dentry, name, value, size, 0, handler->flags);
+ return handler->set(dentry, name, value, size, flags, handler->flags);
}
/*
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 3ca79560911..9f76cceb678 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -34,8 +34,10 @@ __xfs_printk(
const struct xfs_mount *mp,
struct va_format *vaf)
{
- if (mp && mp->m_fsname)
+ if (mp && mp->m_fsname) {
printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ return;
+ }
printk("%sXFS: %pV\n", level, vaf);
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a..2ad95fa1d13 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 3c7329b8ea0..0e1855079fb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -103,8 +103,8 @@ struct driver_info {
* Indicates to usbnet, that USB driver accumulates multiple IP packets.
* Affects statistic (counters) and short packet handling.
*/
-#define FLAG_MULTI_PACKET 0x1000
-#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
+#define FLAG_MULTI_PACKET 0x2000
+#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eec..f3bc322c589 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
goto drop;
}
- /* Zero out the CB buffer if no options present */
- if (iph->ihl == 5) {
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (iph->ihl == 5)
return 0;
- }
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b..054fdb5aeb8 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+
#define container_obj(layr) ((struct cfsrvl *) layr)
#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
+ u8 packet_type;
u32 zero = 0;
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > DGM_MTU)
return -EMSGSIZE;
- cfpkt_add_head(pkt, &zero, 4);
+ cfpkt_add_head(pkt, &zero, 3);
+ packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+ cfpkt_add_head(pkt, &packet_type, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e047..24f1ffa74b0 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfmuxl *muxl = container_obj(layr);
- struct list_head *node;
+ struct list_head *node, *next;
struct cflayer *layer;
- list_for_each(node, &muxl->srvl_list) {
+ list_for_each_safe(node, next, &muxl->srvl_list) {
layer = list_entry(node, struct cflayer, node);
if (cfsrvl_phyid_match(layer, phyid))
layer->ctrlcmd(layer, ctrl, phyid);
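
The cfmuxl fix switches to list_for_each_safe() because a layer's ctrlcmd callback may unlink that layer while the list is being walked; the safe variant simply caches the next pointer before visiting each node. A self-contained sketch of the same idea on a toy singly linked list (plain C, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Remove every even value.  The next pointer is saved before the current
 * node is visited, so freeing it mid-walk is safe. */
static void prune_even(struct node **head)
{
	struct node **pp = head, *n, *next;

	for (n = *head; n; n = next) {
		next = n->next;		/* cache it before n may be freed */
		if (n->val % 2 == 0) {
			*pp = next;	/* unlink */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL, *n;
	int i;

	for (i = 5; i >= 1; i--) {	/* build the list 1..5 */
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	prune_even(&head);

	for (n = head; n; n = n->next)
		printf("%d ", n->val);	/* prints: 1 3 5 */
	printf("\n");

	while (head) {			/* free what is left */
		n = head->next;
		free(head);
		head = n;
	}
	return 0;
}
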
diff --git a/net/core/dev.c b/net/core/dev.c
index 956d3b006e8..c2ac599fa0f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
}
/* TSO requires that SG is present as well. */
- if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
- features &= ~NETIF_F_TSO;
+ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+ features &= ~NETIF_F_ALL_TSO;
}
+ /* TSO ECN requires that TSO is present as well. */
+ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+ features &= ~NETIF_F_TSO_ECN;
+
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d3358285..5761185f884 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7..38f23e721b8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+ sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a..9df4e635fb5 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
}
/* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
int do_free;
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
}
/* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer *p = NULL;
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
* happen because of entry limits in route cache. */
return -1;
- unlink_from_pool(p, peer_to_base(p));
+ unlink_from_pool(p, peer_to_base(p), stack);
return 0;
}
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
- cleanup_once(0);
+ cleanup_once(0, stack);
return p;
}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
{
unsigned long now = jiffies;
int ttl, total;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
total = compute_total();
if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
total / inet_peer_threshold * HZ;
- while (!cleanup_once(ttl)) {
+ while (!cleanup_once(ttl, stack)) {
if (jiffies != now)
break;
}
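
The inetpeer change moves the PEER_MAXDEPTH pointer array out of unlink_from_pool() and has the outermost callers allocate it once and pass it down, so the same large scratch buffer is not duplicated on an already-deep call chain. A small sketch of that caller-owned scratch pattern; SCRATCH_DEPTH and the walk itself are invented for illustration:

#include <stdio.h>

#define SCRATCH_DEPTH 40	/* invented; plays the role of PEER_MAXDEPTH */

/* The helper records its path into scratch space provided by the caller
 * instead of declaring another large array on an already-deep stack. */
static int walk_one(int start, int *scratch)
{
	int depth = 0;

	while (start > 1 && depth < SCRATCH_DEPTH) {
		scratch[depth++] = start;
		start = (start % 2) ? 3 * start + 1 : start / 2;
	}
	return depth;
}

static void walk_many(void)
{
	int scratch[SCRATCH_DEPTH];	/* allocated once, reused per call */
	int i;

	for (i = 2; i <= 6; i++)
		printf("start %d: %d steps recorded\n",
		       i, walk_one(i, scratch));
}

int main(void)
{
	walk_many();
	return 0;
}
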
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442..2391b24e825 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
pp_ptr = optptr + 2;
goto error;
}
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
goto error;
}
opt->ts = optptr - iph;
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
unsigned long orefdst;
int err;
- if (!opt->srr)
+ if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086..321e6e84dbc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
-#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
.data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
-
-#endif
{
.procname = "igmp_max_msf",
.data = &sysctl_igmp_max_msf,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 16605465046..f2c5b0fc0f2 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4..cc616974a44 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
MSG_NOSIGNAL)) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
lock_sock(sk);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a912..90324211131 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
s32 data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
- ((skb_tail_pointer(skb) -
- (u8 *)pdu) - llc_len) < data_size)
+ !pskb_may_pull(skb, data_size))
return 0;
if (unlikely(pskb_trim_rcsum(skb, data_size)))
return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 00a33242e90..a274300b6a5 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
+ /* MAC can be src only */
+ if (!(flags & IPSET_DIM_TWO_SRC))
+ return 0;
+
data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 9152e69a162..72d1ac611fd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->args[1] >= ip_set_max)
goto out;
- pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
for (; cb->args[1] < max; cb->args[1]++) {
index = (ip_set_id_t) cb->args[1];
set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
* so that lists (unions of sets) are dumped last.
*/
if (cb->args[0] != DUMP_ONE &&
- !((cb->args[0] == DUMP_ALL) ^
- (set->type->features & IPSET_DUMP_LAST)))
+ ((cb->args[0] == DUMP_ALL) ==
+ !!(set->type->features & IPSET_DUMP_LAST)))
continue;
pr_debug("List set: %s\n", set->name);
if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
goto release_refcount;
}
}
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL) {
+ cb->args[0] = DUMP_LAST;
+ cb->args[1] = 0;
+ goto dump_last;
+ }
goto out;
nla_put_failure:
@@ -1093,11 +1100,6 @@ release_refcount:
pr_debug("release set %s\n", ip_set_list[index]->name);
ip_set_put_byindex(index);
}
-
- /* If we dump all sets, continue with dumping last ones */
- if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
- cb->args[0] = DUMP_LAST;
-
out:
if (nlh) {
nlmsg_end(skb, nlh);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 061d48cec13..b3babaed771 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index,
skb, par->family,
- info->add_set.dim,
+ info->del_set.dim,
info->del_set.flags);
return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
- info->del_set.flags > IPSET_DIM_MAX) {
+ info->del_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
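
The xt_set hunks all plug the same leak: the checkentry helpers take references on the add_set (and sometimes del_set) indexes early on, so every later error return has to drop whatever was already taken. A generic sketch of that acquire-then-unwind pattern, with hypothetical get_ref()/put_ref() helpers in place of ip_set_nfnl_get_byindex()/ip_set_nfnl_put():

#include <stdio.h>

/* Hypothetical reference helpers; a global counter tracks what is held. */
static int refs;

static int get_ref(int id)
{
	if (id < 0)
		return -1;		/* lookup failed */
	refs++;
	return 0;
}

static void put_ref(int id)
{
	(void)id;
	refs--;
}

static int check_entry(int add_id, int del_id, int dim_ok)
{
	int got_add = 0, got_del = 0;

	if (get_ref(add_id))
		return -1;		/* nothing taken yet, plain return */
	got_add = 1;

	if (del_id >= 0) {
		if (get_ref(del_id))
			goto err;	/* must drop the add ref we hold */
		got_del = 1;
	}

	if (!dim_ok)
		goto err;		/* drop everything taken so far */
	return 0;

err:
	if (got_del)
		put_ref(del_id);
	if (got_add)
		put_ref(add_id);
	return -1;
}

int main(void)
{
	check_entry(1, 2, 0);			/* validation fails... */
	printf("outstanding refs: %d\n", refs);	/* ...but nothing leaks: 0 */
	return 0;
}
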
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0698cad6176..1a21c571aa0 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
+ if (asoc->peer.retran_path == peer)
+ asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
if (t)
asoc->peer.retran_path = t;
+ else
+ t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",