Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/glue.c | 55
-rw-r--r--  drivers/acpi/processor_core.c | 3
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/ata/libata-acpi.c | 7
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/power.h | 8
-rw-r--r--  drivers/base/power/qos.c | 217
-rw-r--r--  drivers/base/power/sysfs.c | 1
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 1
-rw-r--r--  drivers/bcma/driver_pci_host.c | 2
-rw-r--r--  drivers/char/hw_random/core.c | 19
-rw-r--r--  drivers/connector/cn_proc.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 2
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 42
-rw-r--r--  drivers/gpio/gpio-ich.c | 4
-rw-r--r--  drivers/gpio/gpiolib.c | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/hwmon/pmbus/ltc2978.c | 30
-rw-r--r--  drivers/hwmon/sht15.c | 8
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c | 12
-rw-r--r--  drivers/mailbox/pl320-ipc.c | 3
-rw-r--r--  drivers/md/Kconfig | 11
-rw-r--r--  drivers/md/dm-raid.c | 123
-rw-r--r--  drivers/md/md.c | 19
-rw-r--r--  drivers/md/raid0.c | 13
-rw-r--r--  drivers/md/raid1.c | 8
-rw-r--r--  drivers/md/raid10.c | 97
-rw-r--r--  drivers/md/raid10.h | 5
-rw-r--r--  drivers/md/raid5.c | 38
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 85
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 18
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 25
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/usb/Kconfig | 18
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix_devices.c | 31
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 1448
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-phy-db.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 104
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 75
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 8
-rw-r--r--  drivers/pci/pci-acpi.c | 8
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 8
-rw-r--r--  drivers/regulator/core.c | 12
-rw-r--r--  drivers/regulator/db8500-prcmu.c | 4
-rw-r--r--  drivers/regulator/palmas-regulator.c | 3
-rw-r--r--  drivers/regulator/twl-regulator.c | 9
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/tty/hvc/hvcs.c | 9
-rw-r--r--  drivers/usb/core/usb-acpi.c | 9
98 files changed, 2727 insertions, 675 deletions
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index ef6f155469b..40a84cc6740 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
return -ENODEV;
- if (type && type->bus && type->find_device) {
+ if (type && type->match && type->find_device) {
down_write(&bus_type_sem);
list_add_tail(&type->list, &bus_type_list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "bus type %s registered\n",
- type->bus->name);
+ printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
return 0;
}
return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
down_write(&bus_type_sem);
list_del_init(&type->list);
up_write(&bus_type_sem);
- printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
- type->bus->name);
+ printk(KERN_INFO PREFIX "bus type %s unregistered\n",
+ type->name);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
-static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
{
struct acpi_bus_type *tmp, *ret = NULL;
- if (!type)
- return NULL;
-
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
- if (tmp->bus == type) {
+ if (tmp->match(dev)) {
ret = tmp;
break;
}
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
return ret;
}
-static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
-{
- struct acpi_bus_type *tmp;
- int ret = -ENODEV;
-
- down_read(&bus_type_sem);
- list_for_each_entry(tmp, &bus_type_list, list) {
- if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
- ret = 0;
- break;
- }
- }
- up_read(&bus_type_sem);
- return ret;
-}
-
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p)
{
@@ -261,29 +241,12 @@ err:
static int acpi_platform_notify(struct device *dev)
{
- struct acpi_bus_type *type;
+ struct acpi_bus_type *type = acpi_get_bus_type(dev);
acpi_handle handle;
int ret;
ret = acpi_bind_one(dev, NULL);
- if (ret && (!dev->bus || !dev->parent)) {
- /* bridge devices genernally haven't bus or parent */
- ret = acpi_find_bridge_device(dev, &handle);
- if (!ret) {
- ret = acpi_bind_one(dev, handle);
- if (ret)
- goto out;
- }
- }
-
- type = acpi_get_bus_type(dev->bus);
- if (ret) {
- if (!type || !type->find_device) {
- DBG("No ACPI bus support for %s\n", dev_name(dev));
- ret = -EINVAL;
- goto out;
- }
-
+ if (ret && type) {
ret = type->find_device(dev, &handle);
if (ret) {
DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
{
struct acpi_bus_type *type;
- type = acpi_get_bus_type(dev->bus);
+ type = acpi_get_bus_type(dev);
if (type && type->cleanup)
type->cleanup(dev);
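
The glue.c change above replaces the struct bus_type pointer comparison with a per-type ->match(dev) callback, so acpi_get_bus_type() simply walks the registered list and asks each type whether it handles the device. Below is a minimal userspace sketch of that "walk the list, ask each entry" lookup pattern; the type and function names are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct device { const char *bus_name; };

/* Illustrative stand-in for struct acpi_bus_type: a name, a match
 * callback, and a list link. */
struct bus_type_entry {
	const char *name;
	bool (*match)(struct device *dev);
	struct bus_type_entry *next;
};

static struct bus_type_entry *bus_type_list;

static void register_bus_type(struct bus_type_entry *type)
{
	type->next = bus_type_list;
	bus_type_list = type;
}

/* The lookup pattern introduced by the patch: return the first
 * registered type whose ->match() accepts the device. */
static struct bus_type_entry *get_bus_type(struct device *dev)
{
	struct bus_type_entry *t;

	for (t = bus_type_list; t; t = t->next)
		if (t->match(dev))
			return t;
	return NULL;
}

static bool pci_match(struct device *dev)
{
	return dev->bus_name && !strcmp(dev->bus_name, "pci");
}

int main(void)
{
	static struct bus_type_entry pci_type = { .name = "PCI", .match = pci_match };
	struct device dev = { .bus_name = "pci" };
	struct bus_type_entry *t;

	register_bus_type(&pci_type);
	t = get_bus_type(&dev);
	printf("matched bus type: %s\n", t ? t->name : "none");
	return 0;
}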
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index eff722278ff..164d49569ae 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
}
exit:
- if (buffer.pointer)
- kfree(buffer.pointer);
+ kfree(buffer.pointer);
return apic_id;
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index df34bd04ae6..bec717ffd25 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
return 0;
#endif
- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+ BUG_ON(pr->id >= nr_cpu_ids);
/*
* Buggy BIOS check
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d3a06a629a..24213033fba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1;
- pr_cont(" S%d", i);
}
}
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
hibernation_set_ops(old_suspend_ordering ?
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
- pr_cont(KERN_CONT " S4");
if (nosigcheck)
return;
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
{
acpi_status status;
u8 type_a, type_b;
+ char supported[ACPI_S_STATE_COUNT * 3 + 1];
+ char *pos = supported;
+ int i;
if (acpi_disabled)
return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
- pr_info(PREFIX "(supports S0");
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1;
- pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
}
- pr_cont(")\n");
+
+ supported[0] = 0;
+ for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
+ if (sleep_states[i])
+ pos += sprintf(pos, " S%d", i);
+ }
+ pr_info(PREFIX "(supports%s)\n", supported);
+
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
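
The sleep.c hunks above drop the incremental pr_cont() calls and instead assemble the list of supported states into a buffer that is printed once. The string-building step is plain sprintf() appending; here is a small standalone sketch with example data (the state count and flag values are made up for illustration):

#include <stdio.h>

#define S_STATE_COUNT 6	/* stands in for ACPI_S_STATE_COUNT */

int main(void)
{
	int sleep_states[S_STATE_COUNT] = { 1, 0, 0, 1, 1, 1 };	/* example data */
	char supported[S_STATE_COUNT * 3 + 1];	/* " S0" per state plus NUL */
	char *pos = supported;
	int i;

	supported[0] = '\0';
	for (i = 0; i < S_STATE_COUNT; i++)
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);	/* appends e.g. " S0 S3 S4 S5" */

	printf("ACPI: (supports%s)\n", supported);
	return 0;
}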
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 0ea1018280b..beea3115577 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
return -ENODEV;
}
-static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
-{
- return -ENODEV;
-}
-
static struct acpi_bus_type ata_acpi_bus = {
- .find_bridge = ata_acpi_find_dummy,
+ .name = "ATA",
.find_device = ata_acpi_find_device,
};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb..15beb500a4e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
- dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
- dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a..cfc3226ec49 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
+ dev->power.qos = NULL;
dev->power.early_init = true;
}
}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_sleep_init(struct device *dev) {}
-static inline void device_pm_add(struct device *dev)
-{
- dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_remove(struct device *dev)
{
- dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5..5f74587ef25 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include "power.h"
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
struct pm_qos_flags *pqf;
s32 val;
- if (!qos)
+ if (IS_ERR_OR_NULL(qos))
return PM_QOS_FLAGS_UNDEFINED;
pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
- return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+ return IS_ERR_OR_NULL(dev->power.qos) ?
+ 0 : pm_qos_read_value(&dev->power.qos->latency);
}
/**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
return 0;
}
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
- mutex_lock(&dev_pm_qos_mtx);
- dev->power.qos = NULL;
- dev->power.power_state = PMSG_ON;
- mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
+ mutex_lock(&dev_pm_qos_mtx);
+
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- dev_pm_qos_hide_latency_limit(dev);
- dev_pm_qos_hide_flags(dev);
+ __dev_pm_qos_hide_latency_limit(dev);
+ __dev_pm_qos_hide_flags(dev);
- mutex_lock(&dev_pm_qos_mtx);
-
- dev->power.power_state = PMSG_INVALID;
qos = dev->power.qos;
if (!qos)
goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
}
spin_lock_irq(&dev->power.lock);
- dev->power.qos = NULL;
+ dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;
- req->dev = dev;
-
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.qos) {
- if (dev->power.power_state.event == PM_EVENT_INVALID) {
- /* The device has been removed from the system. */
- req->dev = NULL;
- ret = -ENODEV;
- goto out;
- } else {
- /*
- * Allocate the constraints data on the first call to
- * add_request, i.e. only if the data is not already
- * allocated and if the device has not been removed.
- */
- ret = dev_pm_qos_constraints_allocate(dev);
- }
- }
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
if (!ret) {
+ req->dev = dev;
req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
}
- out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
s32 curr_value;
int ret = 0;
- if (!req->dev->power.qos)
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
int ret;
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_update_request(req, new_value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
if (!req) /*guard against callers passing in null */
return -EINVAL;
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
"%s() called for unknown object\n", __func__))
return -EINVAL;
- mutex_lock(&dev_pm_qos_mtx);
- ret = __dev_pm_qos_update_request(req, new_value);
- mutex_unlock(&dev_pm_qos_mtx);
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/**
* dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
- int ret = 0;
-
- if (!req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(!dev_pm_qos_request_active(req),
- "%s() called for unknown object\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.qos) {
- ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
- PM_QOS_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
+ ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.qos)
- ret = dev->power.power_state.event != PM_EVENT_INVALID ?
- dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (dev->power.qos)
+ if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
dev->power.qos->latency.notifiers,
notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev,
enum dev_pm_qos_req_type type)
{
+ struct dev_pm_qos_request *req = NULL;
+
switch(type) {
case DEV_PM_QOS_LATENCY:
- dev_pm_qos_remove_request(dev->power.qos->latency_req);
+ req = dev->power.qos->latency_req;
dev->power.qos->latency_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
- dev_pm_qos_remove_request(dev->power.qos->flags_req);
+ req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL;
break;
}
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
}
/**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
- if (dev->power.qos && dev->power.qos->latency_req)
- return -EEXIST;
-
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(req);
return ret;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->latency_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ }
+}
+
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (dev->power.qos && dev->power.qos->latency_req) {
- pm_qos_sysfs_remove_latency(dev);
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
- }
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (!device_is_registered(dev))
return -EINVAL;
- if (dev->power.qos && dev->power.qos->flags_req)
- return -EEXIST;
-
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- pm_runtime_get_sync(dev);
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
- if (ret < 0)
- goto fail;
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->flags_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-fail:
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ }
+}
+
/**
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
* @dev: Device whose PM QoS flags are to be hidden from user space.
*/
void dev_pm_qos_hide_flags(struct device *dev)
{
- if (dev->power.qos && dev->power.qos->flags_req) {
- pm_qos_sysfs_remove_flags(dev);
- pm_runtime_get_sync(dev);
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
- pm_runtime_put(dev);
- }
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_flags(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
s32 value;
int ret;
- if (!dev->power.qos || !dev->power.qos->flags_req)
- return -EINVAL;
-
pm_runtime_get_sync(dev);
mutex_lock(&dev_pm_qos_mtx);
+ if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
value = dev_pm_qos_requested_flags(dev);
if (set)
value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+ out:
mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
-
return ret;
}
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
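
The qos.c rework gives dev->power.qos three distinguishable states: NULL (constraints not allocated yet), a valid pointer, and ERR_PTR(-ENODEV) once dev_pm_qos_constraints_destroy() has run, with IS_ERR_OR_NULL() used on the read paths. A compact userspace sketch of that sentinel-pointer pattern follows, using simplified stand-ins for the kernel's <linux/err.h> helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

struct qos_data { int latency; };

static struct qos_data *qos;	/* NULL = not allocated, ERR_PTR = device gone */

static int add_request(int value)
{
	if (IS_ERR(qos))
		return -ENODEV;		/* device already removed */
	if (!qos) {			/* first request: allocate lazily */
		qos = malloc(sizeof(*qos));
		if (!qos)
			return -ENOMEM;
	}
	qos->latency = value;
	return 0;
}

static void constraints_destroy(void)
{
	if (!IS_ERR_OR_NULL(qos))
		free(qos);
	qos = ERR_PTR(-ENODEV);		/* later add_request() calls now fail */
}

int main(void)
{
	printf("add before destroy: %d\n", add_request(100));	/* 0 */
	constraints_destroy();
	printf("add after destroy:  %d\n", add_request(100));	/* -ENODEV */
	return 0;
}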
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a..a53ebd26570 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc..020ea2b9fd2 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
+ pm_runtime_put(map->dev);
return IRQ_NONE;
}
}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d3bde6cec92..30629a3d44c 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
return;
}
+ spin_lock_init(&pc_host->cfgspace_lock);
+
pc->host_controller = pc_host;
pc_host->pci_controller.io_resource = &pc_host->io_resource;
pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1bafb40ec8a..69ae5972713 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -40,6 +40,7 @@
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
static LIST_HEAD(rng_list);
static DEFINE_MUTEX(rng_mutex);
static int data_avail;
-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
- __cacheline_aligned;
+static u8 *rng_buffer;
+
+static size_t rng_buffer_size(void)
+{
+ return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
+}
static inline int hwrng_init(struct hwrng *rng)
{
@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
- sizeof(rng_buffer),
+ rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
mutex_lock(&rng_mutex);
+ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+ err = -ENOMEM;
+ if (!rng_buffer) {
+ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_buffer)
+ goto out_unlock;
+ }
+
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
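
The hw_random change above turns rng_buffer from a static cacheline-aligned array into a kmalloc()ed buffer allocated on the first hwrng_register() call, so callers such as virtio-rng can safely use virt_to_page() on it. A userspace sketch of the lazy, size-computed allocation (the cache-line size is hardcoded here purely for illustration):

#include <stdio.h>
#include <stdlib.h>

#define CACHE_BYTES 64		/* stands in for SMP_CACHE_BYTES */

static unsigned char *rng_buffer;

static size_t rng_buffer_size(void)
{
	/* at least 32 bytes, otherwise a full cache line */
	return CACHE_BYTES < 32 ? 32 : CACHE_BYTES;
}

static int rng_register(void)
{
	if (!rng_buffer) {		/* allocate once, on first registration */
		rng_buffer = malloc(rng_buffer_size());
		if (!rng_buffer)
			return -1;
	}
	return 0;
}

int main(void)
{
	if (rng_register())
		return 1;
	printf("buffer of %zu bytes ready\n", rng_buffer_size());
	free(rng_buffer);
	return 0;
}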
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index fce2000eec3..1110478dd0f 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
(task_active_pid_ns(current) != &init_pid_ns))
return;
+ /* Can only change if privileged. */
+ if (!capable(CAP_NET_ADMIN)) {
+ err = EPERM;
+ goto out;
+ }
+
mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}
+
+out:
cn_proc_ack(err, msg->seq, msg->ack);
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index d2ac9115060..46bde01eee6 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
* dbs: used as a shortform for demand based switching It helps to keep variable
* names smaller, simpler
* cdbs: common dbs
- * on_*: On-demand governor
+ * od_*: On-demand governor
* cs_*: Conservative governor
*/
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 66e3a71b81a..b61b5a3fad6 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -28,13 +28,7 @@
static int hb_voltage_change(unsigned int freq)
{
- int i;
- u32 msg[HB_CPUFREQ_IPC_LEN];
-
- msg[0] = HB_CPUFREQ_CHANGE_NOTE;
- msg[1] = freq / 1000000;
- for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
- msg[i] = 0;
+ u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
return pl320_ipc_transmit(msg);
}
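
The highbank-cpufreq hunk collapses the explicit zero-fill loop into a brace initializer; C guarantees that array elements not named in an initializer are zero-initialized, so only the first two slots need values. A short demonstration (the message length and command value below are placeholders, not the real HB_CPUFREQ_* constants):

#include <stdio.h>

#define IPC_LEN 7	/* placeholder for HB_CPUFREQ_IPC_LEN */
#define CHANGE_NOTE 1	/* placeholder; not the real HB_CPUFREQ_CHANGE_NOTE value */

int main(void)
{
	unsigned int freq = 1200000000;
	/* Only msg[0] and msg[1] are given; msg[2..IPC_LEN-1] are guaranteed 0. */
	unsigned int msg[IPC_LEN] = { CHANGE_NOTE, freq / 1000000 };
	int i;

	for (i = 0; i < IPC_LEN; i++)
		printf("msg[%d] = %u\n", i, msg[i]);
	return 0;
}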
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 096fde0ebcb..f6dd1e76112 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
intel_pstate_get_min_max(cpu, &min, &max);
limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
.owner = THIS_MODULE,
};
-static void intel_pstate_exit(void)
-{
- int cpu;
-
- sysfs_remove_group(intel_pstate_kobject,
- &intel_pstate_attr_group);
- debugfs_remove_recursive(debugfs_parent);
-
- cpufreq_unregister_driver(&intel_pstate_driver);
-
- if (!all_cpu_data)
- return;
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (all_cpu_data[cpu]) {
- del_timer_sync(&all_cpu_data[cpu]->timer);
- kfree(all_cpu_data[cpu]);
- }
- }
-
- put_online_cpus();
- vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
-
static int __initdata no_load;
static int __init intel_pstate_init(void)
{
- int rc = 0;
+ int cpu, rc = 0;
const struct x86_cpu_id *id;
if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
intel_pstate_sysfs_expose_params();
return rc;
out:
- intel_pstate_exit();
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
return -ENODEV;
}
device_initcall(intel_pstate_init);
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 6f2306db859..f9dbd503fc4 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -128,9 +128,9 @@ static int ichx_read_bit(int reg, unsigned nr)
return data & (1 << bit) ? 1 : 0;
}
-static int ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
+static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
{
- return (ichx_priv.use_gpio & (1 << (nr / 32))) ? 0 : -ENXIO;
+ return ichx_priv.use_gpio & (1 << (nr / 32));
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fff9786cdc6..c2534d62911 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -88,13 +88,14 @@ static int gpiod_request(struct gpio_desc *desc, const char *label);
static void gpiod_free(struct gpio_desc *desc);
static int gpiod_direction_input(struct gpio_desc *desc);
static int gpiod_direction_output(struct gpio_desc *desc, int value);
+static int gpiod_get_direction(const struct gpio_desc *desc);
static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
-static int gpiod_get_value_cansleep(struct gpio_desc *desc);
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc);
static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-static int gpiod_get_value(struct gpio_desc *desc);
+static int gpiod_get_value(const struct gpio_desc *desc);
static void gpiod_set_value(struct gpio_desc *desc, int value);
-static int gpiod_cansleep(struct gpio_desc *desc);
-static int gpiod_to_irq(struct gpio_desc *desc);
+static int gpiod_cansleep(const struct gpio_desc *desc);
+static int gpiod_to_irq(const struct gpio_desc *desc);
static int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
static int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
@@ -171,12 +172,12 @@ static int gpio_ensure_requested(struct gpio_desc *desc)
return 0;
}
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static struct gpio_chip *gpiod_to_chip(struct gpio_desc *desc)
+static struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- return desc->chip;
+ return desc ? desc->chip : NULL;
}
+/* caller holds gpio_lock *OR* gpio is marked as requested */
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpiod_to_chip(gpio_to_desc(gpio));
@@ -207,7 +208,7 @@ static int gpiochip_find_base(int ngpio)
}
/* caller ensures gpio is valid and requested, chip->get_direction may sleep */
-static int gpiod_get_direction(struct gpio_desc *desc)
+static int gpiod_get_direction(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
unsigned offset;
@@ -223,11 +224,13 @@ static int gpiod_get_direction(struct gpio_desc *desc)
if (status > 0) {
/* GPIOF_DIR_IN, or other positive */
status = 1;
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ /* FLAG_IS_OUT is just a cache of the result of get_direction(),
+ * so it does not affect constness per se */
+ clear_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
if (status == 0) {
/* GPIOF_DIR_OUT */
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(FLAG_IS_OUT, &((struct gpio_desc *)desc)->flags);
}
return status;
}
@@ -263,7 +266,7 @@ static DEFINE_MUTEX(sysfs_lock);
static ssize_t gpio_direction_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gpio_desc *desc = dev_get_drvdata(dev);
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
ssize_t status;
mutex_lock(&sysfs_lock);
@@ -654,6 +657,11 @@ static ssize_t export_store(struct class *class,
goto done;
desc = gpio_to_desc(gpio);
+ /* reject invalid GPIOs */
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so
@@ -690,12 +698,14 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- status = -EINVAL;
-
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
+
+ status = -EINVAL;
/* No extra locking here; FLAG_SYSFS just signifies that the
* request and export were done by on behalf of userspace, so
@@ -846,8 +856,10 @@ static int gpiod_export_link(struct device *dev, const char *name,
{
int status = -EINVAL;
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
mutex_lock(&sysfs_lock);
@@ -865,7 +877,6 @@ static int gpiod_export_link(struct device *dev, const char *name,
mutex_unlock(&sysfs_lock);
-done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -896,8 +907,10 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
struct device *dev = NULL;
int status = -EINVAL;
- if (!desc)
- goto done;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
mutex_lock(&sysfs_lock);
@@ -914,7 +927,6 @@ static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
unlock:
mutex_unlock(&sysfs_lock);
-done:
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -940,8 +952,8 @@ static void gpiod_unexport(struct gpio_desc *desc)
struct device *dev = NULL;
if (!desc) {
- status = -EINVAL;
- goto done;
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return;
}
mutex_lock(&sysfs_lock);
@@ -962,7 +974,7 @@ static void gpiod_unexport(struct gpio_desc *desc)
device_unregister(dev);
put_device(dev);
}
-done:
+
if (status)
pr_debug("%s: gpio%d status %d\n", __func__, desc_to_gpio(desc),
status);
@@ -1384,12 +1396,13 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
int status = -EPROBE_DEFER;
unsigned long flags;
- spin_lock_irqsave(&gpio_lock, flags);
-
if (!desc) {
- status = -EINVAL;
- goto done;
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
}
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
chip = desc->chip;
if (chip == NULL)
goto done;
@@ -1432,8 +1445,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
done:
if (status)
pr_debug("_gpio_request: gpio-%d (%s) status %d\n",
- desc ? desc_to_gpio(desc) : -1,
- label ? : "?", status);
+ desc_to_gpio(desc), label ? : "?", status);
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@@ -1616,10 +1628,13 @@ static int gpiod_direction_input(struct gpio_desc *desc)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->get || !chip->direction_input)
goto fail;
@@ -1655,13 +1670,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1678,6 +1689,11 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
/* Open drain pin should not be driven to 1 */
if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
return gpiod_direction_input(desc);
@@ -1688,8 +1704,6 @@ static int gpiod_direction_output(struct gpio_desc *desc, int value)
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->direction_output)
goto fail;
@@ -1725,13 +1739,9 @@ lose:
return status;
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1753,10 +1763,13 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
int status = -EINVAL;
int offset;
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
- if (!desc)
- goto fail;
chip = desc->chip;
if (!chip || !chip->set || !chip->set_debounce)
goto fail;
@@ -1776,13 +1789,9 @@ static int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
fail:
spin_unlock_irqrestore(&gpio_lock, flags);
- if (status) {
- int gpio = -1;
- if (desc)
- gpio = desc_to_gpio(desc);
- pr_debug("%s: gpio-%d status %d\n",
- __func__, gpio, status);
- }
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n", __func__,
+ desc_to_gpio(desc), status);
return status;
}
@@ -1824,12 +1833,14 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
-static int gpiod_get_value(struct gpio_desc *desc)
+static int gpiod_get_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
+ if (!desc)
+ return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
/* Should be using gpio_get_value_cansleep() */
@@ -1912,6 +1923,8 @@ static void gpiod_set_value(struct gpio_desc *desc, int value)
{
struct gpio_chip *chip;
+ if (!desc)
+ return;
chip = desc->chip;
/* Should be using gpio_set_value_cansleep() */
WARN_ON(chip->can_sleep);
@@ -1938,8 +1951,10 @@ EXPORT_SYMBOL_GPL(__gpio_set_value);
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
-static int gpiod_cansleep(struct gpio_desc *desc)
+static int gpiod_cansleep(const struct gpio_desc *desc)
{
+ if (!desc)
+ return 0;
/* only call this on GPIOs that are valid! */
return desc->chip->can_sleep;
}
@@ -1959,11 +1974,13 @@ EXPORT_SYMBOL_GPL(__gpio_cansleep);
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
-static int gpiod_to_irq(struct gpio_desc *desc)
+static int gpiod_to_irq(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
+ if (!desc)
+ return -EINVAL;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
@@ -1980,13 +1997,15 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
* Common examples include ones connected to I2C or SPI chips.
*/
-static int gpiod_get_value_cansleep(struct gpio_desc *desc)
+static int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int value;
int offset;
might_sleep_if(extra_checks);
+ if (!desc)
+ return 0;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : 0;
@@ -2005,6 +2024,8 @@ static void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
struct gpio_chip *chip;
might_sleep_if(extra_checks);
+ if (!desc)
+ return;
chip = desc->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
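
The gpiolib changes above follow one pattern throughout: reject a NULL descriptor (with a pr_warn) before taking gpio_lock, so the failure path never has to unlock, and let the purely read-side helpers accept a const descriptor. A minimal userspace sketch of that shape, not the gpiolib API itself:

#include <stdio.h>
#include <pthread.h>

struct gpio_desc {
	unsigned long flags;
	int value;
};

static pthread_mutex_t gpio_lock = PTHREAD_MUTEX_INITIALIZER;

/* Read-only helper: takes a const descriptor and tolerates NULL. */
static int desc_get_value(const struct gpio_desc *desc)
{
	if (!desc)
		return 0;
	return desc->value;
}

/* Mutating helper: validate the handle first, only then take the lock. */
static int desc_set_value(struct gpio_desc *desc, int value)
{
	if (!desc) {
		fprintf(stderr, "%s: invalid GPIO\n", __func__);
		return -1;
	}

	pthread_mutex_lock(&gpio_lock);
	desc->value = value;
	pthread_mutex_unlock(&gpio_lock);
	return 0;
}

int main(void)
{
	struct gpio_desc d = { 0 };

	desc_set_value(&d, 1);
	printf("value = %d\n", desc_get_value(&d));
	printf("NULL read = %d\n", desc_get_value(NULL));
	return 0;
}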
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c5b8c81b944..0a8eceb7590 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
- INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
- INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
- INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
- INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
+ dev_priv->enable_hotplug_processing = false;
}
i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
+ /* We need working interrupts for modeset enabling ... */
+ drm_irq_install(dev);
+
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev, false);
- drm_irq_install(dev);
+
+ /*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might loose hotplug
+ * notifications.
+ * */
intel_hpd_init(dev);
+ dev_priv->enable_hotplug_processing = true;
}
intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2cd97d1cc92..3c7bb0410b5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
irqreturn_t ret = IRQ_NONE;
int i;
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
return ret;
}
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pm_iir;
+ u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
atomic_inc(&dev_priv->irq_received);
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
+ /* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+ sde_ier = I915_READ(SDEIER);
+ I915_WRITE(SDEIER, 0);
+ POSTING_READ(SDEIER);
+
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
+ I915_WRITE(SDEIER, sde_ier);
+ POSTING_READ(SDEIER);
return ret;
}
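
Both interrupt handlers above now save SDEIER, write 0 to it for the duration of the handler, and restore it at the end, so any south-bridge event that latches into SDEIIR in the meantime simply re-raises the interrupt once delivery is re-enabled. The save/mask/process/restore sequence looks roughly like this sketch against a fake register file (register names and offsets are placeholders, not the real i915 register map):

#include <stdio.h>
#include <stdint.h>

/* Fake MMIO register file, for illustration only. */
static uint32_t regs[16];
#define SDEIER 0	/* placeholder offsets */
#define SDEIIR 1

static uint32_t mmio_read(int reg)              { return regs[reg]; }
static void     mmio_write(int reg, uint32_t v) { regs[reg] = v; }

static void irq_handler(void)
{
	/* Save the enable register, then mask it so only the IIR value we
	 * read below is processed; anything new stays latched. */
	uint32_t sde_ier = mmio_read(SDEIER);
	mmio_write(SDEIER, 0);

	uint32_t pending = mmio_read(SDEIIR);
	if (pending) {
		printf("handling south events 0x%08x\n", (unsigned)pending);
		mmio_write(SDEIIR, pending);	/* ack what we handled */
	}

	/* Restore: if SDEIIR latched more work meanwhile, re-enabling
	 * delivery lets it raise a fresh interrupt. */
	mmio_write(SDEIER, sde_ier);
}

int main(void)
{
	mmio_write(SDEIER, 0xffffffff);
	mmio_write(SDEIIR, 0x00000004);	/* pretend one event is pending */
	irq_handler();
	return 0;
}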
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527b664d343..848992f67d5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1613,9 +1613,9 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 969d08c72d1..32a3693905e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
u32 temp;
temp = I915_READ(crt->adpa_reg);
- temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
temp &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d64af5aa4a1..8d0bac3c35d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_dig_port->port;
- bool wait;
uint32_t val;
+ bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a05ac2c91ba..287b42c9d1a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
+/**
+ * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 cntl = I915_READ(CURCNTR(pipe));
+
+ if ((cntl & CURSOR_MODE) == 0) {
+ u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+ I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+ I915_WRITE(CURCNTR(pipe), cntl);
+ I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+ }
+}
+
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
+ if (IS_G4X(dev))
+ g4x_fixup_plane(dev_priv, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- intel_fb = to_intel_framebuffer(crtc->fb);
- work->old_fb_obj = intel_fb->obj;
+ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
+ crtc->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f61cb7998c7..6f728e5ee79 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+ done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ msecs_to_jiffies(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 61fee7fcdc2..a1794c6df1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033eae0..4d932c46725 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -112,7 +112,6 @@ struct mga_framebuffer {
struct mga_fbdev {
struct drm_fb_helper helper;
struct mga_framebuffer mfb;
- struct list_head fbdev_list;
void *sysram;
int size;
struct ttm_bo_kmap_obj mapping;
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 5a88ec51b51..d3dcf54e623 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
int ret;
int data, clock;
+ WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
WREG_DAC(MGA1064_GEN_IO_CTL, 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d3d99a28dde..a274b9906ef 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
static int mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+ struct mga_fbdev *mfbdev = mdev->mfbdev;
+ struct drm_fb_helper *fb_helper = &mfbdev->helper;
+ struct drm_fb_helper_connector *fb_helper_conn = NULL;
+ int bpp = 32;
+ int i = 0;
+
/* FIXME: Add bandwidth and g200se limitations */
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
+ /* Validate the mode input by the user */
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector) {
+ /* Found the helper for this connector */
+ fb_helper_conn = fb_helper->connector_info[i];
+ if (fb_helper_conn->cmdline_mode.specified) {
+ if (fb_helper_conn->cmdline_mode.bpp_specified) {
+ bpp = fb_helper_conn->cmdline_mode.bpp;
+ }
+ }
+ }
+ }
+
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if (fb_helper_conn)
+ fb_helper_conn->cmdline_mode.specified = false;
+ return MODE_BAD;
+ }
+
return MODE_OK;
}
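
mga_vga_mode_valid() above additionally rejects any mode whose framebuffer would not fit in VRAM, using the command-line bpp when one was specified and 32 bpp otherwise. The core of that test is just width x height x bytes-per-pixel compared against the VRAM size; a standalone sketch with an assumed 8 MiB part:

#include <stdio.h>
#include <stdbool.h>

/* Returns true if an hdisplay x vdisplay mode at bpp bits per pixel
 * fits into vram_size bytes (the check added to mga_vga_mode_valid). */
static bool mode_fits_vram(int hdisplay, int vdisplay, int bpp,
			   unsigned long vram_size)
{
	unsigned long needed = (unsigned long)hdisplay * vdisplay * (bpp / 8);

	return needed <= vram_size;
}

int main(void)
{
	unsigned long vram = 8UL * 1024 * 1024;	/* assumed 8 MiB of VRAM */

	printf("1024x768@32:  %s\n",
	       mode_fits_vram(1024, 768, 32, vram) ? "MODE_OK" : "MODE_BAD");
	printf("1920x1200@32: %s\n",
	       mode_fits_vram(1920, 1200, 32, vram) ? "MODE_OK" : "MODE_BAD");
	return 0;
}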
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 61cec0f6ff1..4857f913efd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2cc1e6a5eb6..9c41b58d57e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
init->offset += 2;
init_wr32(init, dreg, idata);
- init_mask(init, creg, ~mask, data | idata);
+ init_mask(init, creg, ~mask, data | iaddr);
}
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a114a0ed7e9..2e98e8a3f1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
/* drop port's i2c subdev refcount, i2c handles this itself */
if (ret == 0) {
list_add_tail(&port->head, &i2c->ports);
+ atomic_dec(&parent->refcount);
atomic_dec(&engine->refcount);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index d28430cd2ba..6e7a55f93a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
if (drm->agp.stat == UNKNOWN) {
if (!nouveau_agpmode)
return false;
+#ifdef __powerpc__
+ /* Disable AGP by default on all PowerPC machines for
+ * now -- At least some UniNorth-2 AGP bridges are
+ * known to be broken: DMA from the host to the card
+ * works just fine, but writeback from the card to the
+ * host goes straight to memory untranslated bypassing
+ * the GATT somehow, making them quite painful to deal
+ * with...
+ */
+ if (nouveau_agpmode == -1)
+ return false;
+#endif
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a6237c9cbbc..87a5a56ed35 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -55,9 +55,9 @@
/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
#define EVO_CORE_HANDLE (0xd1500000)
#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
@@ -341,10 +341,8 @@ struct nv50_curs {
struct nv50_sync {
struct nv50_dmac base;
- struct {
- u32 offset;
- u16 value;
- } sem;
+ u32 addr;
+ u32 data;
};
struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
return nv50_disp(dev)->sync;
}
+struct nv50_display_flip {
+ struct nv50_disp *disp;
+ struct nv50_sync *chan;
+};
+
+static bool
+nv50_display_flip_wait(void *data)
+{
+ struct nv50_display_flip *flip = data;
+ if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+ flip->chan->data)
+ return true;
+ usleep_range(1, 2);
+ return false;
+}
+
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nv50_sync *sync = nv50_sync(crtc);
+ struct nouveau_device *device = nouveau_dev(crtc->dev);
+ struct nv50_display_flip flip = {
+ .disp = nv50_disp(crtc->dev),
+ .chan = nv50_sync(crtc),
+ };
u32 *push;
- push = evo_wait(sync, 8);
+ push = evo_wait(flip.chan, 8);
if (push) {
evo_mthd(push, 0x0084, 1);
evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, sync);
+ evo_kick(push, flip.chan);
}
+
+ nv_wait_cb(device, nv50_display_flip_wait, &flip);
}
int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan, u32 swap_interval)
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
- struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
+ int head = nv_crtc->index, ret;
u32 *push;
- int ret;
swap_interval <<= 4;
if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (unlikely(push == NULL))
return -EBUSY;
- /* synchronise with the rendering channel, if necessary */
- if (likely(chan)) {
+ if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+ OUT_RING (chan, NvEvoSema0 + head);
+ OUT_RING (chan, sync->addr ^ 0x10);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, sync->data + 1);
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sync->addr);
+ OUT_RING (chan, sync->data);
+ } else
+ if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+ OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+ OUT_RING (chan, sync->data + 1);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, sync->data);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+ } else
+ if (chan) {
+ u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;
- if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
- OUT_RING (chan, sync->sem.offset);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
- OUT_RING (chan, sync->sem.offset ^ 0x10);
- OUT_RING (chan, 0x74b1e000);
- BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
- } else
- if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
- u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
- offset += sync->sem.offset;
-
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x00000002);
- BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x00000001);
- } else {
- u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
- offset += sync->sem.offset;
-
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 0xf00d0000 | sync->sem.value);
- OUT_RING (chan, 0x00001002);
- BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset ^ 0x10));
- OUT_RING (chan, 0x74b1e000);
- OUT_RING (chan, 0x00001001);
- }
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+ OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+ OUT_RING (chan, sync->data + 1);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, sync->data);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ }
+ if (chan) {
+ sync->addr ^= 0x10;
+ sync->data++;
FIRE_RING (chan);
} else {
- nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
- 0xf00d0000 | sync->sem.value);
evo_sync(crtc->dev);
}
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_data(push, 0x40000000);
}
evo_mthd(push, 0x0088, 4);
- evo_data(push, sync->sem.offset);
- evo_data(push, 0xf00d0000 | sync->sem.value);
- evo_data(push, 0x74b1e000);
+ evo_data(push, sync->addr);
+ evo_data(push, sync->data++);
+ evo_data(push, sync->data);
evo_data(push, NvEvoSync);
evo_mthd(push, 0x00a0, 2);
evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
evo_kick(push, sync);
-
- sync->sem.offset ^= 0x10;
- sync->sem.value++;
return 0;
}
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
if (ret)
goto out;
- head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+ head->sync.addr = EVO_FLIP_SEM0(index);
+ head->sync.data = 0x00000000;
/* allocate overlay resources */
ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
int
nv50_display_init(struct drm_device *dev)
{
- u32 *push = evo_wait(nv50_mast(dev), 32);
- if (push) {
- evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
- evo_kick(push, nv50_mast(dev));
- return 0;
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct drm_crtc *crtc;
+ u32 *push;
+
+ push = evo_wait(nv50_mast(dev), 32);
+ if (!push)
+ return -EBUSY;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nv50_sync *sync = nv50_sync(crtc);
+ nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
}
- return -EBUSY;
+ evo_mthd(push, 0x0088, 1);
+ evo_data(push, NvEvoSync);
+ evo_kick(push, nv50_mast(dev));
+ return 0;
}
void
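The flip changes above replace the old sem.offset/sem.value pair with an addr/data pair that ping-pongs between two 32-bit semaphore slots per head (sync->addr ^= 0x10, sync->data++), and nv50_display_flip_stop() now waits via nv50_display_flip_wait(), which polls the shared sync buffer until the word at addr equals data. A minimal userspace sketch of that polling contract, with a plain array standing in for the nouveau_bo and all names purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sync_bo[64];                 /* stand-in for disp->sync contents */

struct flip_state { uint32_t addr; uint32_t data; };

/* same comparison nv50_display_flip_wait() makes, minus the usleep_range() */
static bool flip_done(const struct flip_state *f)
{
        return sync_bo[f->addr / 4] == f->data;
}

int main(void)
{
        struct flip_state f = { .addr = 0x10, .data = 7 };

        printf("before completion: %s\n", flip_done(&f) ? "done" : "pending");
        sync_bo[f.addr / 4] = f.data;        /* what the EVO channel would write */
        printf("after completion:  %s\n", flip_done(&f) ? "done" : "pending");

        /* the next flip uses the other slot and the next value */
        f.addr ^= 0x10;
        f.data++;
        return 0;
}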
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3c38ea46531..305a657bf21 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 99fb13286fd..eb8ac315f92 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
__func__, __LINE__, toffset, surf.base_align);
return -EINVAL;
}
- if (moffset & (surf.base_align - 1)) {
+ if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
__func__, __LINE__, moffset, surf.base_align);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7cead763be9..d4c633e1286 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6d4b5611daf..0740db3fcd2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
if (r600_is_display_hung(rdev))
reset_mask |= RADEON_RESET_DISPLAY;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e403bdda58..78edadc9e86 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
found = 1;
}
+ /* quirks */
+ /* Radeon 9100 (R200) */
+ if ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7149)) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 167758488ed..66a7f0fd962 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -70,9 +70,10 @@
* 2.27.0 - r600-SI: Add CS ioctl support for async DMA
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
* 2.29.0 - R500 FP16 color clear registers
+ * 2.30.0 - fix for FMASK texturing
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_MINOR 30
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 90374dd7796..48f80cd42d8 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
unsigned long irqflags;
int i;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
unsigned long irqflags;
int i;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951e..9128120da04 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
if (tmp & L2_BUSY)
reset_mask |= RADEON_RESET_VMC;
+ /* Skip MC reset as it's most likely not hung, just busy */
+ if (reset_mask & RADEON_RESET_MC) {
+ DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+ reset_mask &= ~RADEON_RESET_MC;
+ }
+
return reset_mask;
}
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df065..be1daf7344d 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_HDMI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 9652a2c92a2..a58de38e23d 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -62,7 +62,7 @@ struct ltc2978_data {
int temp_min, temp_max;
int vout_min[8], vout_max[8];
int iout_max[2];
- int temp2_max[2];
+ int temp2_max;
struct pmbus_driver_info info;
};
@@ -204,10 +204,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
ret = pmbus_read_word_data(client, page,
LTC3880_MFR_TEMPERATURE2_PEAK);
if (ret >= 0) {
- if (lin11_to_val(ret)
- > lin11_to_val(data->temp2_max[page]))
- data->temp2_max[page] = ret;
- ret = data->temp2_max[page];
+ if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
+ data->temp2_max = ret;
+ ret = data->temp2_max;
}
break;
case PMBUS_VIRT_READ_VIN_MIN:
@@ -248,11 +247,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_VIRT_RESET_IOUT_HISTORY:
- data->iout_max[page] = 0x7fff;
+ data->iout_max[page] = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_TEMP2_HISTORY:
- data->temp2_max[page] = 0x7fff;
+ data->temp2_max = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -262,12 +261,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
break;
case PMBUS_VIRT_RESET_VIN_HISTORY:
data->vin_min = 0x7bff;
- data->vin_max = 0;
+ data->vin_max = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
case PMBUS_VIRT_RESET_TEMP_HISTORY:
data->temp_min = 0x7bff;
- data->temp_max = 0x7fff;
+ data->temp_max = 0x7c00;
ret = ltc2978_clear_peaks(client, page, data->id);
break;
default:
@@ -321,12 +320,13 @@ static int ltc2978_probe(struct i2c_client *client,
info = &data->info;
info->write_word_data = ltc2978_write_word_data;
- data->vout_min[0] = 0xffff;
data->vin_min = 0x7bff;
+ data->vin_max = 0x7c00;
data->temp_min = 0x7bff;
- data->temp_max = 0x7fff;
+ data->temp_max = 0x7c00;
+ data->temp2_max = 0x7c00;
- switch (id->driver_data) {
+ switch (data->id) {
case ltc2978:
info->read_word_data = ltc2978_read_word_data;
info->pages = 8;
@@ -336,7 +336,6 @@ static int ltc2978_probe(struct i2c_client *client,
for (i = 1; i < 8; i++) {
info->func[i] = PMBUS_HAVE_VOUT
| PMBUS_HAVE_STATUS_VOUT;
- data->vout_min[i] = 0xffff;
}
break;
case ltc3880:
@@ -352,11 +351,14 @@ static int ltc2978_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
- data->vout_min[1] = 0xffff;
+ data->iout_max[0] = 0x7c00;
+ data->iout_max[1] = 0x7c00;
break;
default:
return -ENODEV;
}
+ for (i = 0; i < info->pages; i++)
+ data->vout_min[i] = 0xffff;
return pmbus_do_probe(client, id, info);
}
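The history-reset constants above look like the LINEAR11 extremes: 0x7bff (the largest encodable value) resets the *_min trackers and 0x7c00 (the most negative) resets the *_max trackers, so the first real reading always replaces the reset value, whereas the old 0x7fff decodes to a negative number. A standalone decode sketch, assuming the usual PMBus LINEAR11 layout (two's-complement 5-bit exponent in bits 15:11, 11-bit mantissa in bits 10:0) and mirroring the driver's lin11_to_val() helper:

#include <stdint.h>
#include <stdio.h>

static long lin11_to_val(uint16_t data)
{
        int e = (int16_t)data >> 11;            /* signed exponent */
        long m = (int16_t)(data << 5) >> 5;     /* signed mantissa */

        return e < 0 ? m >> -e : m * (1L << e);
}

int main(void)
{
        printf("0x7bff -> %ld\n", lin11_to_val(0x7bff));  /* largest value     */
        printf("0x7c00 -> %ld\n", lin11_to_val(0x7c00));  /* most negative     */
        printf("0x7fff -> %ld\n", lin11_to_val(0x7fff));  /* old reset: -32768 */
        return 0;
}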
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index bfe326e896d..2507f902fb7 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -965,7 +965,13 @@ static int sht15_probe(struct platform_device *pdev)
if (voltage)
data->supply_uv = voltage;
- regulator_enable(data->reg);
+ ret = regulator_enable(data->reg);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
/*
* Setup a notifier block to update this if another device
* causes the voltage to change
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 017c67ea3f4..ead0a4fb744 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
// Allocate URBs and buffers for interrupt endpoint
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
- return -ENOMEM;
+ goto err1;
}
intr->urb = urb;
buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
if (!buf) {
- return -ENOMEM;
+ goto err2;
}
endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
endpoint->desc.bInterval);
return 0;
+err2:
+ usb_free_urb(intr->urb);
+ intr->urb = NULL;
+err1:
+ usb_free_urb(ctrl->urb);
+ ctrl->urb = NULL;
+
+ return -ENOMEM;
}
/*
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index c45b3aedafb..d873cbae2fb 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
-static int __init pl320_probe(struct amba_device *adev,
- const struct amba_id *id)
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e30b490055a..4d8d90b4fe7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -154,17 +154,6 @@ config MD_RAID456
If unsure, say Y.
-config MULTICORE_RAID456
- bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)"
- depends on MD_RAID456
- depends on SMP
- depends on EXPERIMENTAL
- ---help---
- Enable the raid456 module to dispatch per-stripe raid operations to a
- thread pool.
-
- If unsure, say N.
-
config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a01d1e4c78..311e3d35b27 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -91,15 +91,44 @@ static struct raid_type {
{"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
+static char *raid10_md_layout_to_format(int layout)
+{
+ /*
+ * Bit 16 and 17 stand for "offset" and "use_far_sets"
+ * Refer to MD's raid10.c for details
+ */
+ if ((layout & 0x10000) && (layout & 0x20000))
+ return "offset";
+
+ if ((layout & 0xFF) > 1)
+ return "near";
+
+ return "far";
+}
+
static unsigned raid10_md_layout_to_copies(int layout)
{
- return layout & 0xFF;
+ if ((layout & 0xFF) > 1)
+ return layout & 0xFF;
+ return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
- /* 1 "far" copy, and 'copies' "near" copies */
- return (1 << 8) | (copies & 0xFF);
+ unsigned n = 1, f = 1;
+
+ if (!strcmp("near", format))
+ n = copies;
+ else
+ f = copies;
+
+ if (!strcmp("offset", format))
+ return 0x30000 | (f << 8) | n;
+
+ if (!strcmp("far", format))
+ return 0x20000 | (f << 8) | n;
+
+ return (f << 8) | n;
}
static struct raid_type *get_raid_type(char *name)
@@ -352,6 +381,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
+ unsigned group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -379,9 +409,6 @@ static int validate_raid_redundancy(struct raid_set *rs)
* as long as the failed devices occur in different mirror
* groups (i.e. different stripes).
*
- * Right now, we only allow for "near" copies. When other
- * formats are added, we will have to check those too.
- *
* When checking "near" format, make sure no adjacent devices
* have failed beyond what can be handled. In addition to the
* simple case where the number of devices is a multiple of the
@@ -391,14 +418,41 @@ static int validate_raid_redundancy(struct raid_set *rs)
* A A B B C
* C D D E E
*/
- for (i = 0; i < rs->md.raid_disks * copies; i++) {
- if (!(i % copies))
+ if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
+ for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
+ d = i % rs->md.raid_disks;
+ if ((!rs->dev[d].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ (++rebuilds_per_group >= copies))
+ goto too_many;
+ }
+ break;
+ }
+
+ /*
+ * When checking "far" and "offset" formats, we need to ensure
+ * that the device that holds its copy is not also dead or
+ * being rebuilt. (Note that "far" and "offset" formats only
+ * support two copies right now. These formats also only ever
+ * use the 'use_far_sets' variant.)
+ *
+ * This check is somewhat complicated by the need to account
+ * for arrays that are not a multiple of (far) copies. This
+ * results in the need to treat the last (potentially larger)
+ * set differently.
+ */
+ group_size = (rs->md.raid_disks / copies);
+ last_group_start = (rs->md.raid_disks / group_size) - 1;
+ last_group_start *= group_size;
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (!(i % copies) && !(i > last_group_start))
rebuilds_per_group = 0;
- d = i % rs->md.raid_disks;
- if ((!rs->dev[d].rdev.sb_page ||
- !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ if ((!rs->dev[i].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
- goto too_many;
+ goto too_many;
}
break;
default:
@@ -433,7 +487,7 @@ too_many:
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
- * [raid10_format <near>] Layout algorithm. (Default: near)
+ * [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
@@ -520,7 +574,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
return -EINVAL;
}
- if (strcmp("near", argv[i])) {
+ if (strcmp("near", argv[i]) &&
+ strcmp("far", argv[i]) &&
+ strcmp("offset", argv[i])) {
rs->ti->error = "Invalid 'raid10_format' value given";
return -EINVAL;
}
@@ -644,6 +700,15 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
+ /*
+ * If the format is not "near", we only support
+ * two copies at the moment.
+ */
+ if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
+ rs->ti->error = "Too many copies for given RAID10 format.";
+ return -EINVAL;
+ }
+
/* (Len * #mirrors) / #devices */
sectors_per_dev = rs->ti->len * raid10_copies;
sector_div(sectors_per_dev, rs->md.raid_disks);
@@ -854,17 +919,30 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
/*
* Reshaping is not currently allowed
*/
- if ((le32_to_cpu(sb->level) != mddev->level) ||
- (le32_to_cpu(sb->layout) != mddev->layout) ||
- (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
- DMERR("Reshaping arrays not yet supported.");
+ if (le32_to_cpu(sb->level) != mddev->level) {
+ DMERR("Reshaping arrays not yet supported. (RAID level change)");
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->layout) != mddev->layout) {
+ DMERR("Reshaping arrays not yet supported. (RAID layout change)");
+ DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+ DMERR(" Old layout: %s w/ %d copies",
+ raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+ raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+ DMERR(" New layout: %s w/ %d copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
return -EINVAL;
}
/* We can only change the number of devices in RAID1 right now */
if ((rs->raid_type->level != 1) &&
(le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping arrays not yet supported.");
+ DMERR("Reshaping arrays not yet supported. (device count change)");
return -EINVAL;
}
@@ -1329,7 +1407,8 @@ static void raid_status(struct dm_target *ti, status_type_t type,
raid10_md_layout_to_copies(rs->md.layout));
if (rs->print_flags & DMPF_RAID10_FORMAT)
- DMEMIT(" raid10_format near");
+ DMEMIT(" raid10_format %s",
+ raid10_md_layout_to_format(rs->md.layout));
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
@@ -1418,6 +1497,10 @@ static struct target_type raid_target = {
static int __init dm_raid_init(void)
{
+ DMINFO("Loading target version %u.%u.%u",
+ raid_target.version[0],
+ raid_target.version[1],
+ raid_target.version[2]);
return dm_register_target(&raid_target);
}
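The raid10_md_layout_to_format() / raid10_format_to_md_layout() helpers added above pack the whole layout into one word: near copies in the low byte, far copies in the second byte, bit 16 for "offset" and bit 17 for "use_far_sets". A self-contained sketch that mirrors that encoding and prints the decoded fields for two copies of each format (worked example values only, not read from a live array):

#include <stdio.h>
#include <string.h>

/* mirrors raid10_format_to_md_layout() from the hunk above */
static int format_to_layout(const char *format, unsigned copies)
{
        unsigned n = 1, f = 1;

        if (!strcmp(format, "near"))
                n = copies;
        else
                f = copies;

        if (!strcmp(format, "offset"))
                return 0x30000 | (f << 8) | n;
        if (!strcmp(format, "far"))
                return 0x20000 | (f << 8) | n;
        return (f << 8) | n;
}

int main(void)
{
        const char *fmts[] = { "near", "far", "offset" };

        for (int i = 0; i < 3; i++) {
                int l = format_to_layout(fmts[i], 2);

                printf("%-6s copies=2 -> 0x%05x  near=%d far=%d offset=%d far_sets=%d\n",
                       fmts[i], l, l & 0xff, (l >> 8) & 0xff,
                       !!(l & 0x10000), !!(l & 0x20000));
        }
        return 0;  /* near -> 0x00102, far -> 0x20201, offset -> 0x30201 */
}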
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3db3d1b271f..fcb878f8879 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -307,6 +307,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
bio_io_error(bio);
return;
}
+ if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+ bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+ return;
+ }
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
@@ -2994,6 +2998,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
+ if (!my_mddev->pers->resize)
+ /* Cannot change size for RAID0 or Linear etc */
+ return -EINVAL;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
@@ -6525,7 +6532,17 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ /* mddev_unlock will wake thread */
+ /* If a device failed while we were read-only, we
+ * need to make sure the metadata is updated now.
+ */
+ if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ mddev_unlock(mddev);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+ mddev_lock(mddev);
+ }
} else {
err = -EROFS;
goto abort_unlock;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 24b359717a7..0505452de8d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -175,7 +175,13 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
rdev1->new_raid_disk = j;
}
- if (j < 0 || j >= mddev->raid_disks) {
+ if (j < 0) {
+ printk(KERN_ERR
+ "md/raid0:%s: remove inactive devices before converting to RAID0\n",
+ mdname(mddev));
+ goto abort;
+ }
+ if (j >= mddev->raid_disks) {
printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
"aborting!\n", mdname(mddev), j);
goto abort;
@@ -289,7 +295,7 @@ abort:
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
- *private_conf = NULL;
+ *private_conf = ERR_PTR(err);
return err;
}
@@ -411,7 +417,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
"%s does not support generic reshape\n", __func__);
rdev_for_each(rdev, mddev)
- array_sectors += rdev->sectors;
+ array_sectors += (rdev->sectors &
+ ~(sector_t)(mddev->chunk_sectors-1));
return array_sectors;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010..fd86b372692 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
+ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1301,7 +1303,8 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+ mbio->bi_rw =
+ WRITE | do_flush_fua | do_sync | do_discard | do_same;
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
@@ -2818,6 +2821,9 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
+ if (mddev->queue)
+ blk_queue_max_write_same_sectors(mddev->queue,
+ mddev->chunk_sectors);
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d48249c03..77b562d18a9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -38,21 +38,36 @@
* near_copies (stored in low byte of layout)
* far_copies (stored in second byte of layout)
* far_offset (stored in bit 16 of layout )
+ * use_far_sets (stored in bit 17 of layout )
*
- * The data to be stored is divided into chunks using chunksize.
- * Each device is divided into far_copies sections.
- * In each section, chunks are laid out in a style similar to raid0, but
- * near_copies copies of each chunk is stored (each on a different drive).
- * The starting device for each section is offset near_copies from the starting
- * device of the previous section.
- * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
- * drive.
- * near_copies and far_copies must be at least one, and their product is at most
- * raid_disks.
+ * The data to be stored is divided into chunks using chunksize. Each device
+ * is divided into far_copies sections. In each section, chunks are laid out
+ * in a style similar to raid0, but near_copies copies of each chunk is stored
+ * (each on a different drive). The starting device for each section is offset
+ * near_copies from the starting device of the previous section. Thus there
+ * are (near_copies * far_copies) of each chunk, and each is on a different
+ * drive. near_copies and far_copies must be at least one, and their product
+ * is at most raid_disks.
*
* If far_offset is true, then the far_copies are handled a bit differently.
- * The copies are still in different stripes, but instead of be very far apart
- * on disk, there are adjacent stripes.
+ * The copies are still in different stripes, but instead of being very far
+ * apart on disk, there are adjacent stripes.
+ *
+ * The far and offset algorithms are handled slightly differently if
+ * 'use_far_sets' is true. In this case, the array's devices are grouped into
+ * sets that are (near_copies * far_copies) in size. The far copied stripes
+ * are still shifted by 'near_copies' devices, but this shifting stays confined
+ * to the set rather than the entire array. This is done to improve the number
+ * of device combinations that can fail without causing the array to fail.
+ * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
+ * on a device):
+ * A B C D A B C D E
+ * ... ...
+ * D A B C E A B C D
+ * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
+ * [A B] [C D] [A B] [C D E]
+ * |...| |...| |...| | ... |
+ * [B A] [D C] [B A] [E C D]
*/
/*
@@ -535,6 +550,13 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
sector_t stripe;
int dev;
int slot = 0;
+ int last_far_set_start, last_far_set_size;
+
+ last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+ last_far_set_start *= geo->far_set_size;
+
+ last_far_set_size = geo->far_set_size;
+ last_far_set_size += (geo->raid_disks % geo->far_set_size);
/* now calculate first sector/dev */
chunk = r10bio->sector >> geo->chunk_shift;
@@ -551,15 +573,25 @@ static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
/* and calculate all the others */
for (n = 0; n < geo->near_copies; n++) {
int d = dev;
+ int set;
sector_t s = sector;
- r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
+ r10bio->devs[slot].addr = s;
slot++;
for (f = 1; f < geo->far_copies; f++) {
+ set = d / geo->far_set_size;
d += geo->near_copies;
- if (d >= geo->raid_disks)
- d -= geo->raid_disks;
+
+ if ((geo->raid_disks % geo->far_set_size) &&
+ (d > last_far_set_start)) {
+ d -= last_far_set_start;
+ d %= last_far_set_size;
+ d += last_far_set_start;
+ } else {
+ d %= geo->far_set_size;
+ d += geo->far_set_size * set;
+ }
s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
@@ -595,6 +627,20 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
* or recovery, so reshape isn't happening
*/
struct geom *geo = &conf->geo;
+ int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
+ int far_set_size = geo->far_set_size;
+ int last_far_set_start;
+
+ if (geo->raid_disks % geo->far_set_size) {
+ last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
+ last_far_set_start *= geo->far_set_size;
+
+ if (dev >= last_far_set_start) {
+ far_set_size = geo->far_set_size;
+ far_set_size += (geo->raid_disks % geo->far_set_size);
+ far_set_start = last_far_set_start;
+ }
+ }
offset = sector & geo->chunk_mask;
if (geo->far_offset) {
@@ -602,13 +648,13 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
chunk = sector >> geo->chunk_shift;
fc = sector_div(chunk, geo->far_copies);
dev -= fc * geo->near_copies;
- if (dev < 0)
- dev += geo->raid_disks;
+ if (dev < far_set_start)
+ dev += far_set_size;
} else {
while (sector >= geo->stride) {
sector -= geo->stride;
- if (dev < geo->near_copies)
- dev += geo->raid_disks - geo->near_copies;
+ if (dev < (geo->near_copies + far_set_start))
+ dev += far_set_size - geo->near_copies;
else
dev -= geo->near_copies;
}
@@ -1073,6 +1119,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
bio_list_merge(&conf->pending_bio_list, &plug->pending);
conf->pending_count += plug->pending_cnt;
spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
kfree(plug);
return;
@@ -1105,6 +1152,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
const unsigned long do_discard = (bio->bi_rw
& (REQ_DISCARD | REQ_SECURE));
+ const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1460,7 +1508,8 @@ retry_write:
rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_rw =
+ WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1502,7 +1551,8 @@ retry_write:
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_rw =
+ WRITE | do_sync | do_fua | do_discard | do_same;
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -3436,7 +3486,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
disks = mddev->raid_disks + mddev->delta_disks;
break;
}
- if (layout >> 17)
+ if (layout >> 18)
return -1;
if (chunk < (PAGE_SIZE >> 9) ||
!is_power_of_2(chunk))
@@ -3448,6 +3498,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
geo->near_copies = nc;
geo->far_copies = fc;
geo->far_offset = fo;
+ geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
geo->chunk_mask = chunk - 1;
geo->chunk_shift = ffz(~chunk);
return nc*fc;
@@ -3569,6 +3620,8 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue,
+ mddev->chunk_sectors);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
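The 'use_far_sets' behaviour documented at the top of raid10.c and implemented in __raid10_find_phys() above confines the far-copy rotation to sets of (near_copies * far_copies) devices, with the last set absorbing any remainder. A small userspace sketch of just that device-mapping step, using the 9-device, far_copies=2, near_copies=1 example drawn in the comment ([A B] [C D] [A B] [C D E]); everything here is illustrative:

#include <stdio.h>

int main(void)
{
        int raid_disks = 9, near_copies = 1, far_set_size = 2;
        int last_start = (raid_disks / far_set_size - 1) * far_set_size;  /* 6 */
        int last_size  = far_set_size + raid_disks % far_set_size;        /* 3 */

        for (int dev = 0; dev < raid_disks; dev++) {
                int set = dev / far_set_size;
                int d = dev + near_copies;

                if ((raid_disks % far_set_size) && d > last_start) {
                        /* wrap inside the larger final set */
                        d -= last_start;
                        d %= last_size;
                        d += last_start;
                } else {
                        /* wrap inside this device's own set */
                        d %= far_set_size;
                        d += far_set_size * set;
                }
                printf("far copy of the chunk on dev %d lands on dev %d\n", dev, d);
        }
        /* prints 0<->1, 2<->3, 4<->5 and, in the last set, 6->7, 7->8, 8->6 */
        return 0;
}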
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 1054cf60234..157d69e83ff 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -33,6 +33,11 @@ struct r10conf {
* far_offset, in which case it is
* 1 stripe.
*/
+ int far_set_size; /* The number of devices in a set,
+ * where a 'set' is the group of devices that
+ * contain far/offset copies of
+ * each other.
+ */
int chunk_shift; /* shift from chunks to sectors */
sector_t chunk_mask;
} prev, geo;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5af2d270908..3ee2912889e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1403,7 +1403,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
-static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
@@ -1468,36 +1468,6 @@ static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-#ifdef CONFIG_MULTICORE_RAID456
-static void async_run_ops(void *param, async_cookie_t cookie)
-{
- struct stripe_head *sh = param;
- unsigned long ops_request = sh->ops.request;
-
- clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
- wake_up(&sh->ops.wait_for_ops);
-
- __raid_run_ops(sh, ops_request);
- release_stripe(sh);
-}
-
-static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
-{
- /* since handle_stripe can be called outside of raid5d context
- * we need to ensure sh->ops.request is de-staged before another
- * request arrives
- */
- wait_event(sh->ops.wait_for_ops,
- !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
- sh->ops.request = ops_request;
-
- atomic_inc(&sh->count);
- async_schedule(async_run_ops, sh);
-}
-#else
-#define raid_run_ops __raid_run_ops
-#endif
-
static int grow_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
@@ -1506,9 +1476,6 @@ static int grow_one_stripe(struct r5conf *conf)
return 0;
sh->raid_conf = conf;
- #ifdef CONFIG_MULTICORE_RAID456
- init_waitqueue_head(&sh->ops.wait_for_ops);
- #endif
spin_lock_init(&sh->stripe_lock);
@@ -1627,9 +1594,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
break;
nsh->raid_conf = conf;
- #ifdef CONFIG_MULTICORE_RAID456
- init_waitqueue_head(&nsh->ops.wait_for_ops);
- #endif
spin_lock_init(&nsh->stripe_lock);
list_add(&nsh->lru, &newstripes);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 11d01d67b3f..7bd068a6056 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->dev_addr_from_first)
+ if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 639049d7e92..da5f4397f87 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
ring->start);
} else {
+ /* Omit CRC. */
+ len -= ETH_FCS_LEN;
+
new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
if (new_skb) {
skb_put(new_skb, len);
skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
new_skb->data,
len);
+ skb_checksum_none_assert(skb);
new_skb->protocol =
eth_type_trans(new_skb, bgmac->net_dev);
netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecac04a3687..a923bc4d5a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
tsum = ~csum_fold(csum_add((__force __wsum) csum,
csum_partial(t_header, -fix, 0)));
- return bswab16(csum);
+ return bswab16(tsum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9a674b14b40..edfa67adf2f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+ if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1663e0b6b5a..31c5787970d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant off.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x20);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Disable MI_INT interrupt before setting LED4
+ * source to constant on.
+ */
+ if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4) &
+ NIG_MASK_MI_INT) {
+ params->link_flags |=
+ LINK_FLAGS_INT_DISABLED;
+
+ bnx2x_bits_dis(
+ bp,
+ NIG_REG_MASK_INTERRUPT_PORT0 +
+ params->port*4,
+ NIG_MASK_MI_INT);
+ }
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x20);
+ }
}
break;
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+ /* Restore LED4 source to external link,
+ * and re-enable interrupts.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_SIGNAL_MASK,
+ 0x40);
+ if (params->link_flags &
+ LINK_FLAGS_INT_DISABLED) {
+ bnx2x_link_int_enable(params);
+ params->link_flags &=
+ ~LINK_FLAGS_INT_DISABLED;
+ }
+ }
}
break;
}
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
phy->media_type = ETH_PHY_KR;
phy->flags |= FLAGS_WC_DUAL_MODE;
phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_FIBRE |
SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & SPEED_20000))
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d25c7d79787..be5c195d03d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,8 @@ struct link_params {
struct bnx2x *bp;
u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
req_flow_ctrl is set to AUTO */
- u16 rsrv1;
+ u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED (1<<0)
u32 lfa_base;
};
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141..069a155d16e 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
- unsigned long flags;
+ unsigned int index;
if (!fep->link) {
/* Link is down or autonegotiation is in progress. */
return NETDEV_TX_BUSY;
}
- spin_lock_irqsave(&fep->hw_lock, flags);
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* This should not happen, since ndev->tbusy should be set.
*/
printk("%s: tx queue full!.\n", ndev->name);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- unsigned int index;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
swap_buffer(bufaddr, skb->len);
/* Save skb pointer */
- fep->tx_skbuff[fep->skb_cur] = skb;
-
- ndev->stats.tx_bytes += skb->len;
- fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = skb;
/* Push the data cache so the CPM does not get stale memory
* data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
}
- /* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
/* If this was the last BD in the ring, start at the beginning again. */
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- if (bdp == fep->dirty_tx) {
- fep->tx_full = 1;
+ fep->cur_tx = bdp;
+
+ if (fep->cur_tx == fep->dirty_tx)
netif_stop_queue(ndev);
- }
- fep->cur_tx = bdp;
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
skb_tx_timestamp(skb);
- spin_unlock_irqrestore(&fep->hw_lock, flags);
-
return NETDEV_TX_OK;
}
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
+ int index = 0;
fep = netdev_priv(ndev);
- spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
+ /* get next bdp of dirty_tx */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ bdp = fep->tx_bd_base;
+ else
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
- if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+ /* current queue is empty */
+ if (bdp == fep->cur_tx)
break;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
+
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
- skb = fep->tx_skbuff[fep->skb_dirty];
+ skb = fep->tx_skbuff[index];
+
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- fep->tx_skbuff[fep->skb_dirty] = NULL;
- fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+ fep->tx_skbuff[index] = NULL;
+
+ fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->tx_full) {
- fep->tx_full = 0;
+ if (fep->dirty_tx != fep->cur_tx) {
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
}
- fep->dirty_tx = bdp;
- spin_unlock(&fep->hw_lock);
+ return;
}
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events, fep->hwp + FEC_IEVENT);
- if (int_events & FEC_ENET_RXF) {
+ if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
ret = IRQ_HANDLED;
/* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
}
}
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(ndev);
- }
-
if (int_events & FEC_ENET_MII) {
ret = IRQ_HANDLED;
complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
int pkts = fec_enet_rx(ndev, budget);
struct fec_enet_private *fep = netdev_priv(ndev);
+ fec_enet_tx(ndev);
+
if (pkts < budget) {
napi_complete(napi);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
fec_restart(ndev, 0);
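Several hunks above recover a descriptor's position in the Tx ring by pointer difference, where the element size depends on whether the enhanced bufdesc_ex layout is in use. A trimmed, userspace-only sketch of that index computation (the structures are cut-down stand-ins, not the driver's full definitions):

#include <stdio.h>

struct bufdesc {
        unsigned short cbd_sc;               /* control and status */
        unsigned short cbd_datlen;           /* data length        */
        unsigned long  cbd_bufaddr;          /* buffer address     */
};

struct bufdesc_ex {
        struct bufdesc desc;                 /* enhanced layout embeds the base one */
        unsigned long  cbd_esc;              /* ... remaining fields omitted ...    */
};

/* mirrors the index calculation in fec_enet_start_xmit()/fec_enet_tx() */
static int ring_index(const void *bdp, const void *base, int bufdesc_ex)
{
        if (bufdesc_ex)
                return (const struct bufdesc_ex *)bdp - (const struct bufdesc_ex *)base;
        return (const struct bufdesc *)bdp - (const struct bufdesc *)base;
}

int main(void)
{
        struct bufdesc_ex ring[8];

        printf("index = %d\n", ring_index(&ring[3], ring, 1));   /* -> 3 */
        return 0;
}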
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c..f5390071efd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
};
+#else
+struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#endif
struct bufdesc_ex {
struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
unsigned short res0[4];
};
-#else
-struct bufdesc {
- unsigned short cbd_sc; /* Control and status info */
- unsigned short cbd_datlen; /* Data length */
- unsigned long cbd_bufaddr; /* Buffer address */
-};
-#endif
-
/*
* The following definitions courtesy of commproc.h, which where
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
struct sk_buff *rx_skbuff[RX_RING_SIZE];
- ushort skb_cur;
- ushort skb_dirty;
/* CPM dual port RAM relative addresses */
dma_addr_t bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
- uint tx_full;
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8900398ba10..28fb50a1e9c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
- rtl_tx_performance_tweak(pdev,
- (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ }
}
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_disable_clock_request(pdev);
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, TxPacketMax);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(MaxTxPacketSize, TxPacketMax);
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
- rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+ if (tp->dev->mtu <= ETH_DATA_LEN)
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index bf57b3cb16a..0bc00991d31 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
tx_queue->txd.entries);
}
+ efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_stop_interrupts(efx, true);
@@ -832,6 +833,7 @@ out:
efx_start_interrupts(efx, true);
efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
return rc;
rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
/* Flush efx_mac_work(), refill_workqueue, monitor_work */
efx_flush_all(efx);
- /* Stop the kernel transmit interface late, so the watchdog
- * timer isn't ticking over the flush */
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
netif_tx_disable(efx->net_dev);
efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
if (new_mtu > EFX_MAX_MTU)
return -EINVAL;
- efx_stop_all(efx);
-
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2d756c1d714..0a90abd2421 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
* Will be %NULL if the buffer slot is currently free.
* @page: The associated page buffer. Valid iff @flags & %EFX_RX_BUF_PAGE.
* Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
* @len: Buffer length, in bytes.
* @flags: Flags for buffer and packet state.
*/
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
struct sk_buff *skb;
struct page *page;
} u;
- unsigned int len;
+ u16 page_offset;
+ u16 len;
u16 flags;
};
#define EFX_RX_BUF_PAGE 0x0001
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b..879ff5849bb 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
struct efx_rx_buffer *buf)
{
- /* Offset is always within one page, so we don't need to consider
- * the page order.
- */
- return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
- efx->type->rx_buffer_hash_size;
+ return buf->page_offset + efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
struct page *page;
+ unsigned int page_offset;
struct efx_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
split:
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->u.page = page;
+ rx_buf->page_offset = page_offset;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
rx_buf->flags = EFX_RX_BUF_PAGE;
++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
+ page_offset += (PAGE_SIZE >> 1);
++count;
goto split;
}
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
}
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+ struct efx_rx_buffer *rx_buf,
+ unsigned int used_len)
{
if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state->dma_addr,
efx_rx_buf_size(efx),
DMA_FROM_DEVICE);
+ } else if (used_len) {
+ dma_sync_single_for_cpu(&efx->pci_dev->dev,
+ rx_buf->dma_addr, used_len,
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
goto out;
}
- /* Release card resources - assumes all RX buffers consumed in-order
- * per RX queue
+ /* Release and/or sync DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue
*/
- efx_unmap_rx_buffer(efx, rx_buf);
+ efx_unmap_rx_buffer(efx, rx_buf, len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
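
With the new used_len argument, a page-based buffer whose page stays mapped (because its other half is still owned by the hardware) is only synced for the bytes the NIC actually wrote, rather than unmapped. A sketch of that decision, assuming the two-buffers-per-page layout and a per-page reference count (state->refcnt here mirrors the driver but does not appear in these hunks):

    static void efx_rx_buf_to_cpu(struct device *dma_dev,
                                  struct efx_rx_page_state *state,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int used_len,
                                  unsigned int page_buf_size)
    {
            if (--state->refcnt == 0) {
                    /* Last buffer on the page: drop the whole mapping. */
                    dma_unmap_page(dma_dev, state->dma_addr, page_buf_size,
                                   DMA_FROM_DEVICE);
            } else if (used_len) {
                    /* Page remains mapped for its other buffer: make only
                     * the received bytes visible to the CPU. */
                    dma_sync_single_for_cpu(dma_dev, rx_buf->dma_addr,
                                            used_len, DMA_FROM_DEVICE);
            }
    }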
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7e93df6585e..01ffbc48698 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
writel(vlan, &priv->host_port_regs->port_vlan);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < priv->data.slaves; i++)
slave_write(priv->slaves + i, vlan, reg);
cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 29934446436..abf7b6153d0 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
- .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
- | SUPPORTED_Asym_Pause),
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9930f999956..3657b4a2912 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
void phy_device_free(struct phy_device *phydev)
{
- kfree(phydev);
+ put_device(&phydev->dev);
}
EXPORT_SYMBOL(phy_device_free);
static void phy_device_release(struct device *dev)
{
- phy_device_free(to_phy_device(dev));
+ kfree(to_phy_device(dev));
}
static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
there's no driver _already_ loaded. */
request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+ device_initialize(&dev->dev);
+
return dev;
}
EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
/* Run all of the fixups for this PHY */
phy_scan_fixups(phydev);
- err = device_register(&phydev->dev);
+ err = device_add(&phydev->dev);
if (err) {
- pr_err("phy %d failed to register\n", phydev->addr);
+ pr_err("PHY %d failed to add\n", phydev->addr);
goto out;
}
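
Splitting device_register() into device_initialize() at creation time and device_add() at registration time is what makes phy_device_free() safe to call unconditionally: it now only drops a reference, and the memory is freed solely from the release callback once the last reference is gone. The general pattern, as a minimal sketch (the foo_* names are placeholders, not kernel API):

    #include <linux/device.h>
    #include <linux/slab.h>

    struct foo_device {
            struct device dev;
            /* driver-private state ... */
    };

    static void foo_release(struct device *dev)
    {
            /* The only place the structure is freed. */
            kfree(container_of(dev, struct foo_device, dev));
    }

    static struct foo_device *foo_create(void)
    {
            struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

            if (!foo)
                    return NULL;
            foo->dev.release = foo_release;
            device_initialize(&foo->dev);   /* refcounting valid from here */
            return foo;
    }

    static int foo_register(struct foo_device *foo)
    {
            return device_add(&foo->dev);   /* second half of device_register() */
    }

    static void foo_free(struct foo_device *foo)
    {
            put_device(&foo->dev);          /* may end up in foo_release() */
    }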
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index da92ed3797a..3b6e9b83342 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.
+config USB_NET_AX88179_178A
+ tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
+ depends on USB_USBNET
+ select CRC32
+ select PHYLIB
+ default y
+ help
+ This option adds support for ASIX AX88179 based USB 3.0/2.0
+ to Gigabit Ethernet adapters.
+
+ This driver should work with at least the following devices:
+ * ASIX AX88179
+ * ASIX AX88178A
+ * Sitecom LN-032
+
+ This driver creates an interface named "ethX", where X depends on
+ what other networking devices you have in use.
+
config USB_NET_CDCETHER
tristate "CDC Ethernet support (smart devices such as cable modems)"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 478691326f3..119b06c9aa1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
+obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2205dbc8d32..70975346909 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
.tx_fixup = asix_tx_fixup,
};
+/*
+ * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
+ * no-name packaging.
+ * USB device strings are:
+ * 1: Manufacturer: USBLINK
+ * 2: Product: HG20F9 USB2.0
+ * 3: Serial: 000003
+ * Appears to be compatible with Asix 88772B.
+ */
+static const struct driver_info hg20f9_info = {
+ .description = "HG20F9 USB 2.0 Ethernet",
+ .bind = ax88772_bind,
+ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88772_link_reset,
+ .reset = ax88772_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = FLAG_EEPROM_MAC,
+};
+
extern const struct driver_info ax88172a_info;
static const struct usb_device_id products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
/* ASIX 88172a demo board */
USB_DEVICE(0x0b95, 0x172a),
.driver_info = (unsigned long) &ax88172a_info,
+}, {
+ /*
+ * USBLINK HG20F9 "USB 2.0 LAN"
+ * Appears to have gazumped Linksys's manufacturer ID but
+ * doesn't (yet) conflict with any known Linksys product.
+ */
+ USB_DEVICE(0x066b, 0x20f9),
+ .driver_info = (unsigned long) &hg20f9_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644
index 00000000000..71c27d8d214
--- /dev/null
+++ b/drivers/net/usb/ax88179_178a.c
@@ -0,0 +1,1448 @@
+/*
+ * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
+ *
+ * Copyright (C) 2011-2013 ASIX
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#define AX88179_PHY_ID 0x03
+#define AX_EEPROM_LEN 0x100
+#define AX88179_EEPROM_MAGIC 0x17900b95
+#define AX_MCAST_FLTSIZE 8
+#define AX_MAX_MCAST 64
+#define AX_INT_PPLS_LINK ((u32)BIT(16))
+#define AX_RXHDR_L4_TYPE_MASK 0x1c
+#define AX_RXHDR_L4_TYPE_UDP 4
+#define AX_RXHDR_L4_TYPE_TCP 16
+#define AX_RXHDR_L3CSUM_ERR 2
+#define AX_RXHDR_L4CSUM_ERR 1
+#define AX_RXHDR_CRC_ERR ((u32)BIT(31))
+#define AX_RXHDR_DROP_ERR ((u32)BIT(30))
+#define AX_ACCESS_MAC 0x01
+#define AX_ACCESS_PHY 0x02
+#define AX_ACCESS_EEPROM 0x04
+#define AX_ACCESS_EFUS 0x05
+#define AX_PAUSE_WATERLVL_HIGH 0x54
+#define AX_PAUSE_WATERLVL_LOW 0x55
+
+#define PHYSICAL_LINK_STATUS 0x02
+ #define AX_USB_SS 0x04
+ #define AX_USB_HS 0x02
+
+#define GENERAL_STATUS 0x03
+/* Check AX88179 version. UA1:Bit2 = 0, UA2:Bit2 = 1 */
+ #define AX_SECLD 0x04
+
+#define AX_SROM_ADDR 0x07
+#define AX_SROM_CMD 0x0a
+ #define EEP_RD 0x04
+ #define EEP_BUSY 0x10
+
+#define AX_SROM_DATA_LOW 0x08
+#define AX_SROM_DATA_HIGH 0x09
+
+#define AX_RX_CTL 0x0b
+ #define AX_RX_CTL_DROPCRCERR 0x0100
+ #define AX_RX_CTL_IPE 0x0200
+ #define AX_RX_CTL_START 0x0080
+ #define AX_RX_CTL_AP 0x0020
+ #define AX_RX_CTL_AM 0x0010
+ #define AX_RX_CTL_AB 0x0008
+ #define AX_RX_CTL_AMALL 0x0002
+ #define AX_RX_CTL_PRO 0x0001
+ #define AX_RX_CTL_STOP 0x0000
+
+#define AX_NODE_ID 0x10
+#define AX_MULFLTARY 0x16
+
+#define AX_MEDIUM_STATUS_MODE 0x22
+ #define AX_MEDIUM_GIGAMODE 0x01
+ #define AX_MEDIUM_FULL_DUPLEX 0x02
+ #define AX_MEDIUM_ALWAYS_ONE 0x04
+ #define AX_MEDIUM_EN_125MHZ 0x08
+ #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
+ #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
+ #define AX_MEDIUM_RECEIVE_EN 0x100
+ #define AX_MEDIUM_PS 0x200
+ #define AX_MEDIUM_JUMBO_EN 0x8040
+
+#define AX_MONITOR_MOD 0x24
+ #define AX_MONITOR_MODE_RWLC 0x02
+ #define AX_MONITOR_MODE_RWMP 0x04
+ #define AX_MONITOR_MODE_PMEPOL 0x20
+ #define AX_MONITOR_MODE_PMETYPE 0x40
+
+#define AX_GPIO_CTRL 0x25
+ #define AX_GPIO_CTRL_GPIO3EN 0x80
+ #define AX_GPIO_CTRL_GPIO2EN 0x40
+ #define AX_GPIO_CTRL_GPIO1EN 0x20
+
+#define AX_PHYPWR_RSTCTL 0x26
+ #define AX_PHYPWR_RSTCTL_BZ 0x0010
+ #define AX_PHYPWR_RSTCTL_IPRL 0x0020
+ #define AX_PHYPWR_RSTCTL_AT 0x1000
+
+#define AX_RX_BULKIN_QCTRL 0x2e
+#define AX_CLK_SELECT 0x33
+ #define AX_CLK_SELECT_BCS 0x01
+ #define AX_CLK_SELECT_ACS 0x02
+ #define AX_CLK_SELECT_ULR 0x08
+
+#define AX_RXCOE_CTL 0x34
+ #define AX_RXCOE_IP 0x01
+ #define AX_RXCOE_TCP 0x02
+ #define AX_RXCOE_UDP 0x04
+ #define AX_RXCOE_TCPV6 0x20
+ #define AX_RXCOE_UDPV6 0x40
+
+#define AX_TXCOE_CTL 0x35
+ #define AX_TXCOE_IP 0x01
+ #define AX_TXCOE_TCP 0x02
+ #define AX_TXCOE_UDP 0x04
+ #define AX_TXCOE_TCPV6 0x20
+ #define AX_TXCOE_UDPV6 0x40
+
+#define AX_LEDCTRL 0x73
+
+#define GMII_PHY_PHYSR 0x11
+ #define GMII_PHY_PHYSR_SMASK 0xc000
+ #define GMII_PHY_PHYSR_GIGA 0x8000
+ #define GMII_PHY_PHYSR_100 0x4000
+ #define GMII_PHY_PHYSR_FULL 0x2000
+ #define GMII_PHY_PHYSR_LINK 0x400
+
+#define GMII_LED_ACT 0x1a
+ #define GMII_LED_ACTIVE_MASK 0xff8f
+ #define GMII_LED0_ACTIVE BIT(4)
+ #define GMII_LED1_ACTIVE BIT(5)
+ #define GMII_LED2_ACTIVE BIT(6)
+
+#define GMII_LED_LINK 0x1c
+ #define GMII_LED_LINK_MASK 0xf888
+ #define GMII_LED0_LINK_10 BIT(0)
+ #define GMII_LED0_LINK_100 BIT(1)
+ #define GMII_LED0_LINK_1000 BIT(2)
+ #define GMII_LED1_LINK_10 BIT(4)
+ #define GMII_LED1_LINK_100 BIT(5)
+ #define GMII_LED1_LINK_1000 BIT(6)
+ #define GMII_LED2_LINK_10 BIT(8)
+ #define GMII_LED2_LINK_100 BIT(9)
+ #define GMII_LED2_LINK_1000 BIT(10)
+ #define LED0_ACTIVE BIT(0)
+ #define LED0_LINK_10 BIT(1)
+ #define LED0_LINK_100 BIT(2)
+ #define LED0_LINK_1000 BIT(3)
+ #define LED0_FD BIT(4)
+ #define LED0_USB3_MASK 0x001f
+ #define LED1_ACTIVE BIT(5)
+ #define LED1_LINK_10 BIT(6)
+ #define LED1_LINK_100 BIT(7)
+ #define LED1_LINK_1000 BIT(8)
+ #define LED1_FD BIT(9)
+ #define LED1_USB3_MASK 0x03e0
+ #define LED2_ACTIVE BIT(10)
+ #define LED2_LINK_1000 BIT(13)
+ #define LED2_LINK_100 BIT(12)
+ #define LED2_LINK_10 BIT(11)
+ #define LED2_FD BIT(14)
+ #define LED_VALID BIT(15)
+ #define LED2_USB3_MASK 0x7c00
+
+#define GMII_PHYPAGE 0x1e
+#define GMII_PHY_PAGE_SELECT 0x1f
+ #define GMII_PHY_PGSEL_EXT 0x0007
+ #define GMII_PHY_PGSEL_PAGE0 0x0000
+
+struct ax88179_data {
+ u16 rxctl;
+ u16 reserved;
+};
+
+struct ax88179_int_data {
+ __le32 intdata1;
+ __le32 intdata2;
+};
+
+static const struct {
+ unsigned char ctrl, timer_l, timer_h, size, ifg;
+} AX88179_BULKIN_SIZE[] = {
+ {7, 0x4f, 0, 0x12, 0xff},
+ {7, 0x20, 3, 0x16, 0xff},
+ {7, 0xae, 7, 0x18, 0xff},
+ {7, 0xcc, 0x4c, 0x18, 8},
+};
+
+static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm)
+{
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
+
+ BUG_ON(!dev);
+
+ if (!in_pm)
+ fn = usbnet_read_cmd;
+ else
+ fn = usbnet_read_cmd_nopm;
+
+ ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
+ index, ret);
+
+ return ret;
+}
+
+static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data, int in_pm)
+{
+ int ret;
+ int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
+
+ BUG_ON(!dev);
+
+ if (!in_pm)
+ fn = usbnet_write_cmd;
+ else
+ fn = usbnet_write_cmd_nopm;
+
+ ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
+ index, ret);
+
+ return ret;
+}
+
+static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ u16 buf;
+
+ if (2 == size) {
+ buf = *((u16 *)data);
+ cpu_to_le16s(&buf);
+ usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, &buf,
+ size);
+ } else {
+ usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data,
+ size);
+ }
+}
+
+static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ if (2 == size) {
+ u16 buf;
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+ le16_to_cpus(&buf);
+ *((u16 *)data) = buf;
+ } else if (4 == size) {
+ u32 buf;
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
+ le32_to_cpus(&buf);
+ *((u32 *)data) = buf;
+ } else {
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
+ }
+
+ return ret;
+}
+
+static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ if (2 == size) {
+ u16 buf;
+ buf = *((u16 *)data);
+ cpu_to_le16s(&buf);
+ ret = __ax88179_write_cmd(dev, cmd, value, index,
+ size, &buf, 1);
+ } else {
+ ret = __ax88179_write_cmd(dev, cmd, value, index,
+ size, data, 1);
+ }
+
+ return ret;
+}
+
+static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int ret;
+
+ if (2 == size) {
+ u16 buf;
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ le16_to_cpus(&buf);
+ *((u16 *)data) = buf;
+ } else if (4 == size) {
+ u32 buf;
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
+ le32_to_cpus(&buf);
+ *((u32 *)data) = buf;
+ } else {
+ ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
+ }
+
+ return ret;
+}
+
+static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int ret;
+
+ if (2 == size) {
+ u16 buf;
+ buf = *((u16 *)data);
+ cpu_to_le16s(&buf);
+ ret = __ax88179_write_cmd(dev, cmd, value, index,
+ size, &buf, 0);
+ } else {
+ ret = __ax88179_write_cmd(dev, cmd, value, index,
+ size, data, 0);
+ }
+
+ return ret;
+}
+
+static void ax88179_status(struct usbnet *dev, struct urb *urb)
+{
+ struct ax88179_int_data *event;
+ u32 link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ le32_to_cpus((void *)&event->intdata1);
+
+ link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
+
+ if (netif_carrier_ok(dev->net) != link) {
+ if (link)
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ else
+ netif_carrier_off(dev->net);
+
+ netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+ }
+}
+
+static int ax88179_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u16 res;
+
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+ return res;
+}
+
+static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
+ int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u16 res = (u16) val;
+
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
+}
+
+static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ u16 tmp16;
+ u8 tmp8;
+
+ usbnet_suspend(intf, message);
+
+ /* Disable RX path */
+ ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+
+ /* Force bulk-in zero length */
+ ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
+
+ tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
+
+ /* change clock */
+ tmp8 = 0;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+ /* Configure RX control register => stop operation */
+ tmp16 = AX_RX_CTL_STOP;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+ return 0;
+}
+
+/* Enable the hardware auto-detach function. */
+/* Whether it is used is controlled by EEPROM offset 0x43. */
+static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
+{
+ u16 tmp16;
+ u8 tmp8;
+ int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
+ int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+
+ if (!in_pm) {
+ fnr = ax88179_read_cmd;
+ fnw = ax88179_write_cmd;
+ } else {
+ fnr = ax88179_read_cmd_nopm;
+ fnw = ax88179_write_cmd_nopm;
+ }
+
+ if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
+ return 0;
+
+ if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
+ return 0;
+
+ /* Enable Auto Detach bit */
+ tmp8 = 0;
+ fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ tmp8 |= AX_CLK_SELECT_ULR;
+ fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+
+ fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+ tmp16 |= AX_PHYPWR_RSTCTL_AT;
+ fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+
+ return 0;
+}
+
+static int ax88179_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ u16 tmp16;
+ u8 tmp8;
+
+ netif_carrier_off(dev->net);
+
+ /* Power up ethernet PHY */
+ tmp16 = 0;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
+ udelay(1000);
+
+ tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
+ 2, 2, &tmp16);
+ msleep(200);
+
+ /* Ethernet PHY auto detach */
+ ax88179_auto_detach(dev, 1);
+
+ /* Enable clock */
+ ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
+ msleep(100);
+
+ /* Configure RX control register => start operation */
+ tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+ AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+ ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+ return usbnet_resume(intf);
+}
+
+static void
+ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_MODE_RWLC)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MODE_RWMP)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= AX_MONITOR_MODE_RWLC;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= AX_MONITOR_MODE_RWMP;
+
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
+ 1, 1, &opt) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ax88179_get_eeprom_len(struct net_device *net)
+{
+ return AX_EEPROM_LEN;
+}
+
+static int
+ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int i, ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = AX88179_EEPROM_MAGIC;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* ax88179/178A returns 2 bytes from eeprom on read */
+ for (i = first_word; i <= last_word; i++) {
+ ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
+ &eeprom_buff[i - first_word],
+ 0);
+ if (ret < 0) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+ return 0;
+}
+
+static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+ return mii_ethtool_gset(&dev->mii, cmd);
+}
+
+static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+ return mii_ethtool_sset(&dev->mii, cmd);
+}
+
+
+static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops ax88179_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = ax88179_get_wol,
+ .set_wol = ax88179_set_wol,
+ .get_eeprom_len = ax88179_get_eeprom_len,
+ .get_eeprom = ax88179_get_eeprom,
+ .get_settings = ax88179_get_settings,
+ .set_settings = ax88179_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static void ax88179_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct ax88179_data *data = (struct ax88179_data *)dev->data;
+ u8 *m_filter = ((u8 *)dev->data) + 12;
+
+ data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
+
+ if (net->flags & IFF_PROMISC) {
+ data->rxctl |= AX_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > AX_MAX_MCAST) {
+ data->rxctl |= AX_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data for our 8 byte filter buffer
+ * to avoid allocating memory that is tricky to free later
+ */
+ u32 crc_bits;
+ struct netdev_hw_addr *ha;
+
+ memset(m_filter, 0, AX_MCAST_FLTSIZE);
+
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ *(m_filter + (crc_bits >> 3)) |= (1 << (crc_bits & 7));
+ }
+
+ ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_MULFLTARY,
+ AX_MCAST_FLTSIZE, AX_MCAST_FLTSIZE,
+ m_filter);
+
+ data->rxctl |= AX_RX_CTL_AM;
+ }
+
+ ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_RX_CTL,
+ 2, 2, &data->rxctl);
+}
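
The 64 multicast filter bits fit in the 8-byte AX_MULFLTARY register block; the loop above selects a bit by taking the top six bits of the Ethernet CRC-32 of each address. Restated as a standalone helper (the function name is illustrative only):

    /* Map one multicast MAC address to its bit in the 8-byte hash filter,
     * exactly as the netdev_for_each_mc_addr() loop above does. */
    static void ax_hash_mcast_addr(u8 filter[AX_MCAST_FLTSIZE], const u8 *addr)
    {
            u32 crc_bits = ether_crc(ETH_ALEN, (unsigned char *)addr) >> 26;

            filter[crc_bits >> 3] |= 1 << (crc_bits & 7);   /* byte, then bit */
    }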
+
+static int
+ax88179_set_features(struct net_device *net, netdev_features_t features)
+{
+ u8 tmp;
+ struct usbnet *dev = netdev_priv(net);
+ netdev_features_t changed = net->features ^ features;
+
+ if (changed & NETIF_F_IP_CSUM) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+ tmp ^= AX_TXCOE_TCP | AX_TXCOE_UDP;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+ }
+
+ if (changed & NETIF_F_IPV6_CSUM) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+ tmp ^= AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+ tmp ^= AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+ AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
+ }
+
+ return 0;
+}
+
+static int ax88179_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 tmp16;
+
+ if (new_mtu <= 0 || new_mtu > 4088)
+ return -EINVAL;
+
+ net->mtu = new_mtu;
+ dev->hard_mtu = net->mtu + net->hard_header_len;
+
+ if (net->mtu > 1500) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ tmp16 |= AX_MEDIUM_JUMBO_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ } else {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ tmp16 &= ~AX_MEDIUM_JUMBO_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+
+ return 0;
+}
+
+static int ax88179_set_mac_addr(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* Set the MAC address */
+ return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, net->dev_addr);
+}
+
+static const struct net_device_ops ax88179_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = ax88179_change_mtu,
+ .ndo_set_mac_address = ax88179_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ax88179_ioctl,
+ .ndo_set_rx_mode = ax88179_set_multicast,
+ .ndo_set_features = ax88179_set_features,
+};
+
+static int ax88179_check_eeprom(struct usbnet *dev)
+{
+ u8 i, buf, eeprom[20];
+ u16 csum, delay = HZ / 10;
+ unsigned long jtimeout;
+
+ /* Read EEPROM content */
+ for (i = 0; i < 6; i++) {
+ buf = i;
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
+ 1, 1, &buf) < 0)
+ return -EINVAL;
+
+ buf = EEP_RD;
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+ 1, 1, &buf) < 0)
+ return -EINVAL;
+
+ jtimeout = jiffies + delay;
+ do {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+ 1, 1, &buf);
+
+ if (time_after(jiffies, jtimeout))
+ return -EINVAL;
+
+ } while (buf & EEP_BUSY);
+
+ __ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
+ 2, 2, &eeprom[i * 2], 0);
+
+ if ((i == 0) && (eeprom[0] == 0xFF))
+ return -EINVAL;
+ }
+
+ csum = eeprom[6] + eeprom[7] + eeprom[8] + eeprom[9];
+ csum = (csum >> 8) + (csum & 0xff);
+ if ((csum + eeprom[10]) != 0xff)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ax88179_check_efuse(struct usbnet *dev, u16 *ledmode)
+{
+ u8 i;
+ u8 efuse[64];
+ u16 csum = 0;
+
+ if (ax88179_read_cmd(dev, AX_ACCESS_EFUS, 0, 64, 64, efuse) < 0)
+ return -EINVAL;
+
+ if (*efuse == 0xFF)
+ return -EINVAL;
+
+ for (i = 0; i < 64; i++)
+ csum = csum + efuse[i];
+
+ while (csum > 255)
+ csum = (csum & 0x00FF) + ((csum >> 8) & 0x00FF);
+
+ if (csum != 0xFF)
+ return -EINVAL;
+
+ *ledmode = (efuse[51] << 8) | efuse[52];
+
+ return 0;
+}
+
+static int ax88179_convert_old_led(struct usbnet *dev, u16 *ledvalue)
+{
+ u16 led;
+
+ /* Load the old eFuse LED mode */
+ if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x3C, 1, 2, &led) < 0)
+ return -EINVAL;
+
+ led >>= 8;
+ switch (led) {
+ case 0xFF:
+ led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+ LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+ LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+ break;
+ case 0xFE:
+ led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 | LED_VALID;
+ break;
+ case 0xFD:
+ led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 |
+ LED2_LINK_10 | LED_VALID;
+ break;
+ case 0xFC:
+ led = LED0_ACTIVE | LED1_ACTIVE | LED1_LINK_1000 | LED2_ACTIVE |
+ LED2_LINK_100 | LED2_LINK_10 | LED_VALID;
+ break;
+ default:
+ led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
+ LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
+ LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
+ break;
+ }
+
+ *ledvalue = led;
+
+ return 0;
+}
+
+static int ax88179_led_setting(struct usbnet *dev)
+{
+ u8 ledfd, value = 0;
+ u16 tmp, ledact, ledlink, ledvalue = 0, delay = HZ / 10;
+ unsigned long jtimeout;
+
+ /* Check AX88179 version. UA1 or UA2 */
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, GENERAL_STATUS, 1, 1, &value);
+
+ if (!(value & AX_SECLD)) { /* UA1 */
+ value = AX_GPIO_CTRL_GPIO3EN | AX_GPIO_CTRL_GPIO2EN |
+ AX_GPIO_CTRL_GPIO1EN;
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_GPIO_CTRL,
+ 1, 1, &value) < 0)
+ return -EINVAL;
+ }
+
+ /* Check EEPROM */
+ if (!ax88179_check_eeprom(dev)) {
+ value = 0x42;
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
+ 1, 1, &value) < 0)
+ return -EINVAL;
+
+ value = EEP_RD;
+ if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+ 1, 1, &value) < 0)
+ return -EINVAL;
+
+ jtimeout = jiffies + delay;
+ do {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
+ 1, 1, &value);
+
+ if (time_after(jiffies, jtimeout))
+ return -EINVAL;
+
+ } while (value & EEP_BUSY);
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_HIGH,
+ 1, 1, &value);
+ ledvalue = (value << 8);
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
+ 1, 1, &value);
+ ledvalue |= value;
+
+ /* Load internal ROM for default setting */
+ if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
+ ax88179_convert_old_led(dev, &ledvalue);
+
+ } else if (!ax88179_check_efuse(dev, &ledvalue)) {
+ if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
+ ax88179_convert_old_led(dev, &ledvalue);
+ } else {
+ ax88179_convert_old_led(dev, &ledvalue);
+ }
+
+ tmp = GMII_PHY_PGSEL_EXT;
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_PHY_PAGE_SELECT, 2, &tmp);
+
+ tmp = 0x2c;
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_PHYPAGE, 2, &tmp);
+
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_LED_ACT, 2, &ledact);
+
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_LED_LINK, 2, &ledlink);
+
+ ledact &= GMII_LED_ACTIVE_MASK;
+ ledlink &= GMII_LED_LINK_MASK;
+
+ if (ledvalue & LED0_ACTIVE)
+ ledact |= GMII_LED0_ACTIVE;
+
+ if (ledvalue & LED1_ACTIVE)
+ ledact |= GMII_LED1_ACTIVE;
+
+ if (ledvalue & LED2_ACTIVE)
+ ledact |= GMII_LED2_ACTIVE;
+
+ if (ledvalue & LED0_LINK_10)
+ ledlink |= GMII_LED0_LINK_10;
+
+ if (ledvalue & LED1_LINK_10)
+ ledlink |= GMII_LED1_LINK_10;
+
+ if (ledvalue & LED2_LINK_10)
+ ledlink |= GMII_LED2_LINK_10;
+
+ if (ledvalue & LED0_LINK_100)
+ ledlink |= GMII_LED0_LINK_100;
+
+ if (ledvalue & LED1_LINK_100)
+ ledlink |= GMII_LED1_LINK_100;
+
+ if (ledvalue & LED2_LINK_100)
+ ledlink |= GMII_LED2_LINK_100;
+
+ if (ledvalue & LED0_LINK_1000)
+ ledlink |= GMII_LED0_LINK_1000;
+
+ if (ledvalue & LED1_LINK_1000)
+ ledlink |= GMII_LED1_LINK_1000;
+
+ if (ledvalue & LED2_LINK_1000)
+ ledlink |= GMII_LED2_LINK_1000;
+
+ tmp = ledact;
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_LED_ACT, 2, &tmp);
+
+ tmp = ledlink;
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_LED_LINK, 2, &tmp);
+
+ tmp = GMII_PHY_PGSEL_PAGE0;
+ ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_PHY_PAGE_SELECT, 2, &tmp);
+
+ /* LED full duplex setting */
+ ledfd = 0;
+ if (ledvalue & LED0_FD)
+ ledfd |= 0x01;
+ else if ((ledvalue & LED0_USB3_MASK) == 0)
+ ledfd |= 0x02;
+
+ if (ledvalue & LED1_FD)
+ ledfd |= 0x04;
+ else if ((ledvalue & LED1_USB3_MASK) == 0)
+ ledfd |= 0x08;
+
+ if (ledvalue & LED2_FD)
+ ledfd |= 0x10;
+ else if ((ledvalue & LED2_USB3_MASK) == 0)
+ ledfd |= 0x20;
+
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_LEDCTRL, 1, 1, &ledfd);
+
+ return 0;
+}
+
+static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u8 buf[5];
+ u16 *tmp16;
+ u8 *tmp;
+ struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+
+ usbnet_get_endpoints(dev, intf);
+
+ tmp16 = (u16 *)buf;
+ tmp = (u8 *)buf;
+
+ memset(ax179_data, 0, sizeof(*ax179_data));
+
+ /* Power up ethernet PHY */
+ *tmp16 = 0;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+ *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+ msleep(200);
+
+ *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+ msleep(100);
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, dev->net->dev_addr);
+ memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
+
+ /* RX bulk configuration */
+ memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+ dev->rx_urb_size = 1024 * 20;
+
+ *tmp = 0x34;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
+
+ *tmp = 0x52;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
+ 1, 1, tmp);
+
+ dev->net->netdev_ops = &ax88179_netdev_ops;
+ dev->net->ethtool_ops = &ax88179_ethtool_ops;
+ dev->net->needed_headroom = 8;
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = ax88179_mdio_read;
+ dev->mii.mdio_write = ax88179_mdio_write;
+ dev->mii.phy_id_mask = 0xff;
+ dev->mii.reg_num_mask = 0xff;
+ dev->mii.phy_id = 0x03;
+ dev->mii.supports_gmii = 1;
+
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ /* Enable checksum offload */
+ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+ AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
+
+ *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
+ AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
+
+ /* Configure RX control register => start operation */
+ *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+ AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
+
+ *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
+ AX_MONITOR_MODE_RWMP;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
+
+ /* Configure default medium type => giga */
+ *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
+ AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, tmp16);
+
+ ax88179_led_setting(dev);
+
+ /* Restart autoneg */
+ mii_nway_restart(&dev->mii);
+
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ u16 tmp16;
+
+ /* Configure RX control register => stop operation */
+ tmp16 = AX_RX_CTL_STOP;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
+
+ tmp16 = 0;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp16);
+
+ /* Power down ethernet PHY */
+ tmp16 = 0;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
+}
+
+static void
+ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* checksum error bit is set */
+ if ((*pkt_hdr & AX_RXHDR_L3CSUM_ERR) ||
+ (*pkt_hdr & AX_RXHDR_L4CSUM_ERR))
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_TCP) ||
+ ((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_UDP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct sk_buff *ax_skb;
+ int pkt_cnt;
+ u32 rx_hdr;
+ u16 hdr_off;
+ u32 *pkt_hdr;
+
+ skb_trim(skb, skb->len - 4);
+ memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
+ le32_to_cpus(&rx_hdr);
+
+ pkt_cnt = (u16)rx_hdr;
+ hdr_off = (u16)(rx_hdr >> 16);
+ pkt_hdr = (u32 *)(skb->data + hdr_off);
+
+ while (pkt_cnt--) {
+ u16 pkt_len;
+
+ le32_to_cpus(pkt_hdr);
+ pkt_len = (*pkt_hdr >> 16) & 0x1fff;
+
+ /* Check CRC or runt packet */
+ if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
+ (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
+ skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+ pkt_hdr++;
+ continue;
+ }
+
+ if (pkt_cnt == 0) {
+ /* Skip IP alignment pseudo header */
+ skb_pull(skb, 2);
+ skb->len = pkt_len;
+ skb_set_tail_pointer(skb, pkt_len);
+ skb->truesize = pkt_len + sizeof(struct sk_buff);
+ ax88179_rx_checksum(skb, pkt_hdr);
+ return 1;
+ }
+
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (ax_skb) {
+ ax_skb->len = pkt_len;
+ ax_skb->data = skb->data + 2;
+ skb_set_tail_pointer(ax_skb, pkt_len);
+ ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
+ ax88179_rx_checksum(ax_skb, pkt_hdr);
+ usbnet_skb_return(dev, ax_skb);
+ } else {
+ return 0;
+ }
+
+ skb_pull(skb, (pkt_len + 7) & 0xFFF8);
+ pkt_hdr++;
+ }
+ return 1;
+}
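
For reference, the bulk-in layout that ax88179_rx_fixup() walks, reconstructed from the code above rather than from a datasheet: the last four bytes of the URB are a little-endian metadata word whose low 16 bits hold the packet count and whose high 16 bits hold the offset of an array of per-packet 32-bit headers; each packet is padded to an 8-byte boundary and starts with a 2-byte alignment pad. A few helpers restating the per-packet header fields (names are descriptive only):

    static inline u16 ax_rx_pkt_len(u32 pkt_hdr)
    {
            return (pkt_hdr >> 16) & 0x1fff;        /* bits 16..28 */
    }

    static inline bool ax_rx_pkt_dropped(u32 pkt_hdr)
    {
            return pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR);
    }

    static inline bool ax_rx_csum_ok(u32 pkt_hdr)
    {
            return !(pkt_hdr & (AX_RXHDR_L3CSUM_ERR | AX_RXHDR_L4CSUM_ERR));
    }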
+
+static struct sk_buff *
+ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ u32 tx_hdr1, tx_hdr2;
+ int frame_size = dev->maxpacket;
+ int mss = skb_shinfo(skb)->gso_size;
+ int headroom;
+ int tailroom;
+
+ tx_hdr1 = skb->len;
+ tx_hdr2 = mss;
+ if (((skb->len + 8) % frame_size) == 0)
+ tx_hdr2 |= 0x80008000; /* Enable padding */
+
+ skb_linearize(skb);
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+
+ if (!skb_header_cloned(skb) &&
+ !skb_cloned(skb) &&
+ (headroom + tailroom) >= 8) {
+ if (headroom < 8) {
+ skb->data = memmove(skb->head + 8, skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, 8, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_hdr2);
+ skb_copy_to_linear_data(skb, &tx_hdr2, 4);
+
+ skb_push(skb, 4);
+ cpu_to_le32s(&tx_hdr1);
+ skb_copy_to_linear_data(skb, &tx_hdr1, 4);
+
+ return skb;
+}
+
+static int ax88179_link_reset(struct usbnet *dev)
+{
+ struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+ u8 tmp[5], link_sts;
+ u16 mode, tmp16, delay = HZ / 10;
+ u32 tmp32 = 0x40000000;
+ unsigned long jtimeout;
+
+ jtimeout = jiffies + delay;
+ while (tmp32 & 0x40000000) {
+ mode = 0;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &mode);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2,
+ &ax179_data->rxctl);
+
+ /* Link is up; check whether the USB device's TX FIFO is full or empty */
+ ax88179_read_cmd(dev, 0x81, 0x8c, 0, 4, &tmp32);
+
+ if (time_after(jiffies, jtimeout))
+ return 0;
+ }
+
+ mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
+ 1, 1, &link_sts);
+
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+ GMII_PHY_PHYSR, 2, &tmp16);
+
+ if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+ return 0;
+ } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+ mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
+ if (dev->net->mtu > 1500)
+ mode |= AX_MEDIUM_JUMBO_EN;
+
+ if (link_sts & AX_USB_SS)
+ memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+ else if (link_sts & AX_USB_HS)
+ memcpy(tmp, &AX88179_BULKIN_SIZE[1], 5);
+ else
+ memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+ } else if (GMII_PHY_PHYSR_100 == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
+ mode |= AX_MEDIUM_PS;
+
+ if (link_sts & (AX_USB_SS | AX_USB_HS))
+ memcpy(tmp, &AX88179_BULKIN_SIZE[2], 5);
+ else
+ memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+ } else {
+ memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
+ }
+
+ /* RX bulk configuration */
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+ dev->rx_urb_size = (1024 * (tmp[3] + 2));
+
+ if (tmp16 & GMII_PHY_PHYSR_FULL)
+ mode |= AX_MEDIUM_FULL_DUPLEX;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &mode);
+
+ netif_carrier_on(dev->net);
+
+ return 0;
+}
+
+static int ax88179_reset(struct usbnet *dev)
+{
+ u8 buf[5];
+ u16 *tmp16;
+ u8 *tmp;
+
+ tmp16 = (u16 *)buf;
+ tmp = (u8 *)buf;
+
+ /* Power up ethernet PHY */
+ *tmp16 = 0;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+
+ *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
+ msleep(200);
+
+ *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
+ msleep(100);
+
+ /* Ethernet PHY auto detach */
+ ax88179_auto_detach(dev, 0);
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
+ dev->net->dev_addr);
+ memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
+
+ /* RX bulk configuration */
+ memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
+
+ dev->rx_urb_size = 1024 * 20;
+
+ *tmp = 0x34;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
+
+ *tmp = 0x52;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
+ 1, 1, tmp);
+
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ /* Enable checksum offload */
+ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
+ AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
+
+ *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
+ AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
+
+ /* Configure RX control register => start operation */
+ *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
+ AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
+
+ *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
+ AX_MONITOR_MODE_RWMP;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
+
+ /* Configure default medium type => giga */
+ *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
+ AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
+ AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, tmp16);
+
+ ax88179_led_setting(dev);
+
+ /* Restart autoneg */
+ mii_nway_restart(&dev->mii);
+
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static int ax88179_stop(struct usbnet *dev)
+{
+ u16 tmp16;
+
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+
+ return 0;
+}
+
+static const struct driver_info ax88179_info = {
+ .description = "ASIX AX88179 USB 3.0 Gigibit Ethernet",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info ax88178a_info = {
+ .description = "ASIX AX88178A USB 2.0 Gigibit Ethernet",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct driver_info sitecom_info = {
+ .description = "Sitecom USB 3.0 to Gigabit Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+{
+ /* ASIX AX88179 10/100/1000 */
+ USB_DEVICE(0x0b95, 0x1790),
+ .driver_info = (unsigned long)&ax88179_info,
+}, {
+ /* ASIX AX88178A 10/100/1000 */
+ USB_DEVICE(0x0b95, 0x178a),
+ .driver_info = (unsigned long)&ax88178a_info,
+}, {
+ /* Sitecom USB 3.0 to Gigabit Adapter */
+ USB_DEVICE(0x0df6, 0x0072),
+ .driver_info = (unsigned long) &sitecom_info,
+},
+ { },
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver ax88179_178a_driver = {
+ .name = "ax88179_178a",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = ax88179_suspend,
+ .resume = ax88179_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(ax88179_178a_driver);
+
+MODULE_DESCRIPTION("ASIX AX88179/178A based USB 3.0/2.0 Gigabit Ethernet Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4a8c25a2229..61b74a2b89a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* tag Huawei devices as wwan */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* Huawei NCM devices disguised as vendor specific */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
.driver_info = (unsigned long)&wwan_info,
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 5f845beeb18..050ca4a4850 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -27,7 +27,7 @@
#define WME_MAX_BA WME_BA_BMP_SIZE
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
-#define ATH_RSSI_DUMMY_MARKER 0x127
+#define ATH_RSSI_DUMMY_MARKER 127
#define ATH_RSSI_LPF_LEN 10
#define RSSI_LPF_THRESHOLD -20
#define ATH_RSSI_EP_MULTIPLIER (1<<7)
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 96bfb18078f..d3b099d7898 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -22,6 +22,7 @@
#include <linux/firmware.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3ad1fd05c5e..bd8251c1c74 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
last_rssi = priv->rx.last_rssi;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
+ if (ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal(hdr->addr3, common->curbssid)) {
+ s8 rssi = rxbuf->rxstatus.rs_rssi;
- if (rxbuf->rxstatus.rs_rssi < 0)
- rxbuf->rxstatus.rs_rssi = 0;
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (ieee80211_is_beacon(fc))
- priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+ if (rssi < 0)
+ rssi = 0;
+
+ priv->ah->stats.avgbrssi = rssi;
+ }
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
rx_status->band = hw->conf.channel->band;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2a2ae403e0e..07e25260c31 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
reset_type = ATH9K_RESET_POWER_ON;
else
reset_type = ATH9K_RESET_COLD;
- }
+ } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
+ (REG_READ(ah, AR_CR) & AR_CR_RXE))
+ reset_type = ATH9K_RESET_COLD;
if (!ath9k_hw_set_reset_reg(ah, reset_type))
return false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 9a0f45ec9e0..10f01793d7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
TRACE_EVENT(iwlwifi_dev_hcmd,
TP_PROTO(const struct device *dev,
struct iwl_host_cmd *cmd, u16 total_size,
- const void *hdr, size_t hdr_len),
- TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
+ struct iwl_cmd_header *hdr),
+ TP_ARGS(dev, cmd, total_size, hdr),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, hcmd, total_size)
__field(u32, flags)
),
TP_fast_assign(
- int i, offset = hdr_len;
+ int i, offset = sizeof(*hdr);
DEV_ASSIGN;
__entry->flags = cmd->flags;
- memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+ memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
- if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
- continue;
memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
cmd->data[i], cmd->len[i]);
offset += cmd->len[i];
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 14fc8d39fc2..3392011a876 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
u8 data[];
} __packed;
-#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
-static inline void iwl_phy_db_test_pic(__le32 pic)
-{
- WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
-}
-
struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
{
struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
(size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
}
- /* Test PIC */
- if (type != IWL_PHY_DB_CFG)
- iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
- (size / sizeof(__le32)) - 1));
-
IWL_DEBUG_INFO(phy_db->trans,
"%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
__func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
*size = entry->size;
}
- /* Test PIC */
- if (type != IWL_PHY_DB_CFG)
- iwl_phy_db_test_pic(*(((__le32 *)*data) +
- (*size / sizeof(__le32)) - 1));
-
IWL_DEBUG_INFO(phy_db->trans,
"%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
__func__, __LINE__, type, *size);
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index c64d864799c..994c8c263dc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -61,6 +61,7 @@
*
*****************************************************************************/
+#include <linux/etherdevice.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
sizeof(wkc), &wkc);
data->error = ret != 0;
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
+
/* don't upload key again */
goto out_unlock;
}
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
*/
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
key->hw_key_idx = 0;
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
} else {
data->gtk_key_idx++;
key->hw_key_idx = data->gtk_key_idx;
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
}
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* We reprogram keys and shouldn't allocate new key indices */
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
+ mvm->gtk_ivlen = 0;
+ mvm->gtk_icvlen = 0;
+
/*
* The D3 firmware still hardcodes the AP station ID for the
* BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *status;
u32 reasons;
int ret, len;
- bool pkt8023 = false;
struct sk_buff *pkt = NULL;
iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status = (void *)cmd.resp_pkt->data;
if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+ sizeof(*status) +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out;
}
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto report;
}
- if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
wakeup.magic_pkt = true;
- pkt8023 = true;
- }
- if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
wakeup.pattern_idx =
le16_to_cpu(status->pattern_number);
- pkt8023 = true;
- }
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
wakeup.disconnect = true;
- if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
wakeup.gtk_rekey_failure = true;
- pkt8023 = true;
- }
- if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
- pkt8023 = true;
- }
- if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
wakeup.eap_identity_req = true;
- pkt8023 = true;
- }
- if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
wakeup.four_way_handshake = true;
- pkt8023 = true;
- }
if (status->wake_packet_bufsize) {
- u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
- u32 pktlen = le32_to_cpu(status->wake_packet_length);
+ int pktsize = le32_to_cpu(status->wake_packet_bufsize);
+ int pktlen = le32_to_cpu(status->wake_packet_length);
+ const u8 *pktdata = status->wake_packet;
+ struct ieee80211_hdr *hdr = (void *)pktdata;
+ int truncated = pktlen - pktsize;
+
+ /* this would be a firmware bug */
+ if (WARN_ON_ONCE(truncated < 0))
+ truncated = 0;
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int ivlen = 0, icvlen = 4; /* also FCS */
- if (pkt8023) {
pkt = alloc_skb(pktsize, GFP_KERNEL);
if (!pkt)
goto report;
- memcpy(skb_put(pkt, pktsize), status->wake_packet,
- pktsize);
+
+ memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
+ pktdata += hdrlen;
+ pktsize -= hdrlen;
+
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ ivlen = mvm->gtk_ivlen;
+ icvlen += mvm->gtk_icvlen;
+ } else {
+ ivlen = mvm->ptk_ivlen;
+ icvlen += mvm->ptk_icvlen;
+ }
+ }
+
+ /* if truncated, FCS/ICV is (partially) gone */
+ if (truncated >= icvlen) {
+ truncated -= icvlen;
+ icvlen = 0;
+ } else {
+ icvlen -= truncated;
+ truncated = 0;
+ }
+
+ pktsize -= ivlen + icvlen;
+ pktdata += ivlen;
+
+ memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
+
if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
goto report;
wakeup.packet = pkt->data;
wakeup.packet_present_len = pkt->len;
- wakeup.packet_len = pkt->len - (pktlen - pktsize);
+ wakeup.packet_len = pkt->len - truncated;
wakeup.packet_80211 = false;
} else {
+ int fcslen = 4;
+
+ if (truncated >= 4) {
+ truncated -= 4;
+ fcslen = 0;
+ } else {
+ fcslen -= truncated;
+ truncated = 0;
+ }
+ pktsize -= fcslen;
wakeup.packet = status->wake_packet;
wakeup.packet_present_len = pktsize;
- wakeup.packet_len = pktlen;
+ wakeup.packet_len = pktlen - truncated;
wakeup.packet_80211 = true;
}
}
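
The wake-packet handling above does a fair amount of bookkeeping: the firmware reports both the original length of the frame and how many bytes it actually buffered, and any bytes lost to that truncation are charged against the trailing ICV/FCS before the payload is handed to mac80211. A minimal standalone sketch of that accounting (hypothetical helper and struct, not part of the driver):

/*
 * Split a captured wake packet into the part that can be copied out
 * and the part that was truncated, charging truncation to the
 * trailing ICV/FCS first.
 */
struct wake_pkt_acct {
	int copy_len;	/* payload bytes available for mac80211 */
	int truncated;	/* payload bytes lost to truncation */
};

static struct wake_pkt_acct account_wake_packet(int pktlen, int pktsize,
						int hdrlen, int ivlen,
						int icvlen /* incl. FCS */)
{
	struct wake_pkt_acct a;
	int truncated = pktlen - pktsize;

	if (truncated < 0)		/* would be a firmware bug */
		truncated = 0;

	/* truncation removes the trailing ICV/FCS before any payload */
	if (truncated >= icvlen) {
		truncated -= icvlen;
		icvlen = 0;
	} else {
		icvlen -= truncated;
		truncated = 0;
	}

	a.copy_len = pktsize - hdrlen - ivlen - icvlen;
	a.truncated = truncated;
	return a;
}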
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e8264e11b12..7e169b085af 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
return ret;
}
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u32 tfd_msk = 0, ac;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
*/
flush_work(&mvm->sta_drained_wk);
}
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
mutex_lock(&mvm->mutex);
/*
* For AP/GO interface, the tear down of the resources allocated to the
- * interface should be handled as part of the bss_info_changed flow.
+ * interface is handled as part of the stop_ap flow.
*/
if (vif->type == NL80211_IFTYPE_AP) {
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
mutex_lock(&mvm->mutex);
mvmvif->ap_active = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 4e339ccfa80..537711b1047 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -327,6 +327,10 @@ struct iwl_mvm {
struct led_classdev led;
struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM_SLEEP
+ int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#endif
};
/* Extract MVM priv from op_mode and _hw */
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index aa2a39a637d..3d62e805535 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -182,6 +182,15 @@ struct iwl_queue {
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how much to
+ * copy; even for HCMDs it must be big enough to fit the
+ * DRAM scratch from the TX cmd, i.e. at least 16 bytes.
+ */
+#define IWL_HCMD_MIN_COPY_SIZE 16
+
struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
struct iwl_device_cmd *copy_cmd;
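
The rule this constant encodes shows up twice in the tx.c changes further down: the copied prefix of a host command is topped up to at least 16 bytes, and the DMA mapping of the first TB is never made shorter than that, even when the whole command is smaller. As a worked example (sizes hypothetical, for illustration only), the max_t() clamp in the tx.c hunk does the following:

/* illustrative only: a host command smaller than IWL_HCMD_MIN_COPY_SIZE */
unsigned int copy_size = 4 /* header */ + 6 /* payload */;	/* 10 bytes */
unsigned int dma_size  = copy_size < 16 ? 16 : copy_size;	/* mapped as 16 */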
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8e9e3212fe7..8b625a7f568 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
- u16 copy_size, cmd_size;
+ u16 copy_size, cmd_size, dma_size;
bool had_nocopy = false;
int i;
u32 cmd_pos;
+ const u8 *cmddata[IWL_MAX_CMD_TFDS];
+ u16 cmdlen[IWL_MAX_CMD_TFDS];
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
if (!cmd->len[i])
continue;
+
+ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+ int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
GFP_ATOMIC);
if (!dup_buf)
return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
idx = -EINVAL;
goto free_dup_buf;
}
- copy_size += cmd->len[i];
+ copy_size += cmdlen[i];
}
cmd_size += cmd->len[i];
}
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/* and copy the data that needs to be copied */
cmd_pos = offsetof(struct iwl_device_cmd, payload);
+ copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- if (!cmd->len[i])
+ int copy = 0;
+
+ if (!cmd->len[i])
continue;
- if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP))
- break;
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
- cmd_pos += cmd->len[i];
+
+ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+ copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ }
+
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
+ copy = cmd->len[i];
+
+ if (copy) {
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ }
}
WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
- phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
+ /*
+ * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
+ * still map at least that many bytes for the hardware to write back to.
+ * We have enough space, so that's not a problem.
+ */
+ dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+
+ phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, copy_size);
+ dma_unmap_len_set(out_meta, len, dma_size);
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
+ /* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- const void *data = cmd->data[i];
+ const void *data = cmddata[i];
- if (!cmd->len[i])
+ if (!cmdlen[i])
continue;
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
phys_addr = dma_map_single(trans->dev, (void *)data,
- cmd->len[i], DMA_BIDIRECTIONAL);
+ cmdlen[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_pcie_tfd_unmap(trans, out_meta,
&txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
}
out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
txq->need_update = 1;
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
- &out_cmd->hdr, copy_size);
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
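
The enqueue path now keeps a shadow view of the command fragments (cmddata[]/cmdlen[]): whatever was pulled into the copied first TB to satisfy IWL_HCMD_MIN_COPY_SIZE is skipped when the remaining nocopy/dup fragments are DMA-mapped, so no byte is sent twice. A condensed sketch of that adjustment (hypothetical helper, kernel types assumed via <linux/types.h>):

/*
 * Advance each fragment past the bytes that were already copied into
 * the first TB, stopping once min_copy bytes (header included) are
 * accounted for.
 */
static void adjust_fragments(const u8 *data[], u16 len[], int nfrags,
			     u16 already_copied /* e.g. sizeof(header) */,
			     u16 min_copy /* IWL_HCMD_MIN_COPY_SIZE */)
{
	int i;

	for (i = 0; i < nfrags; i++) {
		u16 copy;

		if (!len[i] || already_copied >= min_copy)
			continue;

		copy = min_copy - already_copied;
		if (copy > len[i])
			copy = len[i];

		data[i] += copy;
		len[i] -= copy;
		already_copied += copy;
	}
}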
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 739309e70d8..45578335e42 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
sdio_release_host(func);
+ /* Set fw_ready before queuing any commands so that
+ * lbs_thread won't be blocked from sending them to firmware.
+ */
+ priv->fw_ready = 1;
+
/*
* FUNC_INIT is required for SD8688 WLAN/BT multiple functions
*/
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
}
- priv->fw_ready = 1;
wake_up(&card->pwron_waitq);
if (!card->started) {
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 35c79722c36..5c395e2e6a2 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
i++;
usleep_range(10, 20);
/* 50ms max wait */
- if (i == 50000)
+ if (i == 5000)
break;
}
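
The corrected loop bound matches the comment: usleep_range(10, 20) sleeps at least 10 us per iteration, so 5000 iterations cover the intended 50 ms budget, whereas the old 50000 allowed roughly half a second or more. A trivial check of the arithmetic:

/* 5000 iterations x 10 us minimum sleep = 50,000 us = 50 ms */
unsigned int max_wait_us = 5000 * 10;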
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 1031db66474..189744db65e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
*/
if_limit = &rt2x00dev->if_limits_ap;
if_limit->max = rt2x00dev->ops->max_ap_intf;
- if_limit->types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ if_limit->types = BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_MAC80211_MESH
+ if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
+#endif
/*
* Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_WDS);
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 39c937f9b42..dee5dddaa29 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev)
}
}
+static bool pci_acpi_bus_match(struct device *dev)
+{
+ return dev->bus == &pci_bus_type;
+}
+
static struct acpi_bus_type acpi_pci_bus = {
- .bus = &pci_bus_type,
+ .name = "PCI",
+ .match = pci_acpi_bus_match,
.find_device = acpi_pci_find_device,
.setup = pci_acpi_setup,
.cleanup = pci_acpi_cleanup,
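
The same conversion is repeated for PNP, SCSI and USB below: instead of storing a bus pointer, the ACPI glue now carries a name and a match callback that decides whether a given struct device belongs to this binding. A minimal sketch of the pattern for a hypothetical bus (foo_* names invented for illustration):

/* hypothetical .match-based acpi_bus_type registration */
static bool foo_acpi_bus_match(struct device *dev)
{
	return dev->bus == &foo_bus_type;	/* or any other predicate */
}

static struct acpi_bus_type acpi_foo_bus = {
	.name		= "FOO",
	.match		= foo_acpi_bus_match,
	.find_device	= foo_acpi_find_device,
};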
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 8813fc03aa0..55cd459a390 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
/* complete initialization of a PNPACPI device includes having
* pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
*/
+static bool acpi_pnp_bus_match(struct device *dev)
+{
+ return dev->bus == &pnp_bus_type;
+}
+
static struct acpi_bus_type __initdata acpi_pnp_bus = {
- .bus = &pnp_bus_type,
+ .name = "PNP",
+ .match = acpi_pnp_bus_match,
.find_device = acpi_pnp_find_device,
};
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index da9782bd27d..e3661c20cf3 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2830,7 +2830,7 @@ EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
* regulator_allow_bypass - allow the regulator to go into bypass mode
*
* @regulator: Regulator to configure
- * @allow: enable or disable bypass mode
+ * @enable: enable or disable bypass mode
*
* Allow the regulator to go into bypass mode if all other consumers
* for the regulator also enable bypass mode and the machine
@@ -3057,9 +3057,13 @@ int regulator_bulk_enable(int num_consumers,
return 0;
err:
- pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
- while (--i >= 0)
- regulator_disable(consumers[i].consumer);
+ for (i = 0; i < num_consumers; i++) {
+ if (consumers[i].ret < 0)
+ pr_err("Failed to enable %s: %d\n", consumers[i].supply,
+ consumers[i].ret);
+ else
+ regulator_disable(consumers[i].consumer);
+ }
return ret;
}
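
With this change a failed regulator_bulk_enable() reports every consumer that failed and disables only those that were actually enabled, so callers can simply propagate the error. A condensed sketch of a typical caller (supply names hypothetical):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd" },	/* hypothetical supply names */
	{ .supply = "vio" },
};

static int example_power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret)
		return ret;

	/* on failure nothing is left enabled, just return the error */
	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}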
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 219d162b651..a53c11a529d 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -528,7 +528,7 @@ static int db8500_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int __exit db8500_regulator_remove(struct platform_device *pdev)
+static int db8500_regulator_remove(struct platform_device *pdev)
{
int i;
@@ -553,7 +553,7 @@ static struct platform_driver db8500_regulator_driver = {
.owner = THIS_MODULE,
},
.probe = db8500_regulator_probe,
- .remove = __exit_p(db8500_regulator_remove),
+ .remove = db8500_regulator_remove,
};
static int __init db8500_regulator_init(void)
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index cde13bb5a8f..39cf1460678 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -4,6 +4,7 @@
* Copyright 2011-2012 Texas Instruments Inc.
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Ian Lartey <ian@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -156,7 +157,7 @@ static const struct regs_info palmas_regs_info[] = {
*
* So they are basically (maxV-minV)/stepV
*/
-#define PALMAS_SMPS_NUM_VOLTAGES 116
+#define PALMAS_SMPS_NUM_VOLTAGES 117
#define PALMAS_SMPS10_NUM_VOLTAGES 2
#define PALMAS_LDO_NUM_VOLTAGES 50
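
The bump to 117 fixes the usual fencepost: a range that spans N equal steps has N + 1 selectable values, because both endpoints are selectable. A generic check (not tied to the Palmas datasheet):

/* fencepost: a range spanning (max - min) / step steps has one more value */
static unsigned int n_voltages(unsigned int min_uV, unsigned int max_uV,
			       unsigned int step_uV)
{
	return (max_uV - min_uV) / step_uV + 1;	/* e.g. 116 steps -> 117 values */
}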
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 74508cc62d6..f705d25b437 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -471,24 +471,23 @@ twl4030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
selector);
}
-static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
+static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
- VREG_VOLTAGE);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
if (vsel < 0)
return vsel;
vsel &= info->table_len - 1;
- return LDO_MV(info->table[vsel]) * 1000;
+ return vsel;
}
static struct regulator_ops twl4030ldo_ops = {
.list_voltage = twl4030ldo_list_voltage,
.set_voltage_sel = twl4030ldo_set_voltage_sel,
- .get_voltage = twl4030ldo_get_voltage,
+ .get_voltage_sel = twl4030ldo_get_voltage_sel,
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
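
Switching to .get_voltage_sel means the driver only reports the raw selector and the regulator core converts it to microvolts through .list_voltage, keeping the table lookup in one place. A condensed sketch of such a selector-based pair (example_* names, table values and register helper all hypothetical):

/* hypothetical selector-based callbacks; the core calls .list_voltage */
static const unsigned int example_table_uV[16] = {
	1200000, 1250000 /* ... remaining hypothetical values ... */
};

static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	return example_table_uV[selector];	/* selector -> microvolts */
}

static int example_get_voltage_sel(struct regulator_dev *rdev)
{
	int vsel = example_read_vsel(rdev);	/* hypothetical register read */

	if (vsel < 0)
		return vsel;

	return vsel & (ARRAY_SIZE(example_table_uV) - 1);
}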
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 765398c063c..c31187d7934 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache;
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
+static bool acpi_scsi_bus_match(struct device *dev)
+{
+ return dev->bus == &scsi_bus_type;
+}
+
int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
{
- bus->bus = &scsi_bus_type;
+ bus->match = acpi_scsi_bus_match;
return register_acpi_bus_type(bus);
}
EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 1956593ee89..81e939e90c4 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -881,17 +881,12 @@ static struct vio_driver hvcs_vio_driver = {
/* Only called from hvcs_get_pi please */
static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
{
- int clclength;
-
hvcsd->p_unit_address = pi->unit_address;
hvcsd->p_partition_ID = pi->partition_ID;
- clclength = strlen(&pi->location_code[0]);
- if (clclength > HVCS_CLC_LENGTH)
- clclength = HVCS_CLC_LENGTH;
/* copy the null-term char too */
- strncpy(&hvcsd->p_location_code[0],
- &pi->location_code[0], clclength + 1);
+ strlcpy(&hvcsd->p_location_code[0],
+ &pi->location_code[0], sizeof(hvcsd->p_location_code));
}
/*
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index cef4252bb31..b6f4bad3f75 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
return 0;
}
+static bool usb_acpi_bus_match(struct device *dev)
+{
+ return is_usb_device(dev) || is_usb_port(dev);
+}
+
static struct acpi_bus_type usb_acpi_bus = {
- .bus = &usb_bus_type,
- .find_bridge = usb_acpi_find_device,
+ .name = "USB",
+ .match = usb_acpi_bus_match,
.find_device = usb_acpi_find_device,
};