Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_lpss.c                | 167
-rw-r--r--  drivers/acpi/acpi_pnp.c                 |   4
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c           |  32
-rw-r--r--  drivers/acpi/acpica/hwgpe.c             |   8
-rw-r--r--  drivers/acpi/acpica/utresrc.c           |   4
-rw-r--r--  drivers/acpi/battery.c                  |   2
-rw-r--r--  drivers/acpi/blacklist.c                |  36
-rw-r--r--  drivers/acpi/device_pm.c                |  71
-rw-r--r--  drivers/acpi/fan.c                      |  18
-rw-r--r--  drivers/acpi/osl.c                      |  12
-rw-r--r--  drivers/acpi/pci_root.c                 |  14
-rw-r--r--  drivers/acpi/processor_core.c           |   6
-rw-r--r--  drivers/acpi/sbs.c                      |  80
-rw-r--r--  drivers/acpi/sleep.c                    |  16
-rw-r--r--  drivers/acpi/utils.c                    |   1
-rw-r--r--  drivers/acpi/video.c                    | 291
-rw-r--r--  drivers/acpi/video_detect.c             |   8
-rw-r--r--  drivers/amba/bus.c                      |  11
-rw-r--r--  drivers/base/platform.c                 |  16
-rw-r--r--  drivers/base/power/clock_ops.c          |  19
-rw-r--r--  drivers/base/power/common.c             |  52
-rw-r--r--  drivers/base/power/domain.c             | 865
-rw-r--r--  drivers/base/power/domain_governor.c    |   7
-rw-r--r--  drivers/base/power/main.c               |   8
-rw-r--r--  drivers/base/power/sysfs.c              |  24
-rw-r--r--  drivers/base/power/wakeup.c             |  16
-rw-r--r--  drivers/base/syscore.c                  |   7
-rw-r--r--  drivers/cpufreq/Kconfig                 |   8
-rw-r--r--  drivers/cpufreq/Kconfig.arm             |   2
-rw-r--r--  drivers/cpufreq/Makefile                |   2
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c          | 248
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c            | 364
-rw-r--r--  drivers/cpufreq/cpufreq.c               |   8
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c    |   2
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c    |   2
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c    |   2
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c      |   6
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c       |  44
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c   |   1
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c       |   2
-rw-r--r--  drivers/cpuidle/Kconfig                 |   8
-rw-r--r--  drivers/cpuidle/Kconfig.arm             |   1
-rw-r--r--  drivers/cpuidle/Kconfig.arm64           |  14
-rw-r--r--  drivers/cpuidle/Makefile                |   5
-rw-r--r--  drivers/cpuidle/cpuidle-arm64.c         | 133
-rw-r--r--  drivers/cpuidle/cpuidle-big_little.c    |  20
-rw-r--r--  drivers/cpuidle/dt_idle_states.c        | 213
-rw-r--r--  drivers/cpuidle/dt_idle_states.h        |   7
-rw-r--r--  drivers/cpuidle/governor.c              |   2
-rw-r--r--  drivers/devfreq/Kconfig                 |   3
-rw-r--r--  drivers/devfreq/devfreq.c               |   3
-rw-r--r--  drivers/devfreq/exynos/exynos_ppmu.c    |   3
-rw-r--r--  drivers/i2c/i2c-core.c                  |  14
-rw-r--r--  drivers/mmc/core/sdio_bus.c             |   5
-rw-r--r--  drivers/pci/pcie/pme.c                  |  61
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c   |  16
-rw-r--r--  drivers/power/avs/Kconfig               |   8
-rw-r--r--  drivers/power/avs/Makefile              |   1
-rw-r--r--  drivers/power/avs/rockchip-io-domain.c  | 351
-rw-r--r--  drivers/sh/pm_runtime.c                 |  11
-rw-r--r--  drivers/spi/spi.c                       |  13
61 files changed, 2228 insertions, 1150 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b0ea767c869..93d160661f4 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -54,55 +54,58 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_PRV_REG_COUNT 9
-struct lpss_shared_clock {
- const char *name;
- unsigned long rate;
- struct clk *clk;
-};
+/* LPSS Flags */
+#define LPSS_CLK BIT(0)
+#define LPSS_CLK_GATE BIT(1)
+#define LPSS_CLK_DIVIDER BIT(2)
+#define LPSS_LTR BIT(3)
+#define LPSS_SAVE_CTX BIT(4)
struct lpss_private_data;
struct lpss_device_desc {
- bool clk_required;
- const char *clkdev_name;
- bool ltr_required;
+ unsigned int flags;
unsigned int prv_offset;
size_t prv_size_override;
- bool clk_divider;
- bool clk_gate;
- bool save_ctx;
- struct lpss_shared_clock *shared_clock;
void (*setup)(struct lpss_private_data *pdata);
};
static struct lpss_device_desc lpss_dma_desc = {
- .clk_required = true,
- .clkdev_name = "hclk",
+ .flags = LPSS_CLK,
};
struct lpss_private_data {
void __iomem *mmio_base;
resource_size_t mmio_size;
+ unsigned int fixed_clk_rate;
struct clk *clk;
const struct lpss_device_desc *dev_desc;
u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
+/* UART Component Parameter Register */
+#define LPSS_UART_CPR 0xF4
+#define LPSS_UART_CPR_AFCE BIT(4)
+
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
unsigned int offset;
- u32 reg;
+ u32 val;
offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
- reg = readl(pdata->mmio_base + offset);
- writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
-
- offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
- reg = readl(pdata->mmio_base + offset);
- writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
+ val = readl(pdata->mmio_base + offset);
+ writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+ val = readl(pdata->mmio_base + LPSS_UART_CPR);
+ if (!(val & LPSS_UART_CPR_AFCE)) {
+ offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+ val = readl(pdata->mmio_base + offset);
+ val |= LPSS_GENERAL_UART_RTS_OVRD;
+ writel(val, pdata->mmio_base + offset);
+ }
}
-static void lpss_i2c_setup(struct lpss_private_data *pdata)
+static void byt_i2c_setup(struct lpss_private_data *pdata)
{
unsigned int offset;
u32 val;
@@ -111,100 +114,56 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata)
val = readl(pdata->mmio_base + offset);
val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
writel(val, pdata->mmio_base + offset);
-}
-static struct lpss_device_desc wpt_dev_desc = {
- .clk_required = true,
- .prv_offset = 0x800,
- .ltr_required = true,
- .clk_divider = true,
- .clk_gate = true,
-};
+ if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
+ pdata->fixed_clk_rate = 133000000;
+}
static struct lpss_device_desc lpt_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
.prv_offset = 0x800,
- .ltr_required = true,
- .clk_divider = true,
- .clk_gate = true,
};
static struct lpss_device_desc lpt_i2c_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
.prv_offset = 0x800,
- .ltr_required = true,
- .clk_gate = true,
};
static struct lpss_device_desc lpt_uart_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
.prv_offset = 0x800,
- .ltr_required = true,
- .clk_divider = true,
- .clk_gate = true,
.setup = lpss_uart_setup,
};
static struct lpss_device_desc lpt_sdio_dev_desc = {
+ .flags = LPSS_LTR,
.prv_offset = 0x1000,
.prv_size_override = 0x1018,
- .ltr_required = true,
-};
-
-static struct lpss_shared_clock pwm_clock = {
- .name = "pwm_clk",
- .rate = 25000000,
};
static struct lpss_device_desc byt_pwm_dev_desc = {
- .clk_required = true,
- .save_ctx = true,
- .shared_clock = &pwm_clock,
+ .flags = LPSS_SAVE_CTX,
};
static struct lpss_device_desc byt_uart_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x800,
- .clk_divider = true,
- .clk_gate = true,
- .save_ctx = true,
.setup = lpss_uart_setup,
};
static struct lpss_device_desc byt_spi_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
.prv_offset = 0x400,
- .clk_divider = true,
- .clk_gate = true,
- .save_ctx = true,
};
static struct lpss_device_desc byt_sdio_dev_desc = {
- .clk_required = true,
-};
-
-static struct lpss_shared_clock i2c_clock = {
- .name = "i2c_clk",
- .rate = 100000000,
+ .flags = LPSS_CLK,
};
static struct lpss_device_desc byt_i2c_dev_desc = {
- .clk_required = true,
+ .flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
- .save_ctx = true,
- .shared_clock = &i2c_clock,
- .setup = lpss_i2c_setup,
-};
-
-static struct lpss_shared_clock bsw_pwm_clock = {
- .name = "pwm_clk",
- .rate = 19200000,
-};
-
-static struct lpss_device_desc bsw_pwm_dev_desc = {
- .clk_required = true,
- .save_ctx = true,
- .shared_clock = &bsw_pwm_clock,
+ .setup = byt_i2c_setup,
};
#else
@@ -237,7 +196,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33FC", },
/* Braswell LPSS devices */
- { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+ { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
@@ -251,7 +210,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
{ "INT3437", },
- { "INT3438", LPSS_ADDR(wpt_dev_desc) },
+ /* Wildcat Point LPSS devices */
+ { "INT3438", LPSS_ADDR(lpt_dev_desc) },
{ }
};
@@ -276,7 +236,6 @@ static int register_device_clock(struct acpi_device *adev,
struct lpss_private_data *pdata)
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
- struct lpss_shared_clock *shared_clock = dev_desc->shared_clock;
const char *devname = dev_name(&adev->dev);
struct clk *clk = ERR_PTR(-ENODEV);
struct lpss_clk_data *clk_data;
@@ -289,12 +248,7 @@ static int register_device_clock(struct acpi_device *adev,
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
-
- if (dev_desc->clkdev_name) {
- clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
- devname);
- return 0;
- }
+ clk = clk_data->clk;
if (!pdata->mmio_base
|| pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
@@ -303,24 +257,19 @@ static int register_device_clock(struct acpi_device *adev,
parent = clk_data->name;
prv_base = pdata->mmio_base + dev_desc->prv_offset;
- if (shared_clock) {
- clk = shared_clock->clk;
- if (!clk) {
- clk = clk_register_fixed_rate(NULL, shared_clock->name,
- "lpss_clk", 0,
- shared_clock->rate);
- shared_clock->clk = clk;
- }
- parent = shared_clock->name;
+ if (pdata->fixed_clk_rate) {
+ clk = clk_register_fixed_rate(NULL, devname, parent, 0,
+ pdata->fixed_clk_rate);
+ goto out;
}
- if (dev_desc->clk_gate) {
+ if (dev_desc->flags & LPSS_CLK_GATE) {
clk = clk_register_gate(NULL, devname, parent, 0,
prv_base, 0, 0, NULL);
parent = devname;
}
- if (dev_desc->clk_divider) {
+ if (dev_desc->flags & LPSS_CLK_DIVIDER) {
/* Prevent division by zero */
if (!readl(prv_base))
writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
@@ -344,7 +293,7 @@ static int register_device_clock(struct acpi_device *adev,
kfree(parent);
kfree(clk_name);
}
-
+out:
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -392,7 +341,10 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
pdata->dev_desc = dev_desc;
- if (dev_desc->clk_required) {
+ if (dev_desc->setup)
+ dev_desc->setup(pdata);
+
+ if (dev_desc->flags & LPSS_CLK) {
ret = register_device_clock(adev, pdata);
if (ret) {
/* Skip the device, but continue the namespace scan. */
@@ -413,9 +365,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->setup)
- dev_desc->setup(pdata);
-
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev);
if (!IS_ERR_OR_NULL(pdev)) {
@@ -692,19 +641,19 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_BOUND_DRIVER:
- if (pdata->dev_desc->save_ctx)
+ if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
pdev->dev.pm_domain = &acpi_lpss_pm_domain;
break;
case BUS_NOTIFY_UNBOUND_DRIVER:
- if (pdata->dev_desc->save_ctx)
+ if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
pdev->dev.pm_domain = NULL;
break;
case BUS_NOTIFY_ADD_DEVICE:
- if (pdata->dev_desc->ltr_required)
+ if (pdata->dev_desc->flags & LPSS_LTR)
return sysfs_create_group(&pdev->dev.kobj,
&lpss_attr_group);
case BUS_NOTIFY_DEL_DEVICE:
- if (pdata->dev_desc->ltr_required)
+ if (pdata->dev_desc->flags & LPSS_LTR)
sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
default:
break;
@@ -721,7 +670,7 @@ static void acpi_lpss_bind(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+ if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
return;
if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
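For context: after this change, each per-capability boolean in struct lpss_device_desc becomes a bit in a single flags word. A minimal sketch of how a new descriptor would be declared and queried under that scheme follows; the descriptor and helper names are hypothetical and not part of the patch.

/* Hypothetical example only: declaring an LPSS device with the flag word. */
static struct lpss_device_desc example_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
};

/* Capabilities are now tested with a bitwise AND instead of a bool field. */
static bool example_desc_needs_ltr(const struct lpss_device_desc *desc)
{
	return desc->flags & LPSS_LTR;
}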
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 1f8b20496f3..b193f842599 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -130,10 +130,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"PNP0401"}, /* ECP Printer Port */
/* apple-gmux */
{"APP000B"},
- /* fujitsu-laptop.c */
- {"FUJ02bf"},
- {"FUJ02B1"},
- {"FUJ02E3"},
/* system */
{"PNP0c02"}, /* General ID for reserving resources */
{"PNP0c01"}, /* memory controller */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 0cf159cc6e6..56710a03c9b 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -596,6 +596,38 @@ acpi_status acpi_enable_all_runtime_gpes(void)
ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
+/******************************************************************************
+ *
+ * FUNCTION: acpi_enable_all_wakeup_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in
+ * all GPE blocks.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_wakeup_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_hw_enable_all_wakeup_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 2e6caabba07..ea62d40fd16 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -396,11 +396,11 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
- if (!gpe_block->register_info[i].enable_for_wake) {
- continue;
- }
- /* Enable all "wake" GPEs in this register */
+ /*
+ * Enable all "wake" GPEs in this register and disable the
+ * remaining ones.
+ */
status =
acpi_hw_write(gpe_block->register_info[i].enable_for_wake,
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 14cb6c0c8be..5cd017c7ac0 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -87,7 +87,9 @@ const char *acpi_gbl_io_decode[] = {
const char *acpi_gbl_ll_decode[] = {
"ActiveHigh",
- "ActiveLow"
+ "ActiveLow",
+ "ActiveBoth",
+ "Reserved"
};
const char *acpi_gbl_max_decode[] = {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5fdfe65fe16..8ec8a89a20a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -695,7 +695,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
const char *s;
s = dmi_get_system_info(DMI_PRODUCT_VERSION);
- if (s && !strnicmp(s, "ThinkPad", 8)) {
+ if (s && !strncasecmp(s, "ThinkPad", 8)) {
dmi_walk(find_battery, battery);
if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
&battery->flags) &&
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 36eb42e3b0b..ed122e17636 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -247,8 +247,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
/*
- * These machines will power on immediately after shutdown when
- * reporting the Windows 2012 OSI.
+ * The wireless hotkey does not work on those machines when
+ * returning true for _OSI("Windows 2012")
*/
{
.callback = dmi_disable_osi_win8,
@@ -258,6 +258,38 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7537",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 5437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 3437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3446",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 67075f800e3..bea6896be12 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1041,6 +1041,40 @@ static struct dev_pm_domain acpi_general_pm_domain = {
};
/**
+ * acpi_dev_pm_detach - Remove ACPI power management from the device.
+ * @dev: Device to take care of.
+ * @power_off: Whether or not to try to remove power from the device.
+ *
+ * Remove the device from the general ACPI PM domain and remove its wakeup
+ * notifier. If @power_off is set, additionally remove power from the device if
+ * possible.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+static void acpi_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev && dev->pm_domain == &acpi_general_pm_domain) {
+ dev->pm_domain = NULL;
+ acpi_remove_pm_notifier(adev);
+ if (power_off) {
+ /*
+ * If the device's PM QoS resume latency limit or flags
+ * have been exposed to user space, they have to be
+ * hidden at this point, so that they don't affect the
+ * choice of the low-power state to put the device into.
+ */
+ dev_pm_qos_hide_latency_limit(dev);
+ dev_pm_qos_hide_flags(dev);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
+ }
+ }
+}
+
+/**
* acpi_dev_pm_attach - Prepare device for ACPI power management.
* @dev: Device to prepare.
* @power_on: Whether or not to power on the device.
@@ -1072,42 +1106,9 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_dev_pm_full_power(adev);
acpi_device_wakeup(adev, ACPI_STATE_S0, false);
}
+
+ dev->pm_domain->detach = acpi_dev_pm_detach;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
-
-/**
- * acpi_dev_pm_detach - Remove ACPI power management from the device.
- * @dev: Device to take care of.
- * @power_off: Whether or not to try to remove power from the device.
- *
- * Remove the device from the general ACPI PM domain and remove its wakeup
- * notifier. If @power_off is set, additionally remove power from the device if
- * possible.
- *
- * Callers must ensure proper synchronization of this function with power
- * management callbacks.
- */
-void acpi_dev_pm_detach(struct device *dev, bool power_off)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
-
- if (adev && dev->pm_domain == &acpi_general_pm_domain) {
- dev->pm_domain = NULL;
- acpi_remove_pm_notifier(adev);
- if (power_off) {
- /*
- * If the device's PM QoS resume latency limit or flags
- * have been exposed to user space, they have to be
- * hidden at this point, so that they don't affect the
- * choice of the low-power state to put the device into.
- */
- dev_pm_qos_hide_latency_limit(dev);
- dev_pm_qos_hide_flags(dev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
- acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- }
- }
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
#endif /* CONFIG_PM */
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8acf53e6296..5328b1090e0 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,12 +27,10 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
-#define PREFIX "ACPI: "
-
#define ACPI_FAN_CLASS "fan"
#define ACPI_FAN_FILE_STATE "state"
@@ -127,8 +125,9 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
};
/* --------------------------------------------------------------------------
- Driver Interface
- -------------------------------------------------------------------------- */
+ * Driver Interface
+ * --------------------------------------------------------------------------
+*/
static int acpi_fan_add(struct acpi_device *device)
{
@@ -143,7 +142,7 @@ static int acpi_fan_add(struct acpi_device *device)
result = acpi_bus_update_power(device->handle, NULL);
if (result) {
- printk(KERN_ERR PREFIX "Setting initial power state\n");
+ dev_err(&device->dev, "Setting initial power state\n");
goto end;
}
@@ -168,10 +167,9 @@ static int acpi_fan_add(struct acpi_device *device)
&device->dev.kobj,
"device");
if (result)
- dev_err(&device->dev, "Failed to create sysfs link "
- "'device'\n");
+ dev_err(&device->dev, "Failed to create sysfs link 'device'\n");
- printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ dev_info(&device->dev, "ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -217,7 +215,7 @@ static int acpi_fan_resume(struct device *dev)
result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
if (result)
- printk(KERN_ERR PREFIX "Error updating fan power state\n");
+ dev_err(dev, "Error updating fan power state\n");
return result;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3abe9b223ba..9964f70be98 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
osi_linux.dmi ? " via DMI" : "");
}
+ if (!strcmp("Darwin", interface)) {
+ /*
+ * Apple firmware will behave poorly if it receives positive
+ * answers to "Darwin" and any other OS. Respond positively
+ * to Darwin and then disable all other vendor strings.
+ */
+ acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+ supported = ACPI_UINT32_MAX;
+ }
+
return supported;
}
@@ -825,7 +835,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
+ if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index e6ae603ed1a..cd4de7e038e 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -35,6 +35,7 @@
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/dmi.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -430,6 +431,19 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
acpi_handle handle = device->handle;
/*
+ * Apple always return failure on _OSC calls when _OSI("Darwin") has
+ * been called successfully. We know the feature set supported by the
+ * platform, so avoid calling _OSC at all
+ */
+
+ if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
+ root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
+ decode_osc_control(root, "OS assumes control of",
+ root->osc_control_set);
+ return;
+ }
+
+ /*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
*/
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e32321ce9a5..ef58f46c844 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -16,7 +16,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
struct acpi_madt_local_apic *lapic =
- (struct acpi_madt_local_apic *)entry;
+ container_of(entry, struct acpi_madt_local_apic, header);
if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
@@ -32,7 +32,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, int *apic_id)
{
struct acpi_madt_local_x2apic *apic =
- (struct acpi_madt_local_x2apic *)entry;
+ container_of(entry, struct acpi_madt_local_x2apic, header);
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
@@ -49,7 +49,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
int device_declaration, u32 acpi_id, int *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
- (struct acpi_madt_local_sapic *)entry;
+ container_of(entry, struct acpi_madt_local_sapic, header);
if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 366ca40a6f7..a7a3edd28be 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -35,6 +35,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
+#include <linux/dmi.h>
#include "sbshc.h"
#include "battery.h"
@@ -61,6 +62,8 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+static bool sbs_manager_broken;
+
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -109,6 +112,7 @@ struct acpi_sbs {
u8 batteries_supported:4;
u8 manager_present:1;
u8 charger_present:1;
+ u8 charger_exists:1;
};
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
@@ -429,9 +433,19 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER,
0x13, (u8 *) & status);
- if (!result)
- sbs->charger_present = (status >> 15) & 0x1;
- return result;
+
+ if (result)
+ return result;
+
+ /*
+ * The spec requires that bit 4 always be 1. If it's not set, assume
+ * that the implementation doesn't support an SBS charger
+ */
+ if (!((status >> 4) & 0x1))
+ return -ENODEV;
+
+ sbs->charger_present = (status >> 15) & 0x1;
+ return 0;
}
static ssize_t acpi_battery_alarm_show(struct device *dev,
@@ -483,16 +497,21 @@ static int acpi_battery_read(struct acpi_battery *battery)
ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
} else if (battery->id == 0)
battery->present = 1;
+
if (result || !battery->present)
return result;
if (saved_present != battery->present) {
battery->update_time = 0;
result = acpi_battery_get_info(battery);
- if (result)
+ if (result) {
+ battery->present = 0;
return result;
+ }
}
result = acpi_battery_get_state(battery);
+ if (result)
+ battery->present = 0;
return result;
}
@@ -524,6 +543,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
result = power_supply_register(&sbs->device->dev, &battery->bat);
if (result)
goto end;
+
result = device_create_file(battery->bat.dev, &alarm_attr);
if (result)
goto end;
@@ -554,6 +574,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
if (result)
goto end;
+ sbs->charger_exists = 1;
sbs->charger.name = "sbs-charger";
sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
sbs->charger.properties = sbs_ac_props;
@@ -580,9 +601,12 @@ static void acpi_sbs_callback(void *context)
struct acpi_battery *bat;
u8 saved_charger_state = sbs->charger_present;
u8 saved_battery_state;
- acpi_ac_get_present(sbs);
- if (sbs->charger_present != saved_charger_state)
- kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
+
+ if (sbs->charger_exists) {
+ acpi_ac_get_present(sbs);
+ if (sbs->charger_present != saved_charger_state)
+ kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
+ }
if (sbs->manager_present) {
for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -598,12 +622,31 @@ static void acpi_sbs_callback(void *context)
}
}
+static int disable_sbs_manager(const struct dmi_system_id *d)
+{
+ sbs_manager_broken = true;
+ return 0;
+}
+
+static struct dmi_system_id acpi_sbs_dmi_table[] = {
+ {
+ .callback = disable_sbs_manager,
+ .ident = "Apple",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
+ },
+ },
+ { },
+};
+
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
+ dmi_check_system(acpi_sbs_dmi_table);
+
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
@@ -619,17 +662,24 @@ static int acpi_sbs_add(struct acpi_device *device)
device->driver_data = sbs;
result = acpi_charger_add(sbs);
- if (result)
+ if (result && result != -ENODEV)
goto end;
- result = acpi_manager_get_info(sbs);
- if (!result) {
- sbs->manager_present = 1;
- for (id = 0; id < MAX_SBS_BAT; ++id)
- if ((sbs->batteries_supported & (1 << id)))
- acpi_battery_add(sbs, id);
- } else
+ result = 0;
+
+ if (!sbs_manager_broken) {
+ result = acpi_manager_get_info(sbs);
+ if (!result) {
+ sbs->manager_present = 0;
+ for (id = 0; id < MAX_SBS_BAT; ++id)
+ if ((sbs->batteries_supported & (1 << id)))
+ acpi_battery_add(sbs, id);
+ }
+ }
+
+ if (!sbs->manager_present)
acpi_battery_add(sbs, 0);
+
acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
end:
if (result)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 54da4a3fe65..05a31b573fc 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
@@ -626,6 +627,19 @@ static int acpi_freeze_begin(void)
return 0;
}
+static int acpi_freeze_prepare(void)
+{
+ acpi_enable_all_wakeup_gpes();
+ enable_irq_wake(acpi_gbl_FADT.sci_interrupt);
+ return 0;
+}
+
+static void acpi_freeze_restore(void)
+{
+ disable_irq_wake(acpi_gbl_FADT.sci_interrupt);
+ acpi_enable_all_runtime_gpes();
+}
+
static void acpi_freeze_end(void)
{
acpi_scan_lock_release();
@@ -633,6 +647,8 @@ static void acpi_freeze_end(void)
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
+ .prepare = acpi_freeze_prepare,
+ .restore = acpi_freeze_restore,
.end = acpi_freeze_end,
};
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 07c8c5a5ee9..834f35c4bf8 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -661,7 +661,6 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
* @uuid: UUID of requested functions, should be 16 bytes at least
* @rev: revision number of requested functions
* @funcs: bitmap of requested functions
- * @exclude: excluding special value, used to support i915 and nouveau
*
* Evaluate device's _DSM method to check whether it supports requested
* functions. Currently only support 64 functions at maximum, should be
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8e7e18567ae..807a88a0f39 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -411,12 +411,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
-static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
-{
- use_native_backlight_dmi = true;
- return 0;
-}
-
static int __init video_disable_native_backlight(const struct dmi_system_id *d)
{
use_native_backlight_dmi = false;
@@ -467,265 +461,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad X230",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad T430 and T430s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad T430",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad T431s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad W530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ThinkPad X1 Carbon",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Lenovo Yoga 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Lenovo Yoga 2 11",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Thinkpad Helix",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Dell Inspiron 7520",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire 5733Z",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire 5742G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire V5-171",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire V5-431",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire V5-471G",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer TravelMate B113",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire V5-572G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "Acer Aspire V5-573G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "ASUS Zenbook Prime UX31A",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ProBook 4340s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ProBook 4540s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ProBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP EliteBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP EliteBook 2014 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G2"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ZBook 14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ZBook 15",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP ZBook 17",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP EliteBook 8470p",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"),
- },
- },
- {
- .callback = video_set_use_native_backlight,
- .ident = "HP EliteBook 8780w",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
- },
- },
/*
* These models have a working acpi_video backlight control, and using
@@ -1419,6 +1154,23 @@ acpi_video_device_bind(struct acpi_video_bus *video,
}
}
+static bool acpi_video_device_in_dod(struct acpi_video_device *device)
+{
+ struct acpi_video_bus *video = device->video;
+ int i;
+
+ /* If we have a broken _DOD, no need to test */
+ if (!video->attached_count)
+ return true;
+
+ for (i = 0; i < video->attached_count; i++) {
+ if (video->attached_array[i].bind_info == device)
+ return true;
+ }
+
+ return false;
+}
+
/*
* Arg:
* video : video bus device
@@ -1858,6 +1610,15 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
static int count;
char *name;
+ /*
+ * Do not create backlight device for video output
+ * device that is not in the enumerated list.
+ */
+ if (!acpi_video_device_in_dod(device)) {
+ dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n");
+ return;
+ }
+
result = acpi_video_init_brightness(device);
if (result)
return;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c42feb2bacd..27c43499977 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -174,6 +174,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Lenovo IdeaPad Z570",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
+ },
+ },
{ },
};
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3cf61a127ee..47bbdc1b5be 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/amba/bus.h>
#include <linux/sizes.h>
@@ -182,9 +183,15 @@ static int amba_probe(struct device *dev)
int ret;
do {
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret == -EPROBE_DEFER)
+ break;
+
ret = amba_get_enable_pclk(pcdev);
- if (ret)
+ if (ret) {
+ dev_pm_domain_detach(dev, true);
break;
+ }
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
@@ -199,6 +206,7 @@ static int amba_probe(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
+ dev_pm_domain_detach(dev, true);
} while (0);
return ret;
@@ -220,6 +228,7 @@ static int amba_remove(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
+ dev_pm_domain_detach(dev, true);
return ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ab4f4ce0272..b2afc29403f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
@@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
- acpi_dev_pm_attach(_dev, true);
-
- ret = drv->probe(dev);
- if (ret)
- acpi_dev_pm_detach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, true);
+ if (ret != -EPROBE_DEFER) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ }
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
@@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- acpi_dev_pm_detach(_dev, true);
+ dev_pm_domain_detach(_dev, true);
return ret;
}
@@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- acpi_dev_pm_detach(_dev, true);
+ dev_pm_domain_detach(_dev, true);
}
/**
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b99e6c06ee6..78369305e06 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -368,8 +368,13 @@ int pm_clk_suspend(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry_reverse(ce, &psd->clock_list, node)
- clk_disable(ce->clk);
+ list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+ if (ce->status < PCE_STATUS_ERROR) {
+ if (ce->status == PCE_STATUS_ENABLED)
+ clk_disable(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ }
+ }
spin_unlock_irqrestore(&psd->lock, flags);
@@ -385,6 +390,7 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -394,8 +400,13 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry(ce, &psd->clock_list, node)
- __pm_clk_enable(dev, ce->clk);
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (ce->status < PCE_STATUS_ERROR) {
+ ret = __pm_clk_enable(dev, ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ }
+ }
spin_unlock_irqrestore(&psd->lock, flags);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df2e5eeaeb0..b0f138806bb 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,8 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
+#include <linux/acpi.h>
+#include <linux/pm_domain.h>
/**
* dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
@@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev)
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+
+/**
+ * dev_pm_domain_attach - Attach a device to its PM domain.
+ * @dev: Device to attach.
+ * @power_on: Used to indicate whether we should power on the device.
+ *
+ * The @dev may only be attached to a single PM domain. By iterating through
+ * the available alternatives we try to find a valid PM domain for the device.
+ * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain
+ * should be assigned by the corresponding attach function.
+ *
+ * This function should typically be invoked from subsystem level code during
+ * the probe phase. Especially for those that holds devices which requires
+ * power management through PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns 0 on successfully attached PM domain or negative error code.
+ */
+int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+ int ret;
+
+ ret = acpi_dev_pm_attach(dev, power_on);
+ if (ret)
+ ret = genpd_dev_pm_attach(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
+
+/**
+ * dev_pm_domain_detach - Detach a device from its PM domain.
+ * @dev: Device to attach.
+ * @power_off: Used to indicate whether we should power off the device.
+ *
+ * This functions will reverse the actions from dev_pm_domain_attach() and thus
+ * try to detach the @dev from its PM domain. Typically it should be invoked
+ * from subsystem level code during the remove phase.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach(struct device *dev, bool power_off)
+{
+ if (dev->pm_domain && dev->pm_domain->detach)
+ dev->pm_domain->detach(dev, power_off);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
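The two helpers added above are meant to be the single entry points a bus uses to attach and detach PM domains around driver probe and remove; the amba and platform bus changes later in this diff follow this pattern. A minimal sketch of a hypothetical bus using them (the example_* names are illustrative, not part of the patch):

#include <linux/device.h>
#include <linux/pm_domain.h>

/* Hypothetical driver hooks, stubbed so the sketch is self-contained. */
static int example_driver_probe(struct device *dev) { return 0; }
static void example_driver_remove(struct device *dev) { }

static int example_bus_probe(struct device *dev)
{
	int ret;

	/* Tries ACPI first, then generic PM domains; honor probe deferral. */
	ret = dev_pm_domain_attach(dev, true);
	if (ret == -EPROBE_DEFER)
		return ret;

	/* Any other attach result means "no domain"; probe the driver anyway. */
	ret = example_driver_probe(dev);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}

static int example_bus_remove(struct device *dev)
{
	example_driver_remove(dev);

	/* Invokes the ->detach() hook installed by whichever domain attached. */
	dev_pm_domain_detach(dev, true);
	return 0;
}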
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index eee55c1e5fd..40bc2f4072c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
@@ -25,10 +26,6 @@
__routine = genpd->dev_ops.callback; \
if (__routine) { \
__ret = __routine(dev); \
- } else { \
- __routine = dev_gpd_data(dev)->ops.callback; \
- if (__routine) \
- __ret = __routine(dev); \
} \
__ret; \
})
@@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
return genpd;
}
-#ifdef CONFIG_PM
-
struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
@@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
s64 usecs64;
- if (!genpd->cpu_data)
+ if (!genpd->cpuidle_data)
return;
usecs64 = genpd->power_on_latency_ns;
do_div(usecs64, NSEC_PER_USEC);
- usecs64 += genpd->cpu_data->saved_exit_latency;
- genpd->cpu_data->idle_state->exit_latency = usecs64;
+ usecs64 += genpd->cpuidle_data->saved_exit_latency;
+ genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}
/**
@@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
}
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
cpuidle_pause_and_lock();
- genpd->cpu_data->idle_state->disabled = true;
+ genpd->cpuidle_data->idle_state->disabled = true;
cpuidle_resume_and_unlock();
goto out;
}
@@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name)
return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
-#endif /* CONFIG_PM */
-
#ifdef CONFIG_PM_RUNTIME
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
@@ -430,7 +423,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
* Queue up the execution of pm_genpd_poweroff() unless it's already been done
* before.
*/
-void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
queue_work(pm_wq, &genpd->power_off_work);
}
@@ -520,17 +513,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
}
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
/*
- * If cpu_data is set, cpuidle should turn the domain off when
- * the CPU in it is idle. In that case we don't decrement the
- * subdomain counts of the master domains, so that power is not
- * removed from the current domain prematurely as a result of
- * cutting off the masters' power.
+ * If cpuidle_data is set, cpuidle should turn the domain off
+ * when the CPU in it is idle. In that case we don't decrement
+ * the subdomain counts of the master domains, so that power is
+ * not removed from the current domain prematurely as a result
+ * of cutting off the masters' power.
*/
genpd->status = GPD_STATE_POWER_OFF;
cpuidle_pause_and_lock();
- genpd->cpu_data->idle_state->disabled = false;
+ genpd->cpuidle_data->idle_state->disabled = false;
cpuidle_resume_and_unlock();
goto out;
}
@@ -619,8 +612,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- might_sleep_if(!genpd->dev_irq_safe);
-
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -665,8 +656,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- might_sleep_if(!genpd->dev_irq_safe);
-
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
return genpd_start_dev_no_timing(genpd, dev);
@@ -733,6 +722,13 @@ void pm_genpd_poweroff_unused(void)
mutex_unlock(&gpd_list_lock);
}
+static int __init genpd_poweroff_unused(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(genpd_poweroff_unused);
+
#else
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -741,6 +737,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static inline void
+genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
+
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#define pm_genpd_runtime_suspend NULL
@@ -774,46 +773,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
-static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
-}
-
-static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
-}
-
-static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
-}
-
-static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
-}
-
-static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
-}
-
-static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
-}
-
-static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
-}
-
-static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
-}
-
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -995,7 +954,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}
/**
@@ -1016,7 +975,7 @@ static int pm_genpd_suspend_late(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}
/**
@@ -1103,7 +1062,7 @@ static int pm_genpd_resume_early(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}
/**
@@ -1124,7 +1083,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
@@ -1145,7 +1104,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}
/**
@@ -1167,7 +1126,7 @@ static int pm_genpd_freeze_late(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}
/**
@@ -1231,7 +1190,7 @@ static int pm_genpd_thaw_early(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}
/**
@@ -1252,7 +1211,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}
/**
@@ -1344,13 +1303,13 @@ static void pm_genpd_complete(struct device *dev)
}
/**
- * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * genpd_syscore_switch - Switch power during system core suspend or resume.
* @dev: Device that normally is marked as "always on" to switch power for.
*
* This routine may only be called during the system core (syscore) suspend or
* resume phase for devices whose "always on" flags are set.
*/
-void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+static void genpd_syscore_switch(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
@@ -1366,7 +1325,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend)
genpd->suspended_count--;
}
}
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
+void pm_genpd_syscore_poweroff(struct device *dev)
+{
+ genpd_syscore_switch(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
+
+void pm_genpd_syscore_poweron(struct device *dev)
+{
+ genpd_syscore_switch(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else
@@ -1466,6 +1436,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
spin_unlock_irq(&dev->power.lock);
+ if (genpd->attach_dev)
+ genpd->attach_dev(dev);
+
mutex_lock(&gpd_data->lock);
gpd_data->base.dev = dev;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
@@ -1484,39 +1457,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
- * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
- * @genpd_node: Device tree node pointer representing a PM domain to which the
- * the device is added to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (gpd->of_node == genpd_node) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
- if (!genpd)
- return -EINVAL;
-
- return __pm_genpd_add_device(genpd, dev, td);
-}
-
-
-/**
* __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
* @domain_name: Name of the PM domain to add the device to.
* @dev: Device to be added.
@@ -1558,6 +1498,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
+ if (genpd->detach_dev)
+ genpd->detach_dev(dev);
+
spin_lock_irq(&dev->power.lock);
dev->pm_domain = NULL;
@@ -1744,112 +1687,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
- * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
- * @dev: Device to add the callbacks to.
- * @ops: Set of callbacks to add.
- * @td: Timing data to add to the device along with the callbacks (optional).
- *
- * Every call to this routine should be balanced with a call to
- * __pm_genpd_remove_callbacks() and they must not be nested.
- */
-int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
- int ret = 0;
-
- if (!(dev && ops))
- return -EINVAL;
-
- gpd_data_new = __pm_genpd_alloc_dev_data(dev);
- if (!gpd_data_new)
- return -ENOMEM;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- ret = dev_pm_get_subsys_data(dev);
- if (ret)
- goto out;
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data->domain_data) {
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- } else {
- gpd_data = gpd_data_new;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- }
- gpd_data->refcount++;
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
-
- spin_unlock_irq(&dev->power.lock);
-
- out:
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- if (gpd_data != gpd_data_new)
- __pm_genpd_free_dev_data(dev, gpd_data_new);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
-
-/**
- * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
- * @dev: Device to remove the callbacks from.
- * @clear_td: If set, clear the device's timing data too.
- *
- * This routine can only be called after pm_genpd_add_callbacks().
- */
-int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
-{
- struct generic_pm_domain_data *gpd_data = NULL;
- bool remove = false;
- int ret = 0;
-
- if (!(dev && dev->power.subsys_data))
- return -EINVAL;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data->domain_data) {
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- gpd_data->ops = (struct gpd_dev_ops){ NULL };
- if (clear_td)
- gpd_data->td = (struct gpd_timing_data){ 0 };
-
- if (--gpd_data->refcount == 0) {
- dev->power.subsys_data->domain_data = NULL;
- remove = true;
- }
- } else {
- ret = -EINVAL;
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- if (ret)
- return ret;
-
- dev_pm_put_subsys_data(dev);
- if (remove)
- __pm_genpd_free_dev_data(dev, gpd_data);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
-
-/**
* pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
* @genpd: PM domain to be connected with cpuidle.
* @state: cpuidle state this domain can disable/enable.
@@ -1861,7 +1698,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
struct cpuidle_state *idle_state;
int ret = 0;
@@ -1870,12 +1707,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
genpd_acquire_lock(genpd);
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
ret = -EEXIST;
goto out;
}
- cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
- if (!cpu_data) {
+ cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
+ if (!cpuidle_data) {
ret = -ENOMEM;
goto out;
}
@@ -1893,9 +1730,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
ret = -EAGAIN;
goto err;
}
- cpu_data->idle_state = idle_state;
- cpu_data->saved_exit_latency = idle_state->exit_latency;
- genpd->cpu_data = cpu_data;
+ cpuidle_data->idle_state = idle_state;
+ cpuidle_data->saved_exit_latency = idle_state->exit_latency;
+ genpd->cpuidle_data = cpuidle_data;
genpd_recalc_cpu_exit_latency(genpd);
out:
@@ -1906,7 +1743,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
cpuidle_driver_unref();
err_drv:
- kfree(cpu_data);
+ kfree(cpuidle_data);
goto out;
}
@@ -1929,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
*/
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
struct cpuidle_state *idle_state;
int ret = 0;
@@ -1938,20 +1775,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
genpd_acquire_lock(genpd);
- cpu_data = genpd->cpu_data;
- if (!cpu_data) {
+ cpuidle_data = genpd->cpuidle_data;
+ if (!cpuidle_data) {
ret = -ENODEV;
goto out;
}
- idle_state = cpu_data->idle_state;
+ idle_state = cpuidle_data->idle_state;
if (!idle_state->disabled) {
ret = -EAGAIN;
goto out;
}
- idle_state->exit_latency = cpu_data->saved_exit_latency;
+ idle_state->exit_latency = cpuidle_data->saved_exit_latency;
cpuidle_driver_unref();
- genpd->cpu_data = NULL;
- kfree(cpu_data);
+ genpd->cpuidle_data = NULL;
+ kfree(cpuidle_data);
out:
genpd_release_lock(genpd);
@@ -1970,17 +1807,13 @@ int pm_genpd_name_detach_cpuidle(const char *name)
/* Default device callbacks for generic PM domains. */
/**
- * pm_genpd_default_save_state - Default "save device state" for PM domians.
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
* @dev: Device to handle.
*/
static int pm_genpd_default_save_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- cb = dev_gpd_data(dev)->ops.save_state;
- if (cb)
- return cb(dev);
-
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
@@ -1997,17 +1830,13 @@ static int pm_genpd_default_save_state(struct device *dev)
}
/**
- * pm_genpd_default_restore_state - Default PM domians "restore device state".
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
* @dev: Device to handle.
*/
static int pm_genpd_default_restore_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- cb = dev_gpd_data(dev)->ops.restore_state;
- if (cb)
- return cb(dev);
-
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
@@ -2023,109 +1852,6 @@ static int pm_genpd_default_restore_state(struct device *dev)
return cb ? cb(dev) : 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-/**
- * pm_genpd_default_suspend - Default "device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
-
- return cb ? cb(dev) : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
-
- return cb ? cb(dev) : pm_generic_suspend_late(dev);
-}
-
-/**
- * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
-
- return cb ? cb(dev) : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_default_resume - Default "device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
-
- return cb ? cb(dev) : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_default_freeze - Default "device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
-
- return cb ? cb(dev) : pm_generic_freeze(dev);
-}
-
-/**
- * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
-
- return cb ? cb(dev) : pm_generic_freeze_late(dev);
-}
-
-/**
- * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
-
- return cb ? cb(dev) : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_default_thaw - Default "device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
-
- return cb ? cb(dev) : pm_generic_thaw(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define pm_genpd_default_suspend NULL
-#define pm_genpd_default_suspend_late NULL
-#define pm_genpd_default_resume_early NULL
-#define pm_genpd_default_resume NULL
-#define pm_genpd_default_freeze NULL
-#define pm_genpd_default_freeze_late NULL
-#define pm_genpd_default_thaw_early NULL
-#define pm_genpd_default_thaw NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -2177,15 +1903,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.complete = pm_genpd_complete;
genpd->dev_ops.save_state = pm_genpd_default_save_state;
genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
- genpd->dev_ops.suspend = pm_genpd_default_suspend;
- genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
- genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
- genpd->dev_ops.resume = pm_genpd_default_resume;
- genpd->dev_ops.freeze = pm_genpd_default_freeze;
- genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
- genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
- genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+/*
+ * Device Tree based PM domain providers.
+ *
+ * The code below implements generic device tree based PM domain providers that
+ * bind device tree nodes with generic PM domains registered in the system.
+ *
+ * Any driver that registers generic PM domains and needs to support binding of
+ * devices to these domains is supposed to register a PM domain provider, which
+ * maps a PM domain specifier retrieved from the device tree to a PM domain.
+ *
+ * Two simple mapping functions have been provided for convenience:
+ * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
+ * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
+ * index.
+ */
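
For illustration, a SoC driver that exposes a single PM domain from its own DT node might register a provider along the lines of the sketch below. This is only a sketch: the domain object, its name and the probe function are hypothetical, and it assumes the declarations of __of_genpd_add_provider() and __of_genpd_xlate_simple() from <linux/pm_domain.h> added elsewhere in this series.

#include <linux/pm_domain.h>
#include <linux/platform_device.h>

static struct generic_pm_domain example_pd = {
	.name = "example-pd",	/* hypothetical; a real domain would also set .power_on/.power_off */
};

static int example_pd_probe(struct platform_device *pdev)
{
	/* No governor, start in the powered-off state. */
	pm_genpd_init(&example_pd, NULL, true);

	/* One DT node maps to one domain, so the simple xlate helper fits. */
	return __of_genpd_add_provider(pdev->dev.of_node,
				       __of_genpd_xlate_simple, &example_pd);
}
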
+
+/**
+ * struct of_genpd_provider - PM domain provider registration structure
+ * @link: Entry in global list of PM domain providers
+ * @node: Pointer to device tree node of PM domain provider
+ * @xlate: Provider-specific xlate callback mapping a set of specifier cells
+ * into a PM domain.
+ * @data: context pointer to be passed into @xlate callback
+ */
+struct of_genpd_provider {
+ struct list_head link;
+ struct device_node *node;
+ genpd_xlate_t xlate;
+ void *data;
+};
+
+/* List of registered PM domain providers. */
+static LIST_HEAD(of_genpd_providers);
+/* Mutex to protect the list above. */
+static DEFINE_MUTEX(of_genpd_mutex);
+
+/**
+ * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct generic_pm_domain
+ *
+ * This is a generic xlate function that can be used to model PM domains that
+ * have their own device tree nodes. The xlate function's private data must
+ * be a valid pointer to struct generic_pm_domain.
+ */
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ if (genpdspec->args_count != 0)
+ return ERR_PTR(-EINVAL);
+ return data;
+}
+EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
+
+/**
+ * __of_genpd_xlate_onecell() - Xlate function using a single index.
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct genpd_onecell_data
+ *
+ * This is a generic xlate function that can be used to model simple PM domain
+ * controllers that have one device tree node and provide multiple PM domains.
+ * A single cell is used as an index into an array of PM domains specified in
+ * the genpd_onecell_data struct when registering the provider.
+ */
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[idx];
+}
+EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
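
A controller whose single DT node provides several domains would instead describe them with a genpd_onecell_data and the one-cell xlate above. A minimal sketch follows; the domain array and its size are hypothetical, and the field names are taken from their use in __of_genpd_xlate_onecell() above.

static struct generic_pm_domain *example_domains[2];	/* hypothetical array */

static struct genpd_onecell_data example_onecell_data = {
	.domains	= example_domains,
	.num_domains	= ARRAY_SIZE(example_domains),
};

/* In the provider's probe, after pm_genpd_init() on every array entry: */
ret = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
			      &example_onecell_data);
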
+
+/**
+ * __of_genpd_add_provider() - Register a PM domain provider for a node
+ * @np: Device node pointer associated with the PM domain provider.
+ * @xlate: Callback for decoding PM domain from phandle arguments.
+ * @data: Context pointer for @xlate callback.
+ */
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data)
+{
+ struct of_genpd_provider *cp;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->xlate = xlate;
+
+ mutex_lock(&of_genpd_mutex);
+ list_add(&cp->link, &of_genpd_providers);
+ mutex_unlock(&of_genpd_mutex);
+ pr_debug("Added domain provider from %s\n", np->full_name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
+
+/**
+ * of_genpd_del_provider() - Remove a previously registered PM domain provider
+ * @np: Device node pointer associated with the PM domain provider
+ */
+void of_genpd_del_provider(struct device_node *np)
+{
+ struct of_genpd_provider *cp;
+
+ mutex_lock(&of_genpd_mutex);
+ list_for_each_entry(cp, &of_genpd_providers, link) {
+ if (cp->node == np) {
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
+ }
+ mutex_unlock(&of_genpd_mutex);
+}
+EXPORT_SYMBOL_GPL(of_genpd_del_provider);
+
+/**
+ * of_genpd_get_from_provider() - Look-up PM domain
+ * @genpdspec: OF phandle args to use for look-up
+ *
+ * Looks for a PM domain provider under the node specified by @genpdspec and,
+ * if one is found, uses the provider's xlate function to map the phandle
+ * arguments to a PM domain.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
+ * on failure.
+ */
+static struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+ struct of_genpd_provider *provider;
+
+ mutex_lock(&of_genpd_mutex);
+
+ /* Check if we have such a provider in our array */
+ list_for_each_entry(provider, &of_genpd_providers, link) {
+ if (provider->node == genpdspec->np)
+ genpd = provider->xlate(genpdspec, provider->data);
+ if (!IS_ERR(genpd))
+ break;
+ }
+
+ mutex_unlock(&of_genpd_mutex);
+
+ return genpd;
+}
+
+/**
+ * genpd_dev_pm_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Currently not used
+ *
+ * Try to locate the generic PM domain that the device was previously attached
+ * to. If one is found, the device is detached from it.
+ */
+static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct generic_pm_domain *pd = NULL, *gpd;
+ int ret = 0;
+
+ if (!dev->pm_domain)
+ return;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (&gpd->domain == dev->pm_domain) {
+ pd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ if (!pd)
+ return;
+
+ dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
+ while (1) {
+ ret = pm_genpd_remove_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to remove from PM domain %s: %d\n",
+ pd->name, ret);
+ return;
+ }
+
+ /* Check if PM domain can be powered off after removing this device. */
+ genpd_queue_power_off_work(pd);
+}
+
+/**
+ * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
+ * @dev: Device to attach.
+ *
+ * Parse the device's OF node to find a PM domain specifier. If one is found,
+ * attach the device to the retrieved pm_domain ops.
+ *
+ * Both generic and legacy Samsung-specific DT bindings are supported to keep
+ * backwards compatibility with existing DTBs.
+ *
+ * Returns 0 when the device has been successfully attached to a PM domain,
+ * or a negative error code otherwise.
+ */
+int genpd_dev_pm_attach(struct device *dev)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0) {
+ if (ret != -ENOENT)
+ return ret;
+
+ /*
+ * Try legacy Samsung-specific bindings
+ * (for backwards compatibility of DT ABI)
+ */
+ pd_args.args_count = 0;
+ pd_args.np = of_parse_phandle(dev->of_node,
+ "samsung,power-domain", 0);
+ if (!pd_args.np)
+ return -ENOENT;
+ }
+
+ pd = of_genpd_get_from_provider(&pd_args);
+ if (IS_ERR(pd)) {
+ dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
+ __func__, PTR_ERR(pd));
+ of_node_put(dev->of_node);
+ return PTR_ERR(pd);
+ }
+
+ dev_dbg(dev, "adding to PM domain %s\n", pd->name);
+
+ while (1) {
+ ret = pm_genpd_add_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to add to PM domain %s: %d\n",
+ pd->name, ret);
+ of_node_put(dev->of_node);
+ return ret;
+ }
+
+ dev->pm_domain->detach = genpd_dev_pm_detach;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
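
On the consumer side, genpd_dev_pm_attach() is meant to be called from bus code at probe time rather than from individual drivers. The sketch below is hedged: the bus probe/remove functions and the driver probe are hypothetical, only the attach call and the detach hook installed above are taken from this file.

static int example_bus_probe(struct device *dev)
{
	int ret;

	/* Bind the device to the domain named by its "power-domains" property. */
	ret = genpd_dev_pm_attach(dev);
	if (ret && ret != -ENOENT && ret != -ENODEV)
		return ret;	/* hard failure, not just "no PM domain" */

	return example_driver_probe(dev);	/* hypothetical driver probe */
}

static int example_bus_remove(struct device *dev)
{
	if (dev->pm_domain && dev->pm_domain->detach)
		dev->pm_domain->detach(dev, true);	/* ends up in genpd_dev_pm_detach() */

	return 0;
}
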
+#endif
+
+
+/*** debugfs support ***/
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+static struct dentry *pm_genpd_debugfs_dir;
+
+/*
+ * TODO: This function is a slightly modified version of rtpm_status_show
+ * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
+ * are too loose to generalize it.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ static const char * const status_lookup[] = {
+ [RPM_ACTIVE] = "active",
+ [RPM_RESUMING] = "resuming",
+ [RPM_SUSPENDED] = "suspended",
+ [RPM_SUSPENDING] = "suspending"
+ };
+ const char *p = "";
+
+ if (dev->power.runtime_error)
+ p = "error";
+ else if (dev->power.disable_depth)
+ p = "unsupported";
+ else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
+ p = status_lookup[dev->power.runtime_status];
+ else
+ WARN_ON(1);
+
+ seq_puts(s, p);
+}
+#else
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ seq_puts(s, "active");
+}
+#endif
+
+static int pm_genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *gpd)
+{
+ static const char * const status_lookup[] = {
+ [GPD_STATE_ACTIVE] = "on",
+ [GPD_STATE_WAIT_MASTER] = "wait-master",
+ [GPD_STATE_BUSY] = "busy",
+ [GPD_STATE_REPEAT] = "off-in-progress",
+ [GPD_STATE_POWER_OFF] = "off"
+ };
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ struct gpd_link *link;
+ int ret;
+
+ ret = mutex_lock_interruptible(&gpd->lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+ seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
+
+ /*
+ * Modifications on the list require holding locks on both
+ * master and slave, so we are safe.
+ * Also gpd->name is immutable.
+ */
+ list_for_each_entry(link, &gpd->master_links, master_node) {
+ seq_printf(s, "%s", link->slave->name);
+ if (!list_is_last(&link->master_node, &gpd->master_links))
+ seq_puts(s, ", ");
+ }
+
+ list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "\n %-50s ", kobj_path);
+ rtpm_status_str(s, pm_data->dev);
+ kfree(kobj_path);
+ }
+
+ seq_puts(s, "\n");
+exit:
+ mutex_unlock(&gpd->lock);
+
+ return 0;
+}
+
+static int pm_genpd_summary_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *gpd;
+ int ret = 0;
+
+ seq_puts(s, " domain status slaves\n");
+ seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "----------------------------------------------------------------------\n");
+
+ ret = mutex_lock_interruptible(&gpd_list_lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ ret = pm_genpd_summary_one(s, gpd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+
+static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_genpd_summary_show, NULL);
+}
+
+static const struct file_operations pm_genpd_summary_fops = {
+ .open = pm_genpd_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init pm_genpd_debug_init(void)
+{
+ struct dentry *d;
+
+ pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+
+ if (!pm_genpd_debugfs_dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
+ pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+late_initcall(pm_genpd_debug_init);
+
+static void __exit pm_genpd_debug_exit(void)
+{
+ debugfs_remove_recursive(pm_genpd_debugfs_dir);
+}
+__exitcall(pm_genpd_debug_exit);
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index a089e3bcdfb..d88a62e104d 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
* default_stop_ok - Default PM domain governor routine for stopping devices.
* @dev: Device to check.
*/
-bool default_stop_ok(struct device *dev)
+static bool default_stop_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
unsigned long flags;
@@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
#else /* !CONFIG_PM_RUNTIME */
-bool default_stop_ok(struct device *dev)
-{
- return false;
-}
+static inline bool default_stop_ok(struct device *dev) { return false; }
#define default_power_down_ok NULL
#define always_on_power_down_ok NULL
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b67d9aef9fe..44973196d3f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
* Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
-static void dpm_resume_noirq(pm_message_t state)
+void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie)
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
-static void dpm_resume_early(pm_message_t state)
+void dpm_resume_early(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev)
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
-static int dpm_suspend_noirq(pm_message_t state)
+int dpm_suspend_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
@@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev)
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
-static int dpm_suspend_late(pm_message_t state)
+int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95b181d1ca6..a9d26ed11bf 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -92,9 +92,6 @@
* wakeup_count - Report the number of wakeup events related to the device
*/
-static const char enabled[] = "enabled";
-static const char disabled[] = "disabled";
-
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);
@@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
+static const char _enabled[] = "enabled";
+static const char _disabled[] = "disabled";
+
static ssize_t
wake_show(struct device * dev, struct device_attribute *attr, char * buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
- ? (device_may_wakeup(dev) ? enabled : disabled)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
@@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
- if (len == sizeof enabled - 1
- && strncmp(buf, enabled, sizeof enabled - 1) == 0)
+ if (len == sizeof _enabled - 1
+ && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof disabled - 1
- && strncmp(buf, disabled, sizeof disabled - 1) == 0)
+ else if (len == sizeof _disabled - 1
+ && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
@@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n",
- device_async_suspend_enabled(dev) ? enabled : disabled);
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
@@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
- if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
device_enable_async_suspend(dev);
- else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ else if (len == sizeof _disabled - 1 &&
+ strncmp(buf, _disabled, len) == 0)
device_disable_async_suspend(dev);
else
return -EINVAL;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8..c2744b30d5d 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
*/
bool events_check_enabled __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
+
/*
* Combined counters of registered wakeup events and wakeup events in progress.
* They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
pm_print_active_wakeup_sources();
}
- return ret;
+ return ret || pm_abort_suspend;
+}
+
+void pm_system_wakeup(void)
+{
+ pm_abort_suspend = true;
+ freeze_wake();
+}
+
+void pm_wakeup_clear(void)
+{
+ pm_abort_suspend = false;
}
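
As a usage sketch, a wakeup-capable driver's interrupt handler could call the new pm_system_wakeup() helper to abort a suspend that is already in progress. The device and handler below are made up; only the helper itself comes from this patch, and its declaration is assumed to live in <linux/suspend.h>.

#include <linux/interrupt.h>
#include <linux/suspend.h>

static irqreturn_t example_wake_irq(int irq, void *data)
{
	/*
	 * The event must reach user space even if a system suspend is
	 * under way, so flag the suspend for abortion.
	 */
	pm_system_wakeup();

	return IRQ_HANDLED;
}
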
/**
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8d..8d98a329f6e 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
+#include <linux/suspend.h>
#include <trace/events/power.h>
static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
pr_debug("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
- ret = check_wakeup_irqs();
- if (ret)
- return ret;
+ if (pm_wakeup_pending())
+ return -EBUSY;
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index ffe350f86bc..3489f8f5fad 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -183,14 +183,14 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
-config GENERIC_CPUFREQ_CPU0
- tristate "Generic CPU0 cpufreq driver"
+config CPUFREQ_DT
+ tristate "Generic DT based cpufreq driver"
depends on HAVE_CLK && OF
- # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
+ # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
- This adds a generic cpufreq driver for CPU0 frequency management.
+ This adds a generic DT based cpufreq driver for frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
systems which share clock and voltage across all CPUs.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 28c666c8014..83a75dc8476 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -92,7 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
+ depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index db6d9a2fea4..40c53dc1937 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
-obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
+obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
##################################################################################
# x86 drivers.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
deleted file mode 100644
index 0d2172b0776..00000000000
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * The OPP code in function cpu0_set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-
-static unsigned int transition_latency;
-static unsigned int voltage_tolerance; /* in percentage */
-
-static struct device *cpu_dev;
-static struct clk *cpu_clk;
-static struct regulator *cpu_reg;
-static struct cpufreq_frequency_table *freq_table;
-static struct thermal_cooling_device *cdev;
-
-static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
-{
- struct dev_pm_opp *opp;
- unsigned long volt = 0, volt_old = 0, tol = 0;
- unsigned int old_freq, new_freq;
- long freq_Hz, freq_exact;
- int ret;
-
- freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz <= 0)
- freq_Hz = freq_table[index].frequency * 1000;
-
- freq_exact = freq_Hz;
- new_freq = freq_Hz / 1000;
- old_freq = clk_get_rate(cpu_clk) / 1000;
-
- if (!IS_ERR(cpu_reg)) {
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- pr_err("failed to find OPP for %ld\n", freq_Hz);
- return PTR_ERR(opp);
- }
- volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- tol = volt * voltage_tolerance / 100;
- volt_old = regulator_get_voltage(cpu_reg);
- }
-
- pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
- old_freq / 1000, volt_old ? volt_old / 1000 : -1,
- new_freq / 1000, volt ? volt / 1000 : -1);
-
- /* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- pr_err("failed to scale voltage up: %d\n", ret);
- return ret;
- }
- }
-
- ret = clk_set_rate(cpu_clk, freq_exact);
- if (ret) {
- pr_err("failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg))
- regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
- }
-
- /* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- pr_err("failed to scale voltage down: %d\n", ret);
- clk_set_rate(cpu_clk, old_freq * 1000);
- }
- }
-
- return ret;
-}
-
-static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = cpu_clk;
- return cpufreq_generic_init(policy, freq_table, transition_latency);
-}
-
-static struct cpufreq_driver cpu0_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cpu0_set_target,
- .get = cpufreq_generic_get,
- .init = cpu0_cpufreq_init,
- .name = "generic_cpu0",
- .attr = cpufreq_generic_attr,
-};
-
-static int cpu0_cpufreq_probe(struct platform_device *pdev)
-{
- struct device_node *np;
- int ret;
-
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev) {
- pr_err("failed to get cpu0 device\n");
- return -ENODEV;
- }
-
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- pr_err("failed to find cpu0 node\n");
- return -ENOENT;
- }
-
- cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
- if (IS_ERR(cpu_reg)) {
- /*
- * If cpu0 regulator supply node is present, but regulator is
- * not yet registered, we should try defering probe.
- */
- if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
- dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
- ret = -EPROBE_DEFER;
- goto out_put_node;
- }
- pr_warn("failed to get cpu0 regulator: %ld\n",
- PTR_ERR(cpu_reg));
- }
-
- cpu_clk = clk_get(cpu_dev, NULL);
- if (IS_ERR(cpu_clk)) {
- ret = PTR_ERR(cpu_clk);
- pr_err("failed to get cpu0 clock: %d\n", ret);
- goto out_put_reg;
- }
-
- /* OPPs might be populated at runtime, don't check for error here */
- of_init_opp_table(cpu_dev);
-
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
- if (ret) {
- pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_put_clk;
- }
-
- of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
-
- if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
-
- if (!IS_ERR(cpu_reg)) {
- struct dev_pm_opp *opp;
- unsigned long min_uV, max_uV;
- int i;
-
- /*
- * OPP is maintained in order of increasing frequency, and
- * freq_table initialised from OPP is therefore sorted in the
- * same order.
- */
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
- ;
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_exact(cpu_dev,
- freq_table[0].frequency * 1000, true);
- min_uV = dev_pm_opp_get_voltage(opp);
- opp = dev_pm_opp_find_freq_exact(cpu_dev,
- freq_table[i-1].frequency * 1000, true);
- max_uV = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
-
- ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
- if (ret) {
- pr_err("failed register driver: %d\n", ret);
- goto out_free_table;
- }
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
- if (IS_ERR(cdev))
- pr_err("running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev));
- }
-
- of_node_put(np);
- return 0;
-
-out_free_table:
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_put_clk:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
-out_put_reg:
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
-out_put_node:
- of_node_put(np);
- return ret;
-}
-
-static int cpu0_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_cooling_unregister(cdev);
- cpufreq_unregister_driver(&cpu0_cpufreq_driver);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-
- return 0;
-}
-
-static struct platform_driver cpu0_cpufreq_platdrv = {
- .driver = {
- .name = "cpufreq-cpu0",
- .owner = THIS_MODULE,
- },
- .probe = cpu0_cpufreq_probe,
- .remove = cpu0_cpufreq_remove,
-};
-module_platform_driver(cpu0_cpufreq_platdrv);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
new file mode 100644
index 00000000000..6bbb8b91344
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * Copyright (C) 2014 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * The OPP code in function set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+struct private_data {
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct thermal_cooling_device *cdev;
+ unsigned int voltage_tolerance; /* in percentage */
+};
+
+static int set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
+ struct clk *cpu_clk = policy->clk;
+ struct private_data *priv = policy->driver_data;
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg = priv->cpu_reg;
+ unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
+ long freq_Hz, freq_exact;
+ int ret;
+
+ freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+ if (freq_Hz <= 0)
+ freq_Hz = freq_table[index].frequency * 1000;
+
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
+
+ if (!IS_ERR(cpu_reg)) {
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n",
+ freq_Hz);
+ return PTR_ERR(opp);
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ tol = volt * priv->voltage_tolerance / 100;
+ volt_old = regulator_get_voltage(cpu_reg);
+ }
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
+
+ /* scaling up? scale voltage before frequency */
+ if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
+ ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale voltage up: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate(cpu_clk, freq_exact);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ if (!IS_ERR(cpu_reg))
+ regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
+ ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale voltage down: %d\n",
+ ret);
+ clk_set_rate(cpu_clk, old_freq * 1000);
+ }
+ }
+
+ return ret;
+}
+
+static int allocate_resources(int cpu, struct device **cdev,
+ struct regulator **creg, struct clk **cclk)
+{
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ int ret = 0;
+ char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+
+ /* Try "cpu0" for older DTs */
+ if (!cpu)
+ reg = reg_cpu0;
+ else
+ reg = reg_cpu;
+
+try_again:
+ cpu_reg = regulator_get_optional(cpu_dev, reg);
+ if (IS_ERR(cpu_reg)) {
+ /*
+ * If the CPU's regulator supply node is present, but the regulator
+ * is not yet registered, we should try deferring probe.
+ */
+ if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+ dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
+ cpu);
+ return -EPROBE_DEFER;
+ }
+
+ /* Try with "cpu-supply" */
+ if (reg == reg_cpu0) {
+ reg = reg_cpu;
+ goto try_again;
+ }
+
+ dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n",
+ cpu, PTR_ERR(cpu_reg));
+ }
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ /* put regulator */
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ ret = PTR_ERR(cpu_clk);
+
+ /*
+ * If the CPU's clk node is present, but the clock is not yet
+ * registered, we should try deferring probe.
+ */
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+ else
+ dev_err(cpu_dev, "failed to get cpu%d clock: %d\n",
+ cpu, ret);
+ } else {
+ *cdev = cpu_dev;
+ *creg = cpu_reg;
+ *cclk = cpu_clk;
+ }
+
+ return ret;
+}
+
+static int cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct thermal_cooling_device *cdev;
+ struct device_node *np;
+ struct private_data *priv;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ unsigned int transition_latency;
+ int ret;
+
+ ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
+ if (ret) {
+ pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
+ return ret;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
+ ret = -ENOENT;
+ goto out_put_reg_clk;
+ }
+
+ /* OPPs might be populated at runtime, don't check for error here */
+ of_init_opp_table(cpu_dev);
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_put_node;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_table;
+ }
+
+ of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ if (!IS_ERR(cpu_reg)) {
+ struct dev_pm_opp *opp;
+ unsigned long min_uV, max_uV;
+ int i;
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+ ;
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_uV = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
+ freq_table[i-1].frequency * 1000, true);
+ max_uV = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
+ if (IS_ERR(cdev))
+ dev_err(cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+ else
+ priv->cdev = cdev;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->cpu_reg = cpu_reg;
+ policy->driver_data = priv;
+
+ policy->clk = cpu_clk;
+ ret = cpufreq_generic_init(policy, freq_table, transition_latency);
+ if (ret)
+ goto out_cooling_unregister;
+
+ of_node_put(np);
+
+ return 0;
+
+out_cooling_unregister:
+ cpufreq_cooling_unregister(priv->cdev);
+ kfree(priv);
+out_free_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+ of_node_put(np);
+out_put_reg_clk:
+ clk_put(cpu_clk);
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ return ret;
+}
+
+static int cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ clk_put(policy->clk);
+ if (!IS_ERR(priv->cpu_reg))
+ regulator_put(priv->cpu_reg);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct cpufreq_driver dt_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = set_target,
+ .get = cpufreq_generic_get,
+ .init = cpufreq_init,
+ .exit = cpufreq_exit,
+ .name = "cpufreq-dt",
+ .attr = cpufreq_generic_attr,
+};
+
+static int dt_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ int ret;
+
+ /*
+ * All per-cluster (CPUs sharing clock/voltages) initialization is done
+ * from ->init(). In probe(), we just need to make sure that clk and
+ * regulators are available. Else defer probe and retry.
+ *
+ * FIXME: Is checking this only for CPU0 sufficient ?
+ */
+ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ if (ret)
+ return ret;
+
+ clk_put(cpu_clk);
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ ret = cpufreq_register_driver(&dt_cpufreq_driver);
+ if (ret)
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+
+ return ret;
+}
+
+static int dt_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&dt_cpufreq_driver);
+ return 0;
+}
+
+static struct platform_driver dt_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-dt",
+ .owner = THIS_MODULE,
+ },
+ .probe = dt_cpufreq_probe,
+ .remove = dt_cpufreq_remove,
+};
+module_platform_driver(dt_cpufreq_platdrv);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 61190f6b482..24bf76fba14 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -437,7 +437,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
struct cpufreq_governor *t;
list_for_each_entry(t, &cpufreq_governor_list, governor_list)
- if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
@@ -455,10 +455,10 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
goto out;
if (cpufreq_driver->setpolicy) {
- if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_PERFORMANCE;
err = 0;
- } else if (!strnicmp(str_governor, "powersave",
+ } else if (!strncasecmp(str_governor, "powersave",
CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
@@ -1382,7 +1382,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!cpufreq_suspended)
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, new_cpu, cpu);
- } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+ } else if (cpufreq_driver->stop_cpu) {
cpufreq_driver->stop_cpu(policy);
}
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index 61a54310a1b..843ec824fd9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -127,7 +127,7 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
- * cpufreq-cpu0 driver.
+ * cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
if (!np) {
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 351a2074cfe..9e78a850e29 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -174,7 +174,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
- * cpufreq-cpu0 driver.
+ * cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
if (!np) {
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index c91ce69dc63..3eafdc7ba78 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -153,7 +153,7 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
* dependencies on platform headers. It is necessary to enable
* Exynos multi-platform support and will be removed together with
* this whole driver as soon as Exynos gets migrated to use
- * cpufreq-cpu0 driver.
+ * cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
if (!np) {
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index bf8902a0866..ec399ad2f05 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*
* This driver provides the clk notifier callbacks that are used when
- * the cpufreq-cpu0 driver changes to frequency to alert the highbank
+ * the cpufreq-dt driver changes the frequency to alert the highbank
* EnergyCore Management Engine (ECME) about the need to change
* voltage. The ECME interfaces with the actual voltage regulators.
*/
@@ -60,7 +60,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
static int hb_cpufreq_driver_init(void)
{
- struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+ struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct device *cpu_dev;
struct clk *cpu_clk;
struct device_node *np;
@@ -95,7 +95,7 @@ static int hb_cpufreq_driver_init(void)
goto out_put_node;
}
- /* Instantiate cpufreq-cpu0 */
+ /* Instantiate cpufreq-dt */
platform_device_register_full(&devinfo);
out_put_node:
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 379c0837f5a..2dfd4fdb5a5 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -26,6 +26,7 @@
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -35,6 +36,7 @@
#define POWERNV_MAX_PSTATES 256
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+static bool rebooting;
/*
* Note: The set of pstates consists of contiguous integers, the
@@ -284,6 +286,15 @@ static void set_pstate(void *freq_data)
}
/*
+ * get_nominal_index: Returns the index corresponding to the nominal
+ * pstate in the cpufreq table
+ */
+static inline unsigned int get_nominal_index(void)
+{
+ return powernv_pstate_info.max - powernv_pstate_info.nominal;
+}
+
+/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
* mask policy->cpus
@@ -293,6 +304,9 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
{
struct powernv_smp_call_data freq_data;
+ if (unlikely(rebooting) && new_index != get_nominal_index())
+ return 0;
+
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
/*
@@ -317,6 +331,33 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
return cpufreq_table_validate_and_show(policy, powernv_freqs);
}
+static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ int cpu;
+ struct cpufreq_policy cpu_policy;
+
+ rebooting = true;
+ for_each_online_cpu(cpu) {
+ cpufreq_get_policy(&cpu_policy, cpu);
+ powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block powernv_cpufreq_reboot_nb = {
+ .notifier_call = powernv_cpufreq_reboot_notifier,
+};
+
+static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+{
+ struct powernv_smp_call_data freq_data;
+
+ freq_data.pstate_id = powernv_pstate_info.min;
+ smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+}
+
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
@@ -324,6 +365,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.get = powernv_cpufreq_get,
+ .stop_cpu = powernv_cpufreq_stop_cpu,
.attr = powernv_cpu_freq_attr,
};
@@ -342,12 +384,14 @@ static int __init powernv_cpufreq_init(void)
return rc;
}
+ register_reboot_notifier(&powernv_cpufreq_reboot_nb);
return cpufreq_register_driver(&powernv_cpufreq_driver);
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
+ unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
cpufreq_unregister_driver(&powernv_cpufreq_driver);
}
module_exit(powernv_cpufreq_exit);
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3607070797a..bee5df7794d 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -199,7 +199,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
data->table = table;
- per_cpu(cpu_data, cpu) = data;
/* update ->cpus if we have cluster, no harm if not */
cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 3f9791f07b8..567caa6313f 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -597,7 +597,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
* and dependencies on platform headers. It is necessary to enable
* S5PV210 multi-platform support and will be removed together with
* this whole driver as soon as S5PV210 gets migrated to use
- * cpufreq-cpu0 driver.
+ * cpufreq-dt driver.
*/
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 32748c36c47..c5029c1209b 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -25,11 +25,19 @@ config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
default y
+config DT_IDLE_STATES
+ bool
+
menu "ARM CPU Idle Drivers"
depends on ARM
source "drivers/cpuidle/Kconfig.arm"
endmenu
+menu "ARM64 CPU Idle Drivers"
+depends on ARM64
+source "drivers/cpuidle/Kconfig.arm64"
+endmenu
+
menu "MIPS CPU Idle Drivers"
depends on MIPS
source "drivers/cpuidle/Kconfig.mips"
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 58bcd0d166e..8c16ab20fb1 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -7,6 +7,7 @@ config ARM_BIG_LITTLE_CPUIDLE
depends on MCPM
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
+ select DT_IDLE_STATES
help
Select this option to enable CPU idle driver for big.LITTLE based
ARM systems. Driver manages CPUs coordination through MCPM and
diff --git a/drivers/cpuidle/Kconfig.arm64 b/drivers/cpuidle/Kconfig.arm64
new file mode 100644
index 00000000000..d0a08ed1b2e
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.arm64
@@ -0,0 +1,14 @@
+#
+# ARM64 CPU Idle drivers
+#
+
+config ARM64_CPUIDLE
+ bool "Generic ARM64 CPU idle Driver"
+ select ARM64_CPU_SUSPEND
+ select DT_IDLE_STATES
+ help
+ Select this to enable the generic cpuidle driver for ARM64.
+ It provides a generic idle driver whose idle states are configured
+ at run-time through DT nodes. The CPUidle suspend backend is
+ initialized by calling the CPU operations init idle hook
+ provided by architecture code.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 11edb31c55e..4d177b916f7 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,6 +4,7 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
+obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
##################################################################################
# ARM SoC drivers
@@ -22,6 +23,10 @@ obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o
###############################################################################
+# ARM64 drivers
+obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o
+
+###############################################################################
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
diff --git a/drivers/cpuidle/cpuidle-arm64.c b/drivers/cpuidle/cpuidle-arm64.c
new file mode 100644
index 00000000000..50997ea942f
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-arm64.c
@@ -0,0 +1,133 @@
+/*
+ * ARM64 generic CPU idle driver.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "CPUidle arm64: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <asm/cpuidle.h>
+#include <asm/suspend.h>
+
+#include "dt_idle_states.h"
+
+/*
+ * arm64_enter_idle_state - Programs CPU to enter the specified state
+ *
+ * dev: cpuidle device
+ * drv: cpuidle driver
+ * idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int arm64_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ int ret;
+
+ if (!idx) {
+ cpu_do_idle();
+ return idx;
+ }
+
+ ret = cpu_pm_enter();
+ if (!ret) {
+ /*
+ * Pass idle state index to cpu_suspend which in turn will
+ * call the CPU ops suspend protocol with idle index as a
+ * parameter.
+ */
+ ret = cpu_suspend(idx);
+
+ cpu_pm_exit();
+ }
+
+ return ret ? -1 : idx;
+}
+
+static struct cpuidle_driver arm64_idle_driver = {
+ .name = "arm64_idle",
+ .owner = THIS_MODULE,
+ /*
+ * State at index 0 is standby wfi and considered standard
+ * on all ARM platforms. If in some platforms simple wfi
+ * can't be used as "state 0", DT bindings must be implemented
+ * to work around this issue and allow installing a special
+ * handler for idle state index 0.
+ */
+ .states[0] = {
+ .enter = arm64_enter_idle_state,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "WFI",
+ .desc = "ARM64 WFI",
+ }
+};
+
+static const struct of_device_id arm64_idle_state_match[] __initconst = {
+ { .compatible = "arm,idle-state",
+ .data = arm64_enter_idle_state },
+ { },
+};
+
+/*
+ * arm64_idle_init
+ *
+ * Registers the arm64 specific cpuidle driver with the cpuidle
+ * framework. It relies on core code to parse the idle states
+ * and initialize them using driver data structures accordingly.
+ */
+static int __init arm64_idle_init(void)
+{
+ int cpu, ret;
+ struct cpuidle_driver *drv = &arm64_idle_driver;
+
+ /*
+ * Initialize idle states data, starting at index 1.
+ * This driver is DT only; if no DT idle states are detected (ret == 0),
+ * let the driver initialization fail accordingly since there is no
+ * reason to initialize the idle driver if only wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
+ if (ret <= 0) {
+ if (ret)
+ pr_err("failed to initialize idle states\n");
+ return ret ? : -ENODEV;
+ }
+
+ /*
+ * Call the arch CPU operations in order to initialize the
+ * idle-state suspend back-end specific data for each CPU
+ */
+ for_each_possible_cpu(cpu) {
+ ret = cpu_init_idle(cpu);
+ if (ret) {
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ return ret;
+ }
+ }
+
+ ret = cpuidle_register(drv, NULL);
+ if (ret) {
+ pr_err("failed to register cpuidle driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+device_initcall(arm64_idle_init);
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index ef94c3b81f1..fbc00a1d3c4 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -24,6 +24,8 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
+#include "dt_idle_states.h"
+
static int bl_enter_powerdown(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx);
@@ -73,6 +75,12 @@ static struct cpuidle_driver bl_idle_little_driver = {
.state_count = 2,
};
+static const struct of_device_id bl_idle_state_match[] __initconst = {
+ { .compatible = "arm,idle-state",
+ .data = bl_enter_powerdown },
+ { },
+};
+
static struct cpuidle_driver bl_idle_big_driver = {
.name = "big_idle",
.owner = THIS_MODULE,
@@ -159,6 +167,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
static const struct of_device_id compatible_machine_match[] = {
{ .compatible = "arm,vexpress,v2p-ca15_a7" },
{ .compatible = "samsung,exynos5420" },
+ { .compatible = "samsung,exynos5800" },
{},
};
@@ -190,6 +199,17 @@ static int __init bl_idle_init(void)
if (ret)
goto out_uninit_little;
+ /* Start at index 1, index 0 standard WFI */
+ ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
+ if (ret < 0)
+ goto out_uninit_big;
+
+ /* Start at index 1, index 0 standard WFI */
+ ret = dt_init_idle_driver(&bl_idle_little_driver,
+ bl_idle_state_match, 1);
+ if (ret < 0)
+ goto out_uninit_big;
+
ret = cpuidle_register(&bl_idle_little_driver, NULL);
if (ret)
goto out_uninit_big;
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
new file mode 100644
index 00000000000..52f4d11bbf3
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -0,0 +1,213 @@
+/*
+ * DT idle states parsing code.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "DT idle-states: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "dt_idle_states.h"
+
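+/*
+ * init_state_node() fills in one cpuidle_state from a DT idle state node
+ * matched against the caller-supplied table: the enter callback comes from
+ * the matching of_device_id's data pointer, the exit latency from
+ * wakeup-latency-us (or, failing that, entry-latency-us + exit-latency-us),
+ * the target residency from min-residency-us, and the optional
+ * local-timer-stop property sets CPUIDLE_FLAG_TIMER_STOP.
+ */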
+static int init_state_node(struct cpuidle_state *idle_state,
+ const struct of_device_id *matches,
+ struct device_node *state_node)
+{
+ int err;
+ const struct of_device_id *match_id;
+
+ match_id = of_match_node(matches, state_node);
+ if (!match_id)
+ return -ENODEV;
+ /*
+ * CPUidle drivers are expected to initialize the const void *data
+ * pointer of the passed in struct of_device_id array to the idle
+ * state enter function.
+ */
+ idle_state->enter = match_id->data;
+
+ err = of_property_read_u32(state_node, "wakeup-latency-us",
+ &idle_state->exit_latency);
+ if (err) {
+ u32 entry_latency, exit_latency;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %s missing entry-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %s missing exit-latency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+ /*
+ * If wakeup-latency-us is missing, default to entry+exit
+ * latencies as defined in idle states bindings
+ */
+ idle_state->exit_latency = entry_latency + exit_latency;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us",
+ &idle_state->target_residency);
+ if (err) {
+ pr_debug(" * %s missing min-residency-us property\n",
+ state_node->full_name);
+ return -EINVAL;
+ }
+
+ idle_state->flags = CPUIDLE_FLAG_TIME_VALID;
+ if (of_property_read_bool(state_node, "local-timer-stop"))
+ idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ /*
+ * TODO:
+ * replace with kstrdup and pointer assignment when name
+ * and desc become string pointers
+ */
+ strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
+ strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1);
+ return 0;
+}
+
+/*
+ * Check that the idle state is uniform across all CPUs in the CPUidle driver
+ * cpumask
+ */
+static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
+ const cpumask_t *cpumask)
+{
+ int cpu;
+ struct device_node *cpu_node, *curr_state_node;
+ bool valid = true;
+
+ /*
+ * Compare idle state phandles for index idx on all CPUs in the
+ * CPUidle driver cpumask. Start from next logical cpu following
+ * cpumask_first(cpumask) since that's the CPU state_node was
+ * retrieved from. If a mismatch is found, bail out straight
+ * away since we certainly hit a firmware misconfiguration.
+ */
+ for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
+ cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
+ cpu_node = of_cpu_device_node_get(cpu);
+ curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ idx);
+ if (state_node != curr_state_node)
+ valid = false;
+
+ of_node_put(curr_state_node);
+ of_node_put(cpu_node);
+ if (!valid)
+ break;
+ }
+
+ return valid;
+}
+
+/**
+ * dt_init_idle_driver() - Parse the DT idle states and initialize the
+ * idle driver states array
+ * @drv: Pointer to CPU idle driver to be initialized
+ * @matches: Array of of_device_id match structures to search in for
+ * compatible idle state nodes. The data pointer for each valid
+ * struct of_device_id entry in the matches array must point to
+ * a function with the following signature, that corresponds to
+ * the CPUidle state enter function signature:
+ *
+ * int (*)(struct cpuidle_device *dev,
+ * struct cpuidle_driver *drv,
+ * int index);
+ *
+ * @start_idx: First idle state index to be initialized
+ *
+ * If DT idle states are detected and are valid the state count and states
+ * array entries in the cpuidle driver are initialized accordingly starting
+ * from index start_idx.
+ *
+ * Return: number of valid DT idle states parsed, <0 on failure
+ */
+int dt_init_idle_driver(struct cpuidle_driver *drv,
+ const struct of_device_id *matches,
+ unsigned int start_idx)
+{
+ struct cpuidle_state *idle_state;
+ struct device_node *state_node, *cpu_node;
+ int i, err = 0;
+ const cpumask_t *cpumask;
+ unsigned int state_idx = start_idx;
+
+ if (state_idx >= CPUIDLE_STATE_MAX)
+ return -EINVAL;
+ /*
+ * We get the idle states for the first logical cpu in the
+ * driver mask (or cpu_possible_mask if the driver cpumask is not set)
+ * and we check through idle_state_valid() if they are uniform
+ * across CPUs, otherwise we hit a firmware misconfiguration.
+ */
+ cpumask = drv->cpumask ? : cpu_possible_mask;
+ cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
+
+ for (i = 0; ; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ if (!state_node)
+ break;
+
+ if (!idle_state_valid(state_node, i, cpumask)) {
+ pr_warn("%s idle state not valid, bailing out\n",
+ state_node->full_name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (state_idx == CPUIDLE_STATE_MAX) {
+ pr_warn("State index reached static CPU idle driver states array size\n");
+ break;
+ }
+
+ idle_state = &drv->states[state_idx++];
+ err = init_state_node(idle_state, matches, state_node);
+ if (err) {
+ pr_err("Parsing idle state node %s failed with err %d\n",
+ state_node->full_name, err);
+ err = -EINVAL;
+ break;
+ }
+ of_node_put(state_node);
+ }
+
+ of_node_put(state_node);
+ of_node_put(cpu_node);
+ if (err)
+ return err;
+ /*
+ * Update the driver state count only if some valid DT idle states
+ * were detected
+ */
+ if (i)
+ drv->state_count = state_idx;
+
+ /*
+ * Return the number of present and valid DT idle states, which can
+ * also be 0 on platforms with missing DT idle states or legacy DT
+ * configuration predating the DT idle states bindings.
+ */
+ return i;
+}
+EXPORT_SYMBOL_GPL(dt_init_idle_driver);
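For reference, a minimal sketch of the intended call pattern follows (the foo_* names are hypothetical; the arm64 and big.LITTLE drivers in this series follow the same shape):

static int foo_enter_idle(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv, int idx);

/* .data must point at the idle state enter callback, as documented above */
static const struct of_device_id foo_idle_state_match[] __initconst = {
        { .compatible = "arm,idle-state", .data = foo_enter_idle },
        { },
};

static struct cpuidle_driver foo_idle_driver;  /* states[0] is plain WFI */

static int __init foo_idle_init(void)
{
        int ret;

        /* DT-provided states fill the driver's states array from index 1 */
        ret = dt_init_idle_driver(&foo_idle_driver, foo_idle_state_match, 1);
        if (ret <= 0)
                return ret ? : -ENODEV;

        return cpuidle_register(&foo_idle_driver, NULL);
}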
diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h
new file mode 100644
index 00000000000..4818134bc65
--- /dev/null
+++ b/drivers/cpuidle/dt_idle_states.h
@@ -0,0 +1,7 @@
+#ifndef __DT_IDLE_STATES
+#define __DT_IDLE_STATES
+
+int dt_init_idle_driver(struct cpuidle_driver *drv,
+ const struct of_device_id *matches,
+ unsigned int start_idx);
+#endif
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index ca89412f512..fb9f511cca2 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -28,7 +28,7 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
struct cpuidle_governor *gov;
list_for_each_entry(gov, &cpuidle_governors, governor_list)
- if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN))
+ if (!strncasecmp(str, gov->name, CPUIDLE_NAME_LEN))
return gov;
return NULL;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3dced0a9eae..faf4e70c42e 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -78,9 +78,8 @@ config ARM_EXYNOS4_BUS_DEVFREQ
This does not yet operate with optimal voltages.
config ARM_EXYNOS5_BUS_DEVFREQ
- bool "ARM Exynos5250 Bus DEVFREQ Driver"
+ tristate "ARM Exynos5250 Bus DEVFREQ Driver"
depends on SOC_EXYNOS5250
- select ARCH_HAS_OPP
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_OPP
help
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9f90369dd6b..30b538d8cc9 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1119,6 +1119,7 @@ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
return opp;
}
+EXPORT_SYMBOL(devfreq_recommended_opp);
/**
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
@@ -1142,6 +1143,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
return ret;
}
+EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
@@ -1168,6 +1170,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
return ret;
}
+EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c
index 75fcc5140ff..97b75e513d2 100644
--- a/drivers/devfreq/exynos/exynos_ppmu.c
+++ b/drivers/devfreq/exynos/exynos_ppmu.c
@@ -73,6 +73,7 @@ void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data)
exynos_ppmu_start(ppmu_base);
}
}
+EXPORT_SYMBOL(busfreq_mon_reset);
void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
{
@@ -97,6 +98,7 @@ void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
busfreq_mon_reset(ppmu_data);
}
+EXPORT_SYMBOL(exynos_read_ppmu);
int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
{
@@ -114,3 +116,4 @@ int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
return busy;
}
+EXPORT_SYMBOL(exynos_get_busier_ppmu);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index ccfbbab82a1..2f90ac6a7f7 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -50,6 +50,7 @@
#include <linux/irqflags.h>
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/acpi.h>
#include <linux/jump_label.h>
#include <asm/uaccess.h>
@@ -643,10 +644,13 @@ static int i2c_device_probe(struct device *dev)
if (status < 0)
return status;
- acpi_dev_pm_attach(&client->dev, true);
- status = driver->probe(client, i2c_match_id(driver->id_table, client));
- if (status)
- acpi_dev_pm_detach(&client->dev, true);
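+ /*
+ * Attach the client to its PM domain, if any; -EPROBE_DEFER from the
+ * attach is propagated so that the probe is retried once the domain
+ * becomes available.
+ */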
+ status = dev_pm_domain_attach(&client->dev, true);
+ if (status != -EPROBE_DEFER) {
+ status = driver->probe(client, i2c_match_id(driver->id_table,
+ client));
+ if (status)
+ dev_pm_domain_detach(&client->dev, true);
+ }
return status;
}
@@ -666,7 +670,7 @@ static int i2c_device_remove(struct device *dev)
status = driver->remove(client);
}
- acpi_dev_pm_detach(&client->dev, true);
+ dev_pm_domain_detach(&client->dev, true);
return status;
}
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 4fa8fef9147..65cf7a7e05e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/acpi.h>
#include <linux/mmc/card.h>
@@ -315,7 +316,7 @@ int sdio_add_func(struct sdio_func *func)
ret = device_add(&func->dev);
if (ret == 0) {
sdio_func_set_present(func);
- acpi_dev_pm_attach(&func->dev, false);
+ dev_pm_domain_attach(&func->dev, false);
}
return ret;
@@ -332,7 +333,7 @@ void sdio_remove_func(struct sdio_func *func)
if (!sdio_func_present(func))
return;
- acpi_dev_pm_detach(&func->dev, false);
+ dev_pm_domain_detach(&func->dev, false);
device_del(&func->dev);
put_device(&func->dev);
}
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 82e06a86cd7..a9f9c46e502 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str)
}
__setup("pcie_pme=", pcie_pme_setup);
+enum pme_suspend_level {
+ PME_SUSPEND_NONE = 0,
+ PME_SUSPEND_WAKEUP,
+ PME_SUSPEND_NOIRQ,
+};
+
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
struct work_struct work;
- bool noirq; /* Don't enable the PME interrupt used by this service. */
+ enum pme_suspend_level suspend_level;
};
/**
@@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
spin_lock_irq(&data->lock);
for (;;) {
- if (data->noirq)
+ if (data->suspend_level != PME_SUSPEND_NONE)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
@@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
spin_lock_irq(&data->lock);
}
- if (!data->noirq)
+ if (data->suspend_level == PME_SUSPEND_NONE)
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
@@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv)
return ret;
}
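+/*
+ * Check whether any device on @bus, or on any bus below it, is configured
+ * as a system wakeup source.
+ */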
+static bool pcie_pme_check_wakeup(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ if (!bus)
+ return false;
+
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (device_may_wakeup(&dev->dev)
+ || pcie_pme_check_wakeup(dev->subordinate))
+ return true;
+
+ return false;
+}
+
/**
* pcie_pme_suspend - Suspend PCIe PME service device.
* @srv: PCIe service device to suspend.
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
+ bool wakeup;
+ if (device_may_wakeup(&port->dev)) {
+ wakeup = true;
+ } else {
+ down_read(&pci_bus_sem);
+ wakeup = pcie_pme_check_wakeup(port->subordinate);
+ up_read(&pci_bus_sem);
+ }
spin_lock_irq(&data->lock);
- pcie_pme_interrupt_enable(port, false);
- pcie_clear_root_pme_status(port);
- data->noirq = true;
+ if (wakeup) {
+ enable_irq_wake(srv->irq);
+ data->suspend_level = PME_SUSPEND_WAKEUP;
+ } else {
+ struct pci_dev *port = srv->port;
+
+ pcie_pme_interrupt_enable(port, false);
+ pcie_clear_root_pme_status(port);
+ data->suspend_level = PME_SUSPEND_NOIRQ;
+ }
spin_unlock_irq(&data->lock);
synchronize_irq(srv->irq);
@@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv)
static int pcie_pme_resume(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
- struct pci_dev *port = srv->port;
spin_lock_irq(&data->lock);
- data->noirq = false;
- pcie_clear_root_pme_status(port);
- pcie_pme_interrupt_enable(port, true);
+ if (data->suspend_level == PME_SUSPEND_NOIRQ) {
+ struct pci_dev *port = srv->port;
+
+ pcie_clear_root_pme_status(port);
+ pcie_pme_interrupt_enable(port, true);
+ } else {
+ disable_irq_wake(srv->irq);
+ }
+ data->suspend_level = PME_SUSPEND_NONE;
spin_unlock_irq(&data->lock);
return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 87aa28c4280..2655d4a988f 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1050,6 +1050,13 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = {
},
};
+static const struct acpi_device_id fujitsu_ids[] __used = {
+ {ACPI_FUJITSU_HID, 0},
+ {ACPI_FUJITSU_HOTKEY_HID, 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
+
static int __init fujitsu_init(void)
{
int ret, result, max_brightness;
@@ -1208,12 +1215,3 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
-
-static struct pnp_device_id pnp_ids[] __used = {
- {.id = "FUJ02bf"},
- {.id = "FUJ02B1"},
- {.id = "FUJ02E3"},
- {.id = ""}
-};
-
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 2a1008b6112..7f3d389bd60 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -10,3 +10,11 @@ menuconfig POWER_AVS
AVS is also called SmartReflex on OMAP devices.
Say Y here to enable Adaptive Voltage Scaling class support.
+
+config ROCKCHIP_IODOMAIN
+ tristate "Rockchip IO domain support"
+ depends on ARCH_ROCKCHIP && OF
+ help
+ Say y here to enable support for IO domains on Rockchip SoCs. It is
+ necessary for the IO domain setting of the SoC to match the
+ voltage supplied by the regulators.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index 0843386a6c1..ba4c7bc6922 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
+obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
new file mode 100644
index 00000000000..3ae35d0590d
--- /dev/null
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -0,0 +1,351 @@
+/*
+ * Rockchip IO Voltage Domain driver
+ *
+ * Copyright 2014 MundoReader S.L.
+ * Copyright 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MAX_SUPPLIES 16
+
+/*
+ * The max voltages for 1.8V and 3.3V come from the Rockchip datasheet under
+ * "Recommended Operating Conditions" for "Digital GPIO". When the typical
+ * is 3.3V the max is 3.6V. When the typical is 1.8V the max is 1.98V.
+ *
+ * They are used like this:
+ * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the
+ * SoC we're at 3.3.
+ * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider
+ * that to be an error.
+ */
+#define MAX_VOLTAGE_1_8 1980000
+#define MAX_VOLTAGE_3_3 3600000
+
+#define RK3288_SOC_CON2 0x24c
+#define RK3288_SOC_CON2_FLASH0 BIT(7)
+#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+
+struct rockchip_iodomain;
+
+/**
+ * struct rockchip_iodomain_soc_data - SoC-specific IO domain description
+ * @grf_offset: offset of the IO voltage control register in the GRF
+ * @supply_names: supply names matching the register bits, indexed by bit
+ * @init: optional SoC-specific hook run once all supplies are set up
+ */
+struct rockchip_iodomain_soc_data {
+ int grf_offset;
+ const char *supply_names[MAX_SUPPLIES];
+ void (*init)(struct rockchip_iodomain *iod);
+};
+
+struct rockchip_iodomain_supply {
+ struct rockchip_iodomain *iod;
+ struct regulator *reg;
+ struct notifier_block nb;
+ int idx;
+};
+
+struct rockchip_iodomain {
+ struct device *dev;
+ struct regmap *grf;
+ struct rockchip_iodomain_soc_data *soc_data;
+ struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
+};
+
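+/*
+ * Program the GRF bit for @supply to match the regulator voltage:
+ * 1 selects the 1.8V IO setting, 0 selects the 3.3V setting.
+ */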
+static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
+ int uV)
+{
+ struct rockchip_iodomain *iod = supply->iod;
+ u32 val;
+ int ret;
+
+ /* set value bit */
+ val = (uV > MAX_VOLTAGE_1_8) ? 0 : 1;
+ val <<= supply->idx;
+
+ /* apply hiword-mask: bit (idx + 16) must be set for bit idx to be written */
+ val |= (BIT(supply->idx) << 16);
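+ /* e.g. idx 2 switching to 1.8V: val = BIT(2) | BIT(18) = 0x00040004 */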
+
+ ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);
+ if (ret)
+ dev_err(iod->dev, "Couldn't write to GRF\n");
+
+ return ret;
+}
+
+static int rockchip_iodomain_notify(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct rockchip_iodomain_supply *supply =
+ container_of(nb, struct rockchip_iodomain_supply, nb);
+ int uV;
+ int ret;
+
+ /*
+ * According to Rockchip it's important to keep the SoC IO domain
+ * higher than (or equal to) the external voltage. That means we need
+ * to change it before external voltage changes happen in the case
+ * of an increase.
+ *
+ * Note that in the "pre" change we pick the max possible voltage that
+ * the regulator might end up at (the client requests a range and we
+ * don't know for certain the exact voltage). Right now we rely on the
+ * slop in MAX_VOLTAGE_1_8 and MAX_VOLTAGE_3_3 to save us if clients
+ * request something like a max of 3.6V when they really want 3.3V.
+ * We could attempt to come up with better rules if this fails.
+ */
+ if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
+ struct pre_voltage_change_data *pvc_data = data;
+
+ uV = max_t(unsigned long, pvc_data->old_uV, pvc_data->max_uV);
+ } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE |
+ REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) {
+ uV = (unsigned long)data;
+ } else {
+ return NOTIFY_OK;
+ }
+
+ dev_dbg(supply->iod->dev, "Setting to %d\n", uV);
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_err(supply->iod->dev, "Voltage too high: %d\n", uV);
+
+ if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+ }
+
+ ret = rockchip_iodomain_write(supply, uV);
+ if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+
+ dev_info(supply->iod->dev, "Setting to %d done\n", uV);
+ return NOTIFY_OK;
+}
+
+static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3288_SOC_CON2, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+/*
+ * On the rk3188 the io-domains are handled by a shared register with the
+ * lower 8 bits being still being continuing drive-strength settings.
+ */
+static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
+ .grf_offset = 0x104,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "ap0",
+ "ap1",
+ "cif",
+ "flash",
+ "vccio0",
+ "vccio1",
+ "lcdc0",
+ "lcdc1",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
+ .grf_offset = 0x380,
+ .supply_names = {
+ "lcdc", /* LCDC_VDD */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "flash1", /* FLASH1_VDD (sdio1) */
+ "wifi", /* APIO3_VDD (sdio0) */
+ "bb", /* APIO5_VDD */
+ "audio", /* APIO4_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO2_VDD */
+ },
+ .init = rk3288_iodomain_init,
+};
+
+static const struct of_device_id rockchip_iodomain_match[] = {
+ {
+ .compatible = "rockchip,rk3188-io-voltage-domain",
+ .data = (void *)&soc_data_rk3188
+ },
+ {
+ .compatible = "rockchip,rk3288-io-voltage-domain",
+ .data = (void *)&soc_data_rk3288
+ },
+ { /* sentinel */ },
+};
+
+static int rockchip_iodomain_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct rockchip_iodomain *iod;
+ int i, ret = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ iod->dev = &pdev->dev;
+ platform_set_drvdata(pdev, iod);
+
+ match = of_match_node(rockchip_iodomain_match, np);
+ iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+
+ iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(iod->grf)) {
+ dev_err(&pdev->dev, "couldn't find grf regmap\n");
+ return PTR_ERR(iod->grf);
+ }
+
+ for (i = 0; i < MAX_SUPPLIES; i++) {
+ const char *supply_name = iod->soc_data->supply_names[i];
+ struct rockchip_iodomain_supply *supply = &iod->supplies[i];
+ struct regulator *reg;
+ int uV;
+
+ if (!supply_name)
+ continue;
+
+ reg = devm_regulator_get_optional(iod->dev, supply_name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ /* If a supply wasn't specified, that's OK */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret != -EPROBE_DEFER)
+ dev_err(iod->dev, "couldn't get regulator %s\n",
+ supply_name);
+ goto unreg_notify;
+ }
+
+ /* set initial correct value */
+ uV = regulator_get_voltage(reg);
+
+ /* must be a regulator we can get the voltage of */
+ if (uV < 0) {
+ dev_err(iod->dev, "Can't determine voltage: %s\n",
+ supply_name);
+ ret = uV;
+ goto unreg_notify;
+ }
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_crit(iod->dev,
+ "%d uV is too high. May damage SoC!\n",
+ uV);
+ ret = -EINVAL;
+ goto unreg_notify;
+ }
+
+ /* setup our supply */
+ supply->idx = i;
+ supply->iod = iod;
+ supply->reg = reg;
+ supply->nb.notifier_call = rockchip_iodomain_notify;
+
+ ret = rockchip_iodomain_write(supply, uV);
+ if (ret) {
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+
+ /* register regulator notifier */
+ ret = regulator_register_notifier(reg, &supply->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator notifier request failed\n");
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+ }
+
+ if (iod->soc_data->init)
+ iod->soc_data->init(iod);
+
+ return 0;
+
+unreg_notify:
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return ret;
+}
+
+static int rockchip_iodomain_remove(struct platform_device *pdev)
+{
+ struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rockchip_iodomain_driver = {
+ .probe = rockchip_iodomain_probe,
+ .remove = rockchip_iodomain_remove,
+ .driver = {
+ .name = "rockchip-iodomain",
+ .of_match_table = rockchip_iodomain_match,
+ },
+};
+
+module_platform_driver(rockchip_iodomain_driver);
+
+MODULE_DESCRIPTION("Rockchip IO-domain driver");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 72f63817a1a..fe2c2d595f5 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -75,8 +75,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
.con_ids = { NULL, },
};
-static bool default_pm_on;
-
static int __init sh_pm_runtime_init(void)
{
if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
@@ -96,16 +94,7 @@ static int __init sh_pm_runtime_init(void)
return 0;
}
- default_pm_on = true;
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(sh_pm_runtime_init);
-
-static int __init sh_pm_runtime_late_init(void)
-{
- if (default_pm_on)
- pm_genpd_poweroff_unused();
- return 0;
-}
-late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e19512ffc40..ebcb33df2eb 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -35,6 +35,7 @@
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
@@ -264,10 +265,12 @@ static int spi_drv_probe(struct device *dev)
if (ret)
return ret;
- acpi_dev_pm_attach(dev, true);
- ret = sdrv->probe(to_spi_device(dev));
- if (ret)
- acpi_dev_pm_detach(dev, true);
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret != -EPROBE_DEFER) {
+ ret = sdrv->probe(to_spi_device(dev));
+ if (ret)
+ dev_pm_domain_detach(dev, true);
+ }
return ret;
}
@@ -278,7 +281,7 @@ static int spi_drv_remove(struct device *dev)
int ret;
ret = sdrv->remove(to_spi_device(dev));
- acpi_dev_pm_detach(dev, true);
+ dev_pm_domain_detach(dev, true);
return ret;
}