Diffstat (limited to 'drivers/thermal')
-rw-r--r--   drivers/thermal/Kconfig                   |  55
-rw-r--r--   drivers/thermal/Makefile                  |  10
-rw-r--r--   drivers/thermal/cpu_cooling.c             |  21
-rw-r--r--   drivers/thermal/db8500_cpufreq_cooling.c  |   5
-rw-r--r--   drivers/thermal/db8500_thermal.c          |   4
-rw-r--r--   drivers/thermal/dove_thermal.c            | 209
-rw-r--r--   drivers/thermal/exynos_thermal.c          | 219
-rw-r--r--   drivers/thermal/intel_powerclamp.c        | 794
-rw-r--r--   drivers/thermal/kirkwood_thermal.c        | 134
-rw-r--r--   drivers/thermal/rcar_thermal.c            | 490
-rw-r--r--   drivers/thermal/spear_thermal.c           |   7
-rw-r--r--   drivers/thermal/step_wise.c               | 122
-rw-r--r--   drivers/thermal/thermal_sys.c             | 122
13 files changed, 1898 insertions(+), 294 deletions(-)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c2c77d1ac49..a764f165b58 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -29,14 +29,14 @@ choice
config THERMAL_DEFAULT_GOV_STEP_WISE
bool "step_wise"
- select STEP_WISE
+ select THERMAL_GOV_STEP_WISE
help
Use the step_wise governor as default. This throttles the
devices one step at a time.
config THERMAL_DEFAULT_GOV_FAIR_SHARE
bool "fair_share"
- select FAIR_SHARE
+ select THERMAL_GOV_FAIR_SHARE
help
Use the fair_share governor as default. This throttles the
devices based on their 'contribution' to a zone. The
@@ -44,24 +44,24 @@ config THERMAL_DEFAULT_GOV_FAIR_SHARE
config THERMAL_DEFAULT_GOV_USER_SPACE
bool "user_space"
- select USER_SPACE
+ select THERMAL_GOV_USER_SPACE
help
Select this if you want to let the user space manage the
platform thermals.
endchoice
-config FAIR_SHARE
+config THERMAL_GOV_FAIR_SHARE
bool "Fair-share thermal governor"
help
Enable this to manage platform thermals using fair-share governor.
-config STEP_WISE
+config THERMAL_GOV_STEP_WISE
bool "Step_wise thermal governor"
help
Enable this to manage platform thermals using a simple linear
-config USER_SPACE
+config THERMAL_GOV_USER_SPACE
bool "User_space thermal governor"
help
Enable this to let the user space manage the platform thermals.
@@ -78,6 +78,14 @@ config CPU_THERMAL
and not the ACPI interface.
If you want this support, you should say Y here.
+config THERMAL_EMULATION
+ bool "Thermal emulation mode support"
+ help
+ Enable this option to add an emul_temp sysfs node to each thermal
+ zone directory for temperature emulation. Through this node, the user
+ can manually input a temperature and test the behaviour of the
+ different trip thresholds for simulation purposes.
+
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on PLAT_SPEAR
@@ -93,6 +101,14 @@ config RCAR_THERMAL
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework
+config KIRKWOOD_THERMAL
+ tristate "Temperature sensor on Marvell Kirkwood SoCs"
+ depends on ARCH_KIRKWOOD
+ depends on OF
+ help
+ Support for the Kirkwood thermal sensor driver in the Linux thermal
+ framework. Only the Kirkwood 88F6282 and 88F6283 have this sensor.
+
config EXYNOS_THERMAL
tristate "Temperature sensor on Samsung EXYNOS"
depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
@@ -101,6 +117,23 @@ config EXYNOS_THERMAL
If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS series of SoC.
+config EXYNOS_THERMAL_EMUL
+ bool "EXYNOS TMU emulation mode support"
+ depends on EXYNOS_THERMAL
+ help
+ The TMU on the Exynos 4412, 4414 and 5 series SoCs has an emulation mode.
+ Enabling this option creates a sysfs node in the exynos thermal platform
+ device directory to control it. Through this sysfs node, you can manually
+ input a temperature to the TMU for simulation purposes.
+
+config DOVE_THERMAL
+ tristate "Temperature sensor on Marvell Dove SoCs"
+ depends on ARCH_DOVE
+ depends on OF
+ help
+ Support for the Dove thermal sensor driver in the Linux thermal
+ framework.
+
config DB8500_THERMAL
bool "DB8500 thermal management"
depends on ARCH_U8500
@@ -122,4 +155,14 @@ config DB8500_CPUFREQ_COOLING
bound cpufreq cooling device turns active to set CPU frequency low to
cool down the CPU.
+config INTEL_POWERCLAMP
+ tristate "Intel PowerClamp idle injection driver"
+ depends on THERMAL
+ depends on X86
+ depends on CPU_SUP_INTEL
+ help
+ Enable this to build the Intel PowerClamp idle injection driver. It
+ enforces idle time, which results in more package C-state residency. The
+ user interface is exposed via the generic thermal framework.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d8da683245f..d3a2b38c31e 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -5,9 +5,9 @@
obj-$(CONFIG_THERMAL) += thermal_sys.o
# governors
-obj-$(CONFIG_FAIR_SHARE) += fair_share.o
-obj-$(CONFIG_STEP_WISE) += step_wise.o
-obj-$(CONFIG_USER_SPACE) += user_space.o
+obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
+obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
+obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
# cpufreq cooling
obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
@@ -15,6 +15,10 @@ obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o
# platform thermal drivers
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
+obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o
+obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
+obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
+
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 836828e29a8..8dc44cbb3e0 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -73,21 +73,14 @@ static struct cpufreq_cooling_device *notify_device;
*/
static int get_idr(struct idr *idr, int *id)
{
- int err;
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
mutex_lock(&cooling_cpufreq_lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&cooling_cpufreq_lock);
-
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -118,8 +111,8 @@ static int is_cpufreq_valid(int cpu)
/**
* get_cpu_frequency - get the absolute value of frequency from level.
* @cpu: cpu for which frequency is fetched.
- * @level: level of frequency of the CPU
- * e.g level=1 --> 1st MAX FREQ, LEVEL=2 ---> 2nd MAX FREQ, .... etc
+ * @level: level of frequency, equals cooling state of cpu cooling device
+ * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
*/
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
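[Editor's example] The cpu_cooling.c hunk above moves from the old idr_pre_get()/idr_get_new() retry loop to idr_alloc(), which allocates an ID and returns it (or a negative errno) in a single call. Below is a minimal sketch of that allocation pattern, assuming an idr_alloc()-era kernel; the cooling_idr/cooling_lock names are illustrative, not taken from the driver.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(cooling_idr);		/* illustrative IDR instance */
static DEFINE_MUTEX(cooling_lock);	/* illustrative lock */

/* Allocate an ID: idr_alloc() returns the new ID or a negative errno. */
static int cooling_get_id(int *id)
{
	int ret;

	mutex_lock(&cooling_lock);
	ret = idr_alloc(&cooling_idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&cooling_lock);
	if (ret < 0)
		return ret;

	*id = ret;
	return 0;
}

/* Release the ID again when the cooling device goes away. */
static void cooling_put_id(int id)
{
	mutex_lock(&cooling_lock);
	idr_remove(&cooling_idr, id);
	mutex_unlock(&cooling_lock);
}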
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 4cf8e72af90..21419851fc0 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -21,6 +21,7 @@
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -73,15 +74,13 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
{ .compatible = "stericsson,db8500-cpufreq-cooling" },
{},
};
-#else
-#define db8500_cpufreq_cooling_match NULL
#endif
static struct platform_driver db8500_cpufreq_cooling_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-cpufreq-cooling",
- .of_match_table = db8500_cpufreq_cooling_match,
+ .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
},
.probe = db8500_cpufreq_cooling_probe,
.suspend = db8500_cpufreq_cooling_suspend,
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index ec71ade3e31..61ce60a3592 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -508,15 +508,13 @@ static const struct of_device_id db8500_thermal_match[] = {
{ .compatible = "stericsson,db8500-thermal" },
{},
};
-#else
-#define db8500_thermal_match NULL
#endif
static struct platform_driver db8500_thermal_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "db8500-thermal",
- .of_match_table = db8500_thermal_match,
+ .of_match_table = of_match_ptr(db8500_thermal_match),
},
.probe = db8500_thermal_probe,
.suspend = db8500_thermal_suspend,
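[Editor's example] The two db8500 drivers above can drop their "#else ... NULL" fallbacks because of_match_ptr() already does that job: with CONFIG_OF set the macro passes the table through, and without it the macro evaluates to NULL, so the #ifdef-guarded table is simply compiled out. A condensed sketch of the pattern, with a made-up driver name:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo-thermal" },	/* made-up compatible */
	{},
};
MODULE_DEVICE_TABLE(of, demo_match);
#endif

static struct platform_driver demo_driver = {
	.driver = {
		.name		= "demo-thermal",
		.owner		= THIS_MODULE,
		/* NULL when CONFIG_OF is not set, the table otherwise */
		.of_match_table	= of_match_ptr(demo_match),
	},
	/* .probe/.remove omitted in this sketch */
};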
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
new file mode 100644
index 00000000000..7b0bfa0e7a9
--- /dev/null
+++ b/drivers/thermal/dove_thermal.c
@@ -0,0 +1,209 @@
+/*
+ * Dove thermal sensor driver
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define DOVE_THERMAL_TEMP_OFFSET 1
+#define DOVE_THERMAL_TEMP_MASK 0x1FF
+
+/* Dove Thermal Manager Control and Status Register */
+#define PMU_TM_DISABLE_OFFS 0
+#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS)
+
+/* Dove Thermal Diode Control 0 Register */
+#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
+#define PMU_TDC0_SEL_VCAL_OFFS 5
+#define PMU_TDC0_SEL_VCAL_MASK (0x3 << PMU_TDC0_SEL_VCAL_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS 11
+#define PMU_TDC0_REF_CAL_CNT_MASK (0x1FF << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_AVG_NUM_OFFS 25
+#define PMU_TDC0_AVG_NUM_MASK (0x7 << PMU_TDC0_AVG_NUM_OFFS)
+
+/* Dove Thermal Diode Control 1 Register */
+#define PMU_TEMP_DIOD_CTRL1_REG 0x04
+#define PMU_TDC1_TEMP_VALID_MASK (0x1 << 10)
+
+/* Dove Thermal Sensor Dev Structure */
+struct dove_thermal_priv {
+ void __iomem *sensor;
+ void __iomem *control;
+};
+
+static int dove_init_sensor(const struct dove_thermal_priv *priv)
+{
+ u32 reg;
+ u32 i;
+
+ /* Configure the Diode Control Register #0 */
+ reg = readl_relaxed(priv->control);
+
+ /* Use average of 2 */
+ reg &= ~PMU_TDC0_AVG_NUM_MASK;
+ reg |= (0x1 << PMU_TDC0_AVG_NUM_OFFS);
+
+ /* Reference calibration value */
+ reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+ reg |= (0x0F1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+
+ /* Set the high level reference for calibration */
+ reg &= ~PMU_TDC0_SEL_VCAL_MASK;
+ reg |= (0x2 << PMU_TDC0_SEL_VCAL_OFFS);
+ writel(reg, priv->control);
+
+ /* Reset the sensor */
+ reg = readl_relaxed(priv->control);
+ writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+ writel(reg, priv->control);
+
+ /* Enable the sensor */
+ reg = readl_relaxed(priv->sensor);
+ reg &= ~PMU_TM_DISABLE_MASK;
+ writel(reg, priv->sensor);
+
+ /* Poll the sensor for the first reading */
+ for (i = 0; i < 1000000; i++) {
+ reg = readl_relaxed(priv->sensor);
+ if (reg & DOVE_THERMAL_TEMP_MASK)
+ break;
+ }
+
+ if (i == 1000000)
+ return -EIO;
+
+ return 0;
+}
+
+static int dove_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct dove_thermal_priv *priv = thermal->devdata;
+
+ /* Valid check */
+ reg = readl_relaxed(priv->control + PMU_TEMP_DIOD_CTRL1_REG);
+ if ((reg & PMU_TDC1_TEMP_VALID_MASK) == 0x0) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /*
+ * Calculate temperature. See Section 8.10.1 of the 88AP510 datasheet
+ * and Documentation/arm/Marvell/README.
+ */
+ reg = readl_relaxed(priv->sensor);
+ reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = dove_get_temp,
+};
+
+static const struct of_device_id dove_thermal_id_table[] = {
+ { .compatible = "marvell,dove-thermal" },
+ {}
+};
+
+static int dove_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct dove_thermal_priv *priv;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+ priv->control = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->control) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = dove_init_sensor(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize sensor\n");
+ return ret;
+ }
+
+ thermal = thermal_zone_device_register("dove_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int dove_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *dove_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(dove_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, dove_thermal_id_table);
+
+static struct platform_driver dove_thermal_driver = {
+ .probe = dove_thermal_probe,
+ .remove = dove_thermal_exit,
+ .driver = {
+ .name = "dove_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dove_thermal_id_table),
+ },
+};
+
+module_platform_driver(dove_thermal_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Dove thermal driver");
+MODULE_LICENSE("GPL");
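[Editor's example] dove_get_temp() converts the raw 9-bit diode reading to millicelsius as (2281638 - 7298 * reg) / 10; the Kirkwood driver later in this patch uses the same formula. A standalone sketch of that conversion with one worked value (the sample reading of 230 is made up for illustration):

#include <stdio.h>

/* Same constants as the driver: 9-bit field in, millicelsius out. */
static long dove_raw_to_mcelsius(unsigned long reg)
{
	return (2281638UL - 7298UL * reg) / 10;
}

int main(void)
{
	unsigned long raw = 230;	/* made-up sample reading */

	/* 2281638 - 7298 * 230 = 603098; / 10 = 60309 millicelsius, ~60.3 C */
	printf("raw %lu -> %ld millicelsius\n", raw, dove_raw_to_mcelsius(raw));
	return 0;
}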
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index 224751e9f5f..e04ebd8671a 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -82,7 +82,7 @@
#define EXYNOS_TRIMINFO_RELOAD 0x1
#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 16)
+#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
#define EXYNOS_MUX_ADDR_VALUE 6
#define EXYNOS_MUX_ADDR_SHIFT 20
#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
@@ -94,11 +94,20 @@
#define SENSOR_NAME_LEN 16
#define MAX_TRIP_COUNT 8
#define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 4
#define ACTIVE_INTERVAL 500
#define IDLE_INTERVAL 10000
#define MCELSIUS 1000
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#define EXYNOS_EMUL_TIME 0x57F0
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK 0xFF
+#define EXYNOS_EMUL_ENABLE 0x1
+#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+
/* CPU Zone information */
#define PANIC_ZONE 4
#define WARN_ZONE 3
@@ -125,6 +134,7 @@ struct exynos_tmu_data {
struct thermal_trip_point_conf {
int trip_val[MAX_TRIP_COUNT];
int trip_count;
+ u8 trigger_falling;
};
struct thermal_cooling_conf {
@@ -174,7 +184,8 @@ static int exynos_set_mode(struct thermal_zone_device *thermal,
mutex_lock(&th_zone->therm_dev->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling)
th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
else
th_zone->therm_dev->polling_delay = 0;
@@ -284,7 +295,7 @@ static int exynos_bind(struct thermal_zone_device *thermal,
case MONITOR_ZONE:
case WARN_ZONE:
if (thermal_zone_bind_cooling_device(thermal, i, cdev,
- level, level)) {
+ level, 0)) {
pr_err("error binding cdev inst %d\n", i);
ret = -EINVAL;
}
@@ -362,10 +373,17 @@ static int exynos_get_temp(struct thermal_zone_device *thermal,
static int exynos_get_trend(struct thermal_zone_device *thermal,
int trip, enum thermal_trend *trend)
{
- if (thermal->temperature >= trip)
- *trend = THERMAL_TREND_RAISING;
+ int ret;
+ unsigned long trip_temp;
+
+ ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+ if (ret < 0)
+ return ret;
+
+ if (thermal->temperature >= trip_temp)
+ *trend = THERMAL_TREND_RAISE_FULL;
else
- *trend = THERMAL_TREND_DROPPING;
+ *trend = THERMAL_TREND_DROP_FULL;
return 0;
}
@@ -413,7 +431,8 @@ static void exynos_report_trigger(void)
break;
}
- if (th_zone->mode == THERMAL_DEVICE_ENABLED) {
+ if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+ !th_zone->sensor_conf->trip_data.trigger_falling) {
if (i > 0)
th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
else
@@ -452,7 +471,8 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
- IDLE_INTERVAL);
+ sensor_conf->trip_data.trigger_falling ?
+ 0 : IDLE_INTERVAL);
if (IS_ERR(th_zone->therm_dev)) {
pr_err("Failed to register thermal zone device\n");
@@ -559,8 +579,9 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct exynos_tmu_platform_data *pdata = data->pdata;
- unsigned int status, trim_info, rising_threshold;
- int ret = 0, threshold_code;
+ unsigned int status, trim_info;
+ unsigned int rising_threshold = 0, falling_threshold = 0;
+ int ret = 0, threshold_code, i, trigger_levs = 0;
mutex_lock(&data->lock);
clk_enable(data->clk);
@@ -585,6 +606,11 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
(data->temp_error2 != 0))
data->temp_error1 = pdata->efuse_value;
+ /* Count trigger levels to be enabled */
+ for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
+ if (pdata->trigger_levels[i])
+ trigger_levs++;
+
if (data->soc == SOC_ARCH_EXYNOS4210) {
/* Write temperature code for threshold */
threshold_code = temp_to_code(data, pdata->threshold);
@@ -594,44 +620,38 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
}
writeb(threshold_code,
data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
-
- writeb(pdata->trigger_levels[0],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0);
- writeb(pdata->trigger_levels[1],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL1);
- writeb(pdata->trigger_levels[2],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL2);
- writeb(pdata->trigger_levels[3],
- data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL3);
+ for (i = 0; i < trigger_levs; i++)
+ writeb(pdata->trigger_levels[i],
+ data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
} else if (data->soc == SOC_ARCH_EXYNOS) {
- /* Write temperature code for threshold */
- threshold_code = temp_to_code(data, pdata->trigger_levels[0]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold = threshold_code;
- threshold_code = temp_to_code(data, pdata->trigger_levels[1]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
- }
- rising_threshold |= (threshold_code << 8);
- threshold_code = temp_to_code(data, pdata->trigger_levels[2]);
- if (threshold_code < 0) {
- ret = threshold_code;
- goto out;
+ /* Write temperature code for rising and falling threshold */
+ for (i = 0; i < trigger_levs; i++) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i]);
+ if (threshold_code < 0) {
+ ret = threshold_code;
+ goto out;
+ }
+ rising_threshold |= threshold_code << 8 * i;
+ if (pdata->threshold_falling) {
+ threshold_code = temp_to_code(data,
+ pdata->trigger_levels[i] -
+ pdata->threshold_falling);
+ if (threshold_code > 0)
+ falling_threshold |=
+ threshold_code << 8 * i;
+ }
}
- rising_threshold |= (threshold_code << 16);
writel(rising_threshold,
data->base + EXYNOS_THD_TEMP_RISE);
- writel(0, data->base + EXYNOS_THD_TEMP_FALL);
+ writel(falling_threshold,
+ data->base + EXYNOS_THD_TEMP_FALL);
- writel(EXYNOS_TMU_CLEAR_RISE_INT|EXYNOS_TMU_CLEAR_FALL_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
}
out:
@@ -664,6 +684,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
pdata->trigger_level2_en << 8 |
pdata->trigger_level1_en << 4 |
pdata->trigger_level0_en;
+ if (pdata->threshold_falling)
+ interrupt_en |= interrupt_en << 16;
} else {
con |= EXYNOS_TMU_CORE_OFF;
interrupt_en = 0; /* Disable all interrupts */
@@ -697,20 +719,19 @@ static void exynos_tmu_work(struct work_struct *work)
struct exynos_tmu_data *data = container_of(work,
struct exynos_tmu_data, irq_work);
+ exynos_report_trigger();
mutex_lock(&data->lock);
clk_enable(data->clk);
-
-
if (data->soc == SOC_ARCH_EXYNOS)
- writel(EXYNOS_TMU_CLEAR_RISE_INT,
+ writel(EXYNOS_TMU_CLEAR_RISE_INT |
+ EXYNOS_TMU_CLEAR_FALL_INT,
data->base + EXYNOS_TMU_REG_INTCLEAR);
else
writel(EXYNOS4210_TMU_INTCLEAR_VAL,
data->base + EXYNOS_TMU_REG_INTCLEAR);
-
clk_disable(data->clk);
mutex_unlock(&data->lock);
- exynos_report_trigger();
+
enable_irq(data->irq);
}
@@ -759,6 +780,7 @@ static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
+ .threshold_falling = 10,
.trigger_levels[0] = 85,
.trigger_levels[1] = 103,
.trigger_levels[2] = 110,
@@ -800,8 +822,6 @@ static const struct of_device_id exynos_tmu_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#else
-#define exynos_tmu_match NULL
#endif
static struct platform_device_id exynos_tmu_driver_ids[] = {
@@ -832,6 +852,94 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
return (struct exynos_tmu_platform_data *)
platform_get_device_id(pdev)->driver_data;
}
+
+#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+static ssize_t exynos_tmu_emulation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ u8 temp_code;
+ int temp = 0;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ if (reg & EXYNOS_EMUL_ENABLE) {
+ reg >>= EXYNOS_EMUL_DATA_SHIFT;
+ temp_code = reg & EXYNOS_EMUL_DATA_MASK;
+ temp = code_to_temp(data, temp_code);
+ }
+out:
+ return sprintf(buf, "%d\n", temp * MCELSIUS);
+}
+
+static ssize_t exynos_tmu_emulation_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ unsigned int reg;
+ int temp;
+
+ if (data->soc == SOC_ARCH_EXYNOS4210)
+ goto out;
+
+ if (!sscanf(buf, "%d\n", &temp) || temp < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ reg = readl(data->base + EXYNOS_EMUL_CON);
+
+ if (temp) {
+ /* Both CELSIUS and MCELSIUS type are available for input */
+ if (temp > MCELSIUS)
+ temp /= MCELSIUS;
+
+ reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+ (temp_to_code(data, temp)
+ << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+ } else {
+ reg &= ~EXYNOS_EMUL_ENABLE;
+ }
+
+ writel(reg, data->base + EXYNOS_EMUL_CON);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+out:
+ return count;
+}
+
+static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
+ exynos_tmu_emulation_store);
+static int create_emulation_sysfs(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_emulation);
+}
+static void remove_emulation_sysfs(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_emulation);
+}
+#else
+static inline int create_emulation_sysfs(struct device *dev) { return 0; }
+static inline void remove_emulation_sysfs(struct device *dev) {}
+#endif
+
static int exynos_tmu_probe(struct platform_device *pdev)
{
struct exynos_tmu_data *data;
@@ -866,11 +974,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
return -ENOENT;
}
- data->base = devm_request_and_ioremap(&pdev->dev, data->mem);
- if (!data->base) {
- dev_err(&pdev->dev, "Failed to ioremap memory\n");
- return -ENODEV;
- }
+ data->base = devm_ioremap_resource(&pdev->dev, data->mem);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
IRQF_TRIGGER_RISING, "exynos-tmu", data);
@@ -916,6 +1022,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
exynos_sensor_conf.trip_data.trip_val[i] =
pdata->threshold + pdata->trigger_levels[i];
+ exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
+
exynos_sensor_conf.cooling_data.freq_clip_count =
pdata->freq_tab_count;
for (i = 0; i < pdata->freq_tab_count; i++) {
@@ -930,6 +1038,11 @@ static int exynos_tmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register thermal interface\n");
goto err_clk;
}
+
+ ret = create_emulation_sysfs(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
+
return 0;
err_clk:
platform_set_drvdata(pdev, NULL);
@@ -941,6 +1054,8 @@ static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ remove_emulation_sysfs(&pdev->dev);
+
exynos_tmu_control(pdev, false);
exynos_unregister_thermal();
@@ -982,7 +1097,7 @@ static struct platform_driver exynos_tmu_driver = {
.name = "exynos-tmu",
.owner = THIS_MODULE,
.pm = EXYNOS_TMU_PM,
- .of_match_table = exynos_tmu_match,
+ .of_match_table = of_match_ptr(exynos_tmu_match),
},
.probe = exynos_tmu_probe,
.remove = exynos_tmu_remove,
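[Editor's example] For the non-4210 SoCs, the initialization code above now packs one 8-bit temperature code per trigger level into the 32-bit rising/falling threshold registers (code << 8 * i), with the falling threshold derived from the trigger level minus threshold_falling. A small sketch of just the packing step, using made-up codes in place of temp_to_code() and, purely for simplicity, subtracting the hysteresis directly from the code rather than from the temperature:

#include <stdio.h>

int main(void)
{
	/* made-up temperature codes for trigger levels 0..2 */
	unsigned int codes[] = { 0x50, 0x62, 0x69 };
	unsigned int falling_delta = 10;	/* stands in for threshold_falling */
	unsigned int rising = 0, falling = 0;
	int i;

	for (i = 0; i < 3; i++) {
		rising |= codes[i] << (8 * i);
		/* the driver converts (level - threshold_falling) to a code;
		 * here the delta is subtracted from the code for illustration */
		falling |= (codes[i] - falling_delta) << (8 * i);
	}

	/* with these codes: rising = 0x00696250, falling = 0x005f5846 */
	printf("rise 0x%08x fall 0x%08x\n", rising, falling);
	return 0;
}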
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
new file mode 100644
index 00000000000..ab3ed907d2c
--- /dev/null
+++ b/drivers/thermal/intel_powerclamp.c
@@ -0,0 +1,794 @@
+/*
+ * intel_powerclamp.c - package c-state idle injection
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ *
+ * TODO:
+ * 1. better handle wakeup from external interrupts, currently a fixed
+ * compensation is added to clamping duration when excessive amount
+ * of wakeups are observed during idle time. the reason is that in
+ * case of external interrupts without need for ack, clamping down
+ * cpu in non-irq context does not reduce irq. for majority of the
+ * cases, clamping down cpu does help reduce irq as well, we should
+ * be able to differentiate the two cases and give a quantitative
+ * solution for the irqs that we can control. perhaps based on
+ * get_cpu_iowait_time_us()
+ *
+ * 2. synchronization with other hw blocks
+ *
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/nmi.h>
+#include <asm/msr.h>
+#include <asm/mwait.h>
+#include <asm/cpu_device_id.h>
+#include <asm/idle.h>
+#include <asm/hardirq.h>
+
+#define MAX_TARGET_RATIO (50U)
+/* For each undisturbed clamping period (no extra wake ups during idle time),
+ * we increment the confidence counter for the given target ratio.
+ * CONFIDENCE_OK defines the level where runtime calibration results are
+ * valid.
+ */
+#define CONFIDENCE_OK (3)
+/* Default idle injection duration, driver adjust sleep time to meet target
+ * idle ratio. Similar to frequency modulation.
+ */
+#define DEFAULT_DURATION_JIFFIES (6)
+
+static unsigned int target_mwait;
+static struct dentry *debug_dir;
+
+/* user selected target */
+static unsigned int set_target_ratio;
+static unsigned int current_ratio;
+static bool should_skip;
+static bool reduce_irq;
+static atomic_t idle_wakeup_counter;
+static unsigned int control_cpu; /* The cpu assigned to collect stat and update
+ * control parameters. default to BSP but BSP
+ * can be offlined.
+ */
+static bool clamping;
+
+
+static struct task_struct * __percpu *powerclamp_thread;
+static struct thermal_cooling_device *cooling_dev;
+static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
+ * clamping thread
+ */
+
+static unsigned int duration;
+static unsigned int pkg_cstate_ratio_cur;
+static unsigned int window_size;
+
+static int duration_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_duration;
+
+ ret = kstrtoul(arg, 10, &new_duration);
+ if (ret)
+ goto exit;
+ if (new_duration > 25 || new_duration < 6) {
+ pr_err("Out of recommended range %lu, between 6-25ms\n",
+ new_duration);
+ ret = -EINVAL;
+ }
+
+ duration = clamp(new_duration, 6ul, 25ul);
+ smp_mb();
+
+exit:
+
+ return ret;
+}
+
+static struct kernel_param_ops duration_ops = {
+ .set = duration_set,
+ .get = param_get_int,
+};
+
+
+module_param_cb(duration, &duration_ops, &duration, 0644);
+MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
+
+struct powerclamp_calibration_data {
+ unsigned long confidence; /* used for calibration, basically a counter
+ * gets incremented each time a clamping
+ * period is completed without extra wakeups
+ * once that counter is reached given level,
+ * compensation is deemed usable.
+ */
+ unsigned long steady_comp; /* steady state compensation used when
+ * no extra wakeups occurred.
+ */
+ unsigned long dynamic_comp; /* compensate excessive wakeup from idle
+ * mostly from external interrupts.
+ */
+};
+
+static struct powerclamp_calibration_data cal_data[MAX_TARGET_RATIO];
+
+static int window_size_set(const char *arg, const struct kernel_param *kp)
+{
+ int ret = 0;
+ unsigned long new_window_size;
+
+ ret = kstrtoul(arg, 10, &new_window_size);
+ if (ret)
+ goto exit_win;
+ if (new_window_size > 10 || new_window_size < 2) {
+ pr_err("Out of recommended window size %lu, between 2-10\n",
+ new_window_size);
+ ret = -EINVAL;
+ }
+
+ window_size = clamp(new_window_size, 2ul, 10ul);
+ smp_mb();
+
+exit_win:
+
+ return ret;
+}
+
+static struct kernel_param_ops window_size_ops = {
+ .set = window_size_set,
+ .get = param_get_int,
+};
+
+module_param_cb(window_size, &window_size_ops, &window_size, 0644);
+MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
+ "\tpowerclamp controls idle ratio within this window. larger\n"
+ "\twindow size results in slower response time but more smooth\n"
+ "\tclamping results. default to 2.");
+
+static void find_target_mwait(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int highest_cstate = 0;
+ unsigned int highest_subcstate = 0;
+ int i;
+
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return;
+
+ edx >>= MWAIT_SUBSTATE_SIZE;
+ for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+ if (edx & MWAIT_SUBSTATE_MASK) {
+ highest_cstate = i;
+ highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+ }
+ }
+ target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+ (highest_subcstate - 1);
+
+}
+
+static u64 pkg_state_counter(void)
+{
+ u64 val;
+ u64 count = 0;
+
+ static bool skip_c2;
+ static bool skip_c3;
+ static bool skip_c6;
+ static bool skip_c7;
+
+ if (!skip_c2) {
+ if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c2 = true;
+ }
+
+ if (!skip_c3) {
+ if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c3 = true;
+ }
+
+ if (!skip_c6) {
+ if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c6 = true;
+ }
+
+ if (!skip_c7) {
+ if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
+ count += val;
+ else
+ skip_c7 = true;
+ }
+
+ return count;
+}
+
+static void noop_timer(unsigned long foo)
+{
+ /* empty... just the fact that we get the interrupt wakes us up */
+}
+
+static unsigned int get_compensation(int ratio)
+{
+ unsigned int comp = 0;
+
+ /* we only use compensation if all adjacent ones are good */
+ if (ratio == 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio + 1].steady_comp +
+ cal_data[ratio + 2].steady_comp) / 3;
+ } else if (ratio == MAX_TARGET_RATIO - 1 &&
+ cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 2].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio - 2].steady_comp) / 3;
+ } else if (cal_data[ratio].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio - 1].confidence >= CONFIDENCE_OK &&
+ cal_data[ratio + 1].confidence >= CONFIDENCE_OK) {
+ comp = (cal_data[ratio].steady_comp +
+ cal_data[ratio - 1].steady_comp +
+ cal_data[ratio + 1].steady_comp) / 3;
+ }
+
+ /* REVISIT: simple penalty of double idle injection */
+ if (reduce_irq)
+ comp = ratio;
+ /* do not exceed limit */
+ if (comp + ratio >= MAX_TARGET_RATIO)
+ comp = MAX_TARGET_RATIO - ratio - 1;
+
+ return comp;
+}
+
+static void adjust_compensation(int target_ratio, unsigned int win)
+{
+ int delta;
+ struct powerclamp_calibration_data *d = &cal_data[target_ratio];
+
+ /*
+ * adjust compensations if confidence level has not been reached or
+ * there are too many wakeups during the last idle injection period, we
+ * cannot trust the data for compensation.
+ */
+ if (d->confidence >= CONFIDENCE_OK ||
+ atomic_read(&idle_wakeup_counter) >
+ win * num_online_cpus())
+ return;
+
+ delta = set_target_ratio - current_ratio;
+ /* filter out bad data */
+ if (delta >= 0 && delta <= (1+target_ratio/10)) {
+ if (d->steady_comp)
+ d->steady_comp =
+ roundup(delta+d->steady_comp, 2)/2;
+ else
+ d->steady_comp = delta;
+ d->confidence++;
+ }
+}
+
+static bool powerclamp_adjust_controls(unsigned int target_ratio,
+ unsigned int guard, unsigned int win)
+{
+ static u64 msr_last, tsc_last;
+ u64 msr_now, tsc_now;
+ u64 val64;
+
+ /* check result for the last window */
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ current_ratio = 1;
+ else if (tsc_now-tsc_last) {
+ val64 = 100*(msr_now-msr_last);
+ do_div(val64, (tsc_now-tsc_last));
+ current_ratio = val64;
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ tsc_last = tsc_now;
+
+ adjust_compensation(target_ratio, win);
+ /*
+ * too many external interrupts, set flag such
+ * that we can take measure later.
+ */
+ reduce_irq = atomic_read(&idle_wakeup_counter) >=
+ 2 * win * num_online_cpus();
+
+ atomic_set(&idle_wakeup_counter, 0);
+ /* if we are above target+guard, skip */
+ return set_target_ratio + guard <= current_ratio;
+}
+
+static int clamp_thread(void *arg)
+{
+ int cpunr = (unsigned long)arg;
+ DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
+ static const struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO/2,
+ };
+ unsigned int count = 0;
+ unsigned int target_ratio;
+
+ set_bit(cpunr, cpu_clamping_mask);
+ set_freezable();
+ init_timer_on_stack(&wakeup_timer);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+
+ while (true == clamping && !kthread_should_stop() &&
+ cpu_online(cpunr)) {
+ int sleeptime;
+ unsigned long target_jiffies;
+ unsigned int guard;
+ unsigned int compensation = 0;
+ int interval; /* jiffies to sleep for each attempt */
+ unsigned int duration_jiffies = msecs_to_jiffies(duration);
+ unsigned int window_size_now;
+
+ try_to_freeze();
+ /*
+ * make sure user selected ratio does not take effect until
+ * the next round. adjust target_ratio if user has changed
+ * target such that we can converge quickly.
+ */
+ target_ratio = set_target_ratio;
+ guard = 1 + target_ratio/20;
+ window_size_now = window_size;
+ count++;
+
+ /*
+ * systems may have different ability to enter package level
+ * c-states, thus we need to compensate the injected idle ratio
+ * to achieve the actual target reported by the HW.
+ */
+ compensation = get_compensation(target_ratio);
+ interval = duration_jiffies*100/(target_ratio+compensation);
+
+ /* align idle time */
+ target_jiffies = roundup(jiffies, interval);
+ sleeptime = target_jiffies - jiffies;
+ if (sleeptime <= 0)
+ sleeptime = 1;
+ schedule_timeout_interruptible(sleeptime);
+ /*
+ * only elected controlling cpu can collect stats and update
+ * control parameters.
+ */
+ if (cpunr == control_cpu && !(count%window_size_now)) {
+ should_skip =
+ powerclamp_adjust_controls(target_ratio,
+ guard, window_size_now);
+ smp_mb();
+ }
+
+ if (should_skip)
+ continue;
+
+ target_jiffies = jiffies + duration_jiffies;
+ mod_timer(&wakeup_timer, target_jiffies);
+ if (unlikely(local_softirq_pending()))
+ continue;
+ /*
+ * stop tick sched during idle time, interrupts are still
+ * allowed. thus jiffies are updated properly.
+ */
+ preempt_disable();
+ tick_nohz_idle_enter();
+ /* mwait until target jiffies is reached */
+ while (time_before(jiffies, target_jiffies)) {
+ unsigned long ecx = 1;
+ unsigned long eax = target_mwait;
+
+ /*
+ * REVISIT: may call enter_idle() to notify drivers who
+ * can save power during cpu idle. same for exit_idle()
+ */
+ local_touch_nmi();
+ stop_critical_timings();
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ cpu_relax(); /* allow HT sibling to run */
+ __mwait(eax, ecx);
+ start_critical_timings();
+ atomic_inc(&idle_wakeup_counter);
+ }
+ tick_nohz_idle_exit();
+ preempt_enable_no_resched();
+ }
+ del_timer_sync(&wakeup_timer);
+ clear_bit(cpunr, cpu_clamping_mask);
+
+ return 0;
+}
+
+/*
+ * 1 HZ polling while clamping is active, useful for userspace
+ * to monitor actual idle ratio.
+ */
+static void poll_pkg_cstate(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(poll_pkg_cstate_work, poll_pkg_cstate);
+static void poll_pkg_cstate(struct work_struct *dummy)
+{
+ static u64 msr_last;
+ static u64 tsc_last;
+ static unsigned long jiffies_last;
+
+ u64 msr_now;
+ unsigned long jiffies_now;
+ u64 tsc_now;
+ u64 val64;
+
+ msr_now = pkg_state_counter();
+ rdtscll(tsc_now);
+ jiffies_now = jiffies;
+
+ /* calculate pkg cstate vs tsc ratio */
+ if (!msr_last || !tsc_last)
+ pkg_cstate_ratio_cur = 1;
+ else {
+ if (tsc_now - tsc_last) {
+ val64 = 100 * (msr_now - msr_last);
+ do_div(val64, (tsc_now - tsc_last));
+ pkg_cstate_ratio_cur = val64;
+ }
+ }
+
+ /* update record */
+ msr_last = msr_now;
+ jiffies_last = jiffies_now;
+ tsc_last = tsc_now;
+
+ if (true == clamping)
+ schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+}
+
+static int start_power_clamp(void)
+{
+ unsigned long cpu;
+ struct task_struct *thread;
+
+ /* check if pkg cstate counter is completely 0, abort in this case */
+ if (!pkg_state_counter()) {
+ pr_err("pkg cstate counter not functional, abort\n");
+ return -EINVAL;
+ }
+
+ set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
+ /* prevent cpu hotplug */
+ get_online_cpus();
+
+ /* prefer BSP */
+ control_cpu = 0;
+ if (!cpu_online(control_cpu))
+ control_cpu = smp_processor_id();
+
+ clamping = true;
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
+
+ /* start one thread per online cpu */
+ for_each_online_cpu(cpu) {
+ struct task_struct **p =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%ld", cpu);
+ /* bind to cpu here */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *p = thread;
+ }
+
+ }
+ put_online_cpus();
+
+ return 0;
+}
+
+static void end_power_clamp(void)
+{
+ int i;
+ struct task_struct *thread;
+
+ clamping = false;
+ /*
+ * make clamping visible to other cpus and give per cpu clamping threads
+ * some time to exit, or they get killed later.
+ */
+ smp_mb();
+ msleep(20);
+ if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
+ for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
+ pr_debug("clamping thread for cpu %d alive, kill\n", i);
+ thread = *per_cpu_ptr(powerclamp_thread, i);
+ kthread_stop(thread);
+ }
+ }
+}
+
+static int powerclamp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ struct task_struct *thread;
+ struct task_struct **percpu_thread =
+ per_cpu_ptr(powerclamp_thread, cpu);
+
+ if (false == clamping)
+ goto exit_ok;
+
+ switch (action) {
+ case CPU_ONLINE:
+ thread = kthread_create_on_node(clamp_thread,
+ (void *) cpu,
+ cpu_to_node(cpu),
+ "kidle_inject/%lu", cpu);
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ wake_up_process(thread);
+ *percpu_thread = thread;
+ }
+ /* prefer BSP as controlling CPU */
+ if (cpu == 0) {
+ control_cpu = 0;
+ smp_mb();
+ }
+ break;
+ case CPU_DEAD:
+ if (test_bit(cpu, cpu_clamping_mask)) {
+ pr_err("cpu %lu dead but powerclamping thread is not\n",
+ cpu);
+ kthread_stop(*percpu_thread);
+ }
+ if (cpu == control_cpu) {
+ control_cpu = smp_processor_id();
+ smp_mb();
+ }
+ }
+
+exit_ok:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block powerclamp_cpu_notifier = {
+ .notifier_call = powerclamp_cpu_callback,
+};
+
+static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MAX_TARGET_RATIO;
+
+ return 0;
+}
+
+static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ if (true == clamping)
+ *state = pkg_cstate_ratio_cur;
+ else
+ /* to save power, do not poll idle ratio while not clamping */
+ *state = -1; /* indicates invalid state */
+
+ return 0;
+}
+
+static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_target_ratio)
+{
+ int ret = 0;
+
+ new_target_ratio = clamp(new_target_ratio, 0UL,
+ (unsigned long) (MAX_TARGET_RATIO-1));
+ if (set_target_ratio == 0 && new_target_ratio > 0) {
+ pr_info("Start idle injection to reduce power\n");
+ set_target_ratio = new_target_ratio;
+ ret = start_power_clamp();
+ goto exit_set;
+ } else if (set_target_ratio > 0 && new_target_ratio == 0) {
+ pr_info("Stop forced idle injection\n");
+ set_target_ratio = 0;
+ end_power_clamp();
+ } else /* adjust currently running */ {
+ set_target_ratio = new_target_ratio;
+ /* make new set_target_ratio visible to other cpus */
+ smp_mb();
+ }
+
+exit_set:
+ return ret;
+}
+
+/* bind to generic thermal layer as cooling device*/
+static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+ .get_max_state = powerclamp_get_max_state,
+ .get_cur_state = powerclamp_get_cur_state,
+ .set_cur_state = powerclamp_set_cur_state,
+};
+
+/* runs on Nehalem and later */
+static const struct x86_cpu_id intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x1a},
+ { X86_VENDOR_INTEL, 6, 0x1c},
+ { X86_VENDOR_INTEL, 6, 0x1e},
+ { X86_VENDOR_INTEL, 6, 0x1f},
+ { X86_VENDOR_INTEL, 6, 0x25},
+ { X86_VENDOR_INTEL, 6, 0x26},
+ { X86_VENDOR_INTEL, 6, 0x2a},
+ { X86_VENDOR_INTEL, 6, 0x2c},
+ { X86_VENDOR_INTEL, 6, 0x2d},
+ { X86_VENDOR_INTEL, 6, 0x2e},
+ { X86_VENDOR_INTEL, 6, 0x2f},
+ { X86_VENDOR_INTEL, 6, 0x3a},
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
+static int powerclamp_probe(void)
+{
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
+ pr_err("Intel powerclamp does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+ if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
+ !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
+ !boot_cpu_has(X86_FEATURE_MWAIT) ||
+ !boot_cpu_has(X86_FEATURE_ARAT))
+ return -ENODEV;
+
+ /* find the deepest mwait value */
+ find_target_mwait();
+
+ return 0;
+}
+
+static int powerclamp_debug_show(struct seq_file *m, void *unused)
+{
+ int i = 0;
+
+ seq_printf(m, "controlling cpu: %d\n", control_cpu);
+ seq_printf(m, "pct confidence steady dynamic (compensation)\n");
+ for (i = 0; i < MAX_TARGET_RATIO; i++) {
+ seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
+ i,
+ cal_data[i].confidence,
+ cal_data[i].steady_comp,
+ cal_data[i].dynamic_comp);
+ }
+
+ return 0;
+}
+
+static int powerclamp_debug_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, powerclamp_debug_show, inode->i_private);
+}
+
+static const struct file_operations powerclamp_debug_fops = {
+ .open = powerclamp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static inline void powerclamp_create_debug_files(void)
+{
+ debug_dir = debugfs_create_dir("intel_powerclamp", NULL);
+ if (!debug_dir)
+ return;
+
+ if (!debugfs_create_file("powerclamp_calib", S_IRUGO, debug_dir,
+ cal_data, &powerclamp_debug_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(debug_dir);
+}
+
+static int powerclamp_init(void)
+{
+ int retval;
+ int bitmap_size;
+
+ bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
+ cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cpu_clamping_mask)
+ return -ENOMEM;
+
+ /* probe cpu features and ids here */
+ retval = powerclamp_probe();
+ if (retval)
+ return retval;
+ /* set default limit, maybe adjusted during runtime based on feedback */
+ window_size = 2;
+ register_hotcpu_notifier(&powerclamp_cpu_notifier);
+ powerclamp_thread = alloc_percpu(struct task_struct *);
+ cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ &powerclamp_cooling_ops);
+ if (IS_ERR(cooling_dev))
+ return -ENODEV;
+
+ if (!duration)
+ duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+ powerclamp_create_debug_files();
+
+ return 0;
+}
+module_init(powerclamp_init);
+
+static void powerclamp_exit(void)
+{
+ unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+ end_power_clamp();
+ free_percpu(powerclamp_thread);
+ thermal_cooling_device_unregister(cooling_dev);
+ kfree(cpu_clamping_mask);
+
+ cancel_delayed_work_sync(&poll_pkg_cstate_work);
+ debugfs_remove_recursive(debug_dir);
+}
+module_exit(powerclamp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("Package Level C-state Idle Injection for Intel CPUs");
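[Editor's example] The clamping kthread above derives its cadence from interval = duration_jiffies * 100 / (target_ratio + compensation): every interval jiffies it injects duration_jiffies of forced idle. A worked example of that arithmetic; the 6-jiffy duration matches DEFAULT_DURATION_JIFFIES, while the 25% target and zero compensation are chosen for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int duration_jiffies = 6;	/* DEFAULT_DURATION_JIFFIES */
	unsigned int target_ratio = 25;		/* illustrative 25% idle target */
	unsigned int compensation = 0;		/* assume no extra compensation */
	unsigned int interval;

	interval = duration_jiffies * 100 / (target_ratio + compensation);

	/* 6 * 100 / 25 = 24: the thread aligns to a 24-jiffy boundary, then
	 * mwaits for 6 jiffies, i.e. 6/24 = 25% injected idle */
	printf("interval = %u jiffies (%u%% idle)\n",
	       interval, duration_jiffies * 100 / interval);
	return 0;
}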
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
new file mode 100644
index 00000000000..65cb4f09e8f
--- /dev/null
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -0,0 +1,134 @@
+/*
+ * Kirkwood thermal sensor driver
+ *
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#define KIRKWOOD_THERMAL_VALID_OFFSET 9
+#define KIRKWOOD_THERMAL_VALID_MASK 0x1
+#define KIRKWOOD_THERMAL_TEMP_OFFSET 10
+#define KIRKWOOD_THERMAL_TEMP_MASK 0x1FF
+
+/* Kirkwood Thermal Sensor Dev Structure */
+struct kirkwood_thermal_priv {
+ void __iomem *sensor;
+};
+
+static int kirkwood_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ unsigned long reg;
+ struct kirkwood_thermal_priv *priv = thermal->devdata;
+
+ reg = readl_relaxed(priv->sensor);
+
+ /* Valid check */
+ if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+ KIRKWOOD_THERMAL_VALID_MASK)) {
+ dev_err(&thermal->device,
+ "Temperature sensor reading not valid\n");
+ return -EIO;
+ }
+
+ /*
+ * Calculate temperature. See Section 8.10.1 of the 88AP510
+ * datasheet, which has the same sensor, and
+ * Documentation/arm/Marvell/README.
+ */
+ reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
+ KIRKWOOD_THERMAL_TEMP_MASK;
+ *temp = ((2281638UL - (7298*reg)) / 10);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+ .get_temp = kirkwood_get_temp,
+};
+
+static const struct of_device_id kirkwood_thermal_id_table[] = {
+ { .compatible = "marvell,kirkwood-thermal" },
+ {}
+};
+
+static int kirkwood_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal = NULL;
+ struct kirkwood_thermal_priv *priv;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get platform resource\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv->sensor) {
+ dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
+ priv, &ops, NULL, 0, 0);
+ if (IS_ERR(thermal)) {
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device\n");
+ return PTR_ERR(thermal);
+ }
+
+ platform_set_drvdata(pdev, thermal);
+
+ return 0;
+}
+
+static int kirkwood_thermal_exit(struct platform_device *pdev)
+{
+ struct thermal_zone_device *kirkwood_thermal =
+ platform_get_drvdata(pdev);
+
+ thermal_zone_device_unregister(kirkwood_thermal);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, kirkwood_thermal_id_table);
+
+static struct platform_driver kirkwood_thermal_driver = {
+ .probe = kirkwood_thermal_probe,
+ .remove = kirkwood_thermal_exit,
+ .driver = {
+ .name = "kirkwood_thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(kirkwood_thermal_id_table),
+ },
+};
+
+module_platform_driver(kirkwood_thermal_driver);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
+MODULE_DESCRIPTION("kirkwood thermal driver");
+MODULE_LICENSE("GPL");
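[Editor's example] kirkwood_get_temp() trusts the reading only when the valid bit (bit 9) of the sensor register is set, then extracts the 9-bit temperature field from bits 10..18. A tiny standalone sketch of that bitfield handling, using the same offsets and masks and a made-up raw value:

#include <stdio.h>

#define VALID_OFFSET	9
#define VALID_MASK	0x1
#define TEMP_OFFSET	10
#define TEMP_MASK	0x1FF

int main(void)
{
	/* made-up register value: valid bit set, raw temperature field 230 */
	unsigned long reg = (1UL << VALID_OFFSET) | (230UL << TEMP_OFFSET);

	if (!((reg >> VALID_OFFSET) & VALID_MASK)) {
		printf("reading not valid\n");
		return 1;
	}

	printf("raw temp field = %lu\n", (reg >> TEMP_OFFSET) & TEMP_MASK);
	return 0;
}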
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 90db951725d..28f09199401 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -19,225 +19,473 @@
*/
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/thermal.h>
-#define THSCR 0x2c
-#define THSSR 0x30
+#define IDLE_INTERVAL 5000
+
+#define COMMON_STR 0x00
+#define COMMON_ENR 0x04
+#define COMMON_INTMSK 0x0c
+
+#define REG_POSNEG 0x20
+#define REG_FILONOFF 0x28
+#define REG_THSCR 0x2c
+#define REG_THSSR 0x30
+#define REG_INTCTRL 0x34
/* THSCR */
-#define CPTAP 0xf
+#define CPCTL (1 << 12)
/* THSSR */
#define CTEMP 0x3f
-
-struct rcar_thermal_priv {
+struct rcar_thermal_common {
void __iomem *base;
struct device *dev;
+ struct list_head head;
spinlock_t lock;
- u32 comp;
};
+struct rcar_thermal_priv {
+ void __iomem *base;
+ struct rcar_thermal_common *common;
+ struct thermal_zone_device *zone;
+ struct delayed_work work;
+ struct mutex lock;
+ struct list_head list;
+ int id;
+ int ctemp;
+};
+
+#define rcar_thermal_for_each_priv(pos, common) \
+ list_for_each_entry(pos, &common->head, list)
+
#define MCELSIUS(temp) ((temp) * 1000)
-#define rcar_zone_to_priv(zone) (zone->devdata)
+#define rcar_zone_to_priv(zone) ((zone)->devdata)
+#define rcar_priv_to_dev(priv) ((priv)->common->dev)
+#define rcar_has_irq_support(priv) ((priv)->common->base)
+#define rcar_id_to_shift(priv) ((priv)->id * 8)
+
+#ifdef DEBUG
+# define rcar_force_update_temp(priv) 1
+#else
+# define rcar_force_update_temp(priv) 0
+#endif
/*
* basic functions
*/
-static u32 rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
+#define rcar_thermal_common_read(c, r) \
+ _rcar_thermal_common_read(c, COMMON_ ##r)
+static u32 _rcar_thermal_common_read(struct rcar_thermal_common *common,
+ u32 reg)
{
- unsigned long flags;
- u32 ret;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(common->base + reg);
+}
- ret = ioread32(priv->base + reg);
+#define rcar_thermal_common_write(c, r, d) \
+ _rcar_thermal_common_write(c, COMMON_ ##r, d)
+static void _rcar_thermal_common_write(struct rcar_thermal_common *common,
+ u32 reg, u32 data)
+{
+ iowrite32(data, common->base + reg);
+}
- spin_unlock_irqrestore(&priv->lock, flags);
+#define rcar_thermal_common_bset(c, r, m, d) \
+ _rcar_thermal_common_bset(c, COMMON_ ##r, m, d)
+static void _rcar_thermal_common_bset(struct rcar_thermal_common *common,
+ u32 reg, u32 mask, u32 data)
+{
+ u32 val;
- return ret;
+ val = ioread32(common->base + reg);
+ val &= ~mask;
+ val |= (data & mask);
+ iowrite32(val, common->base + reg);
}
-#if 0 /* no user at this point */
-static void rcar_thermal_write(struct rcar_thermal_priv *priv,
- u32 reg, u32 data)
+#define rcar_thermal_read(p, r) _rcar_thermal_read(p, REG_ ##r)
+static u32 _rcar_thermal_read(struct rcar_thermal_priv *priv, u32 reg)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
+ return ioread32(priv->base + reg);
+}
+#define rcar_thermal_write(p, r, d) _rcar_thermal_write(p, REG_ ##r, d)
+static void _rcar_thermal_write(struct rcar_thermal_priv *priv,
+ u32 reg, u32 data)
+{
iowrite32(data, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
-#endif
-static void rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
- u32 mask, u32 data)
+#define rcar_thermal_bset(p, r, m, d) _rcar_thermal_bset(p, REG_ ##r, m, d)
+static void _rcar_thermal_bset(struct rcar_thermal_priv *priv, u32 reg,
+ u32 mask, u32 data)
{
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&priv->lock, flags);
-
val = ioread32(priv->base + reg);
val &= ~mask;
val |= (data & mask);
iowrite32(val, priv->base + reg);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/*
* zone device functions
*/
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
- unsigned long *temp)
+static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
- int val, min, max, tmp;
-
- tmp = -200; /* default */
- while (1) {
- if (priv->comp < 1 || priv->comp > 12) {
- dev_err(priv->dev,
- "THSSR invalid data (%d)\n", priv->comp);
- priv->comp = 4; /* for next thermal */
- return -EINVAL;
- }
+ struct device *dev = rcar_priv_to_dev(priv);
+ int i;
+ int ctemp, old, new;
- /*
- * THS comparator offset and the reference temperature
- *
- * Comparator | reference | Temperature field
- * offset | temperature | measurement
- * | (degrees C) | (degrees C)
- * -------------+---------------+-------------------
- * 1 | -45 | -45 to -30
- * 2 | -30 | -30 to -15
- * 3 | -15 | -15 to 0
- * 4 | 0 | 0 to +15
- * 5 | +15 | +15 to +30
- * 6 | +30 | +30 to +45
- * 7 | +45 | +45 to +60
- * 8 | +60 | +60 to +75
- * 9 | +75 | +75 to +90
- * 10 | +90 | +90 to +105
- * 11 | +105 | +105 to +120
- * 12 | +120 | +120 to +135
- */
+ mutex_lock(&priv->lock);
- /* calculate thermal limitation */
- min = (priv->comp * 15) - 60;
- max = min + 15;
+ /*
+ * TSC decides a value of CPTAP automatically,
+ * and this is the condition which validates the interrupt.
+ */
+ rcar_thermal_bset(priv, THSCR, CPCTL, CPCTL);
+ ctemp = 0;
+ old = ~0;
+ for (i = 0; i < 128; i++) {
/*
* we need to wait 300us after changing comparator offset
* to get stable temperature.
* see "Usage Notes" on datasheet
*/
- rcar_thermal_bset(priv, THSCR, CPTAP, priv->comp);
udelay(300);
- /* calculate current temperature */
- val = rcar_thermal_read(priv, THSSR) & CTEMP;
- val = (val * 5) - 65;
+ new = rcar_thermal_read(priv, THSSR) & CTEMP;
+ if (new == old) {
+ ctemp = new;
+ break;
+ }
+ old = new;
+ }
- dev_dbg(priv->dev, "comp/min/max/val = %d/%d/%d/%d\n",
- priv->comp, min, max, val);
+	if (!ctemp) {
+		dev_err(dev, "thermal sensor is broken\n");
+		mutex_unlock(&priv->lock);
+		return -EINVAL;
+	}
- /*
- * If val is same as min/max, then,
- * it should try again on next comparator.
- * But the val might be correct temperature.
- * Keep it on "tmp" and compare with next val.
- */
- if (tmp == val)
- break;
+ /*
+ * enable IRQ
+ */
+ if (rcar_has_irq_support(priv)) {
+ rcar_thermal_write(priv, FILONOFF, 0);
- if (val <= min) {
- tmp = min;
- priv->comp--; /* try again */
- } else if (val >= max) {
- tmp = max;
- priv->comp++; /* try again */
- } else {
- tmp = val;
- break;
- }
+ /* enable Rising/Falling edge interrupt */
+ rcar_thermal_write(priv, POSNEG, 0x1);
+ rcar_thermal_write(priv, INTCTRL, (((ctemp - 0) << 8) |
+ ((ctemp - 1) << 0)));
+ }
+
+ dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
+
+ priv->ctemp = ctemp;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
+ unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+ if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
+ rcar_thermal_update_temp(priv);
+
+ mutex_lock(&priv->lock);
+ *temp = MCELSIUS((priv->ctemp * 5) - 65);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type *type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *type = THERMAL_TRIP_CRITICAL;
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
+ int trip, unsigned long *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ /* see rcar_thermal_get_temp() */
+ switch (trip) {
+ case 0: /* +90 <= temp */
+ *temp = MCELSIUS(90);
+ break;
+ default:
+ dev_err(dev, "rcar driver trip error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_thermal_notify(struct thermal_zone_device *zone,
+ int trip, enum thermal_trip_type type)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ switch (type) {
+ case THERMAL_TRIP_CRITICAL:
+ /* FIXME */
+		dev_warn(dev, "Thermal zone reached critical temperature\n");
+ break;
+ default:
+ break;
}
- *temp = MCELSIUS(tmp);
return 0;
}
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
- .get_temp = rcar_thermal_get_temp,
+ .get_temp = rcar_thermal_get_temp,
+ .get_trip_type = rcar_thermal_get_trip_type,
+ .get_trip_temp = rcar_thermal_get_trip_temp,
+ .notify = rcar_thermal_notify,
};
/*
- * platform functions
+ * interrupt
*/
-static int rcar_thermal_probe(struct platform_device *pdev)
+#define rcar_thermal_irq_enable(p) _rcar_thermal_irq_ctrl(p, 1)
+#define rcar_thermal_irq_disable(p) _rcar_thermal_irq_ctrl(p, 0)
+static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
+{
+ struct rcar_thermal_common *common = priv->common;
+ unsigned long flags;
+ u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+}
+
+static void rcar_thermal_work(struct work_struct *work)
{
- struct thermal_zone_device *zone;
struct rcar_thermal_priv *priv;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Could not get platform resource\n");
- return -ENODEV;
+ priv = container_of(work, struct rcar_thermal_priv, work.work);
+
+ rcar_thermal_update_temp(priv);
+ rcar_thermal_irq_enable(priv);
+ thermal_zone_device_update(priv->zone);
+}
+
+static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
+{
+ struct device *dev = rcar_priv_to_dev(priv);
+
+ status = (status >> rcar_id_to_shift(priv)) & 0x3;
+
+ if (status & 0x3) {
+ dev_dbg(dev, "thermal%d %s%s\n",
+ priv->id,
+ (status & 0x2) ? "Rising " : "",
+ (status & 0x1) ? "Falling" : "");
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "Could not allocate priv\n");
- return -ENOMEM;
+ return status;
+}
+
+static irqreturn_t rcar_thermal_irq(int irq, void *data)
+{
+ struct rcar_thermal_common *common = data;
+ struct rcar_thermal_priv *priv;
+ unsigned long flags;
+ u32 status, mask;
+
+ spin_lock_irqsave(&common->lock, flags);
+
+ mask = rcar_thermal_common_read(common, INTMSK);
+ status = rcar_thermal_common_read(common, STR);
+ rcar_thermal_common_write(common, STR, 0x000F0F0F & mask);
+
+ spin_unlock_irqrestore(&common->lock, flags);
+
+ status = status & ~mask;
+
+ /*
+ * check the status
+ */
+ rcar_thermal_for_each_priv(priv, common) {
+ if (rcar_thermal_had_changed(priv, status)) {
+ rcar_thermal_irq_disable(priv);
+ schedule_delayed_work(&priv->work,
+ msecs_to_jiffies(300));
+ }
}
- priv->comp = 4; /* basic setup */
- priv->dev = &pdev->dev;
- spin_lock_init(&priv->lock);
- priv->base = devm_ioremap_nocache(&pdev->dev,
- res->start, resource_size(res));
- if (!priv->base) {
- dev_err(&pdev->dev, "Unable to ioremap thermal register\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * platform functions
+ */
+static int rcar_thermal_probe(struct platform_device *pdev)
+{
+ struct rcar_thermal_common *common;
+ struct rcar_thermal_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct resource *res, *irq;
+ int mres = 0;
+ int i;
+ int idle = IDLE_INTERVAL;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common) {
+ dev_err(dev, "Could not allocate common\n");
return -ENOMEM;
}
- zone = thermal_zone_device_register("rcar_thermal", 0, 0, priv,
- &rcar_thermal_zone_ops, NULL, 0, 0);
- if (IS_ERR(zone)) {
- dev_err(&pdev->dev, "thermal zone device is NULL\n");
- return PTR_ERR(zone);
+ INIT_LIST_HEAD(&common->head);
+ spin_lock_init(&common->lock);
+ common->dev = dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq) {
+ int ret;
+
+ /*
+		 * The platform has IRQ support,
+		 * so the driver uses the common registers.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res) {
+ dev_err(dev, "Could not get platform resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
+ dev_name(dev), common);
+ if (ret) {
+			dev_err(dev, "irq request failed\n");
+ return ret;
+ }
+
+ /*
+		 * rcar_has_irq_support() will now return true
+ */
+ common->base = devm_request_and_ioremap(dev, res);
+ if (!common->base) {
+ dev_err(dev, "Unable to ioremap thermal register\n");
+ return -ENOMEM;
+ }
+
+		/* enable temperature comparison */
+ rcar_thermal_common_write(common, ENR, 0x00030303);
+
+		idle = 0; /* polling delay is not needed */
}
- platform_set_drvdata(pdev, zone);
+ for (i = 0;; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++);
+ if (!res)
+ break;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "Could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ priv->base = devm_request_and_ioremap(dev, res);
+ if (!priv->base) {
+ dev_err(dev, "Unable to ioremap priv register\n");
+ return -ENOMEM;
+ }
- dev_info(&pdev->dev, "proved\n");
+ priv->common = common;
+ priv->id = i;
+ mutex_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->list);
+ INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
+ rcar_thermal_update_temp(priv);
+
+ priv->zone = thermal_zone_device_register("rcar_thermal",
+ 1, 0, priv,
+ &rcar_thermal_zone_ops, NULL, 0,
+ idle);
+ if (IS_ERR(priv->zone)) {
+ dev_err(dev, "can't register thermal zone\n");
+ goto error_unregister;
+ }
+
+ list_move_tail(&priv->list, &common->head);
+
+ if (rcar_has_irq_support(priv))
+ rcar_thermal_irq_enable(priv);
+ }
+
+ platform_set_drvdata(pdev, common);
+
+	dev_info(dev, "%d sensor probed\n", i);
return 0;
+
+error_unregister:
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
+
+ return -ENODEV;
}
static int rcar_thermal_remove(struct platform_device *pdev)
{
- struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+ struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+ struct rcar_thermal_priv *priv;
+
+ rcar_thermal_for_each_priv(priv, common)
+ thermal_zone_device_unregister(priv->zone);
- thermal_zone_device_unregister(zone);
platform_set_drvdata(pdev, NULL);
return 0;
}
+static const struct of_device_id rcar_thermal_dt_ids[] = {
+ { .compatible = "renesas,rcar-thermal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
+
static struct platform_driver rcar_thermal_driver = {
.driver = {
.name = "rcar_thermal",
+ .of_match_table = rcar_thermal_dt_ids,
},
.probe = rcar_thermal_probe,
.remove = rcar_thermal_remove,
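
The new rcar_thermal_update_temp() above samples the THSSR.CTEMP field after a 300 us settling delay until two successive reads agree, then caches the code and later converts it with MCELSIUS((ctemp * 5) - 65). The following is a minimal standalone sketch of that convergence-and-convert idea; read_thssr_ctemp() is a hypothetical stub standing in for the register read, not driver code.

/*
 * Sketch only: "wait until two successive samples agree, then convert
 * the code to millidegree Celsius", assuming a fake register stub.
 */
#include <stdio.h>

static int fake_samples[] = { 25, 26, 27, 27 };	/* pretend hardware settles at 27 */
static unsigned int sample_idx;

static int read_thssr_ctemp(void)		/* hypothetical register stub */
{
	int n = sizeof(fake_samples) / sizeof(fake_samples[0]);

	return fake_samples[sample_idx < n - 1 ? sample_idx++ : n - 1];
}

int main(void)
{
	int i, new, old = ~0, ctemp = 0;

	for (i = 0; i < 128; i++) {
		/* the driver waits 300us here; omitted in this sketch */
		new = read_thssr_ctemp();
		if (new == old) {
			ctemp = new;
			break;
		}
		old = new;
	}
	if (!ctemp)
		return 1;	/* sensor never settled */

	/* same conversion as MCELSIUS((ctemp * 5) - 65) in the driver */
	printf("ctemp=%d -> %d mC\n", ctemp, ((ctemp * 5) - 65) * 1000);
	return 0;
}
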
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6b2d8b21aae..3c5ee560797 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -131,7 +131,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
}
- stdev->clk = clk_get(&pdev->dev, NULL);
+ stdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(stdev->clk)) {
dev_err(&pdev->dev, "Can't get clock\n");
return PTR_ERR(stdev->clk);
@@ -140,7 +140,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
ret = clk_enable(stdev->clk);
if (ret) {
dev_err(&pdev->dev, "Can't enable clock\n");
- goto put_clk;
+ return ret;
}
stdev->flags = val;
@@ -163,8 +163,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
disable_clk:
clk_disable(stdev->clk);
-put_clk:
- clk_put(stdev->clk);
return ret;
}
@@ -183,7 +181,6 @@ static int spear_thermal_exit(struct platform_device *pdev)
writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
clk_disable(stdev->clk);
- clk_put(stdev->clk);
return 0;
}
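
The spear_thermal change above replaces clk_get()/clk_put() with devm_clk_get(), so the clock reference is released automatically when probe fails or the device is removed and the put_clk unwind label can go away. Below is a minimal kernel-style sketch of that pattern; struct my_thermal_dev and my_thermal_probe() are hypothetical names for illustration, not the actual SPEAr driver code.

/* Sketch only: managed clock lookup removes the manual clk_put() unwind. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_thermal_dev {
	struct clk *clk;
};

static int my_thermal_probe(struct platform_device *pdev)
{
	struct my_thermal_dev *tdev;
	int ret;

	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	/* managed: released automatically on probe failure or removal */
	tdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdev->clk))
		return PTR_ERR(tdev->clk);

	ret = clk_enable(tdev->clk);
	if (ret)
		return ret;	/* no clk_put()/goto unwind needed */

	platform_set_drvdata(pdev, tdev);
	return 0;
}
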
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 0cd5e9fbab1..407cde3211c 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -35,21 +35,54 @@
* state for this trip point
* b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
* state for this trip point
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
+ * for this trip point
+ * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
+ * for this trip point
+ * If the temperature is lower than a trip point,
+ * a. if the trend is THERMAL_TREND_RAISING, do nothing
+ * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+ *       state for this trip point; if the cooling state already
+ *       equals the lower limit, deactivate the thermal instance
+ * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
+ *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit;
+ *       if the cooling state already equals the lower limit,
+ *       deactivate the thermal instance
*/
static unsigned long get_target_state(struct thermal_instance *instance,
- enum thermal_trend trend)
+ enum thermal_trend trend, bool throttle)
{
struct thermal_cooling_device *cdev = instance->cdev;
unsigned long cur_state;
cdev->ops->get_cur_state(cdev, &cur_state);
- if (trend == THERMAL_TREND_RAISING) {
- cur_state = cur_state < instance->upper ?
- (cur_state + 1) : instance->upper;
- } else if (trend == THERMAL_TREND_DROPPING) {
- cur_state = cur_state > instance->lower ?
- (cur_state - 1) : instance->lower;
+ switch (trend) {
+ case THERMAL_TREND_RAISING:
+ if (throttle)
+ cur_state = cur_state < instance->upper ?
+ (cur_state + 1) : instance->upper;
+ break;
+ case THERMAL_TREND_RAISE_FULL:
+ if (throttle)
+ cur_state = instance->upper;
+ break;
+ case THERMAL_TREND_DROPPING:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state -= 1;
+ break;
+ case THERMAL_TREND_DROP_FULL:
+ if (cur_state == instance->lower) {
+ if (!throttle)
+ cur_state = -1;
+ } else
+ cur_state = instance->lower;
+ break;
+ default:
+ break;
}
return cur_state;
@@ -66,57 +99,14 @@ static void update_passive_instance(struct thermal_zone_device *tz,
tz->passive += value;
}
-static void update_instance_for_throttle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type,
- enum thermal_trend trend)
-{
- struct thermal_instance *instance;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
- continue;
-
- instance->target = get_target_state(instance, trend);
-
- /* Activate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, 1);
-
- instance->cdev->updated = false; /* cdev needs update */
- }
-}
-
-static void update_instance_for_dethrottle(struct thermal_zone_device *tz,
- int trip, enum thermal_trip_type trip_type)
-{
- struct thermal_instance *instance;
- struct thermal_cooling_device *cdev;
- unsigned long cur_state;
-
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip ||
- instance->target == THERMAL_NO_TARGET)
- continue;
-
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &cur_state);
-
- instance->target = cur_state > instance->lower ?
- (cur_state - 1) : THERMAL_NO_TARGET;
-
- /* Deactivate a passive thermal instance */
- if (instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, -1);
-
- cdev->updated = false; /* cdev needs update */
- }
-}
-
static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
{
long trip_temp;
enum thermal_trip_type trip_type;
enum thermal_trend trend;
+ struct thermal_instance *instance;
+ bool throttle = false;
+ int old_target;
if (trip == THERMAL_TRIPS_NONE) {
trip_temp = tz->forced_passive;
@@ -128,12 +118,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trend = get_tz_trend(tz, trip);
+ if (tz->temperature >= trip_temp)
+ throttle = true;
+
mutex_lock(&tz->lock);
- if (tz->temperature >= trip_temp)
- update_instance_for_throttle(tz, trip, trip_type, trend);
- else
- update_instance_for_dethrottle(tz, trip, trip_type);
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ old_target = instance->target;
+ instance->target = get_target_state(instance, trend, throttle);
+
+ /* Activate a passive thermal instance */
+ if (old_target == THERMAL_NO_TARGET &&
+ instance->target != THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, 1);
+ /* Deactivate a passive thermal instance */
+ else if (old_target != THERMAL_NO_TARGET &&
+ instance->target == THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, -1);
+
+ instance->cdev->updated = false; /* cdev needs update */
+ }
mutex_unlock(&tz->lock);
}
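
The reworked get_target_state() above folds throttling and dethrottling into one switch over the trend: RAISE_FULL/DROP_FULL jump straight to the upper/lower limits, and a DROPPING or DROP_FULL trend at the lower limit clears the target when the zone is no longer throttling. The standalone sketch below mirrors that decision table with simplified names (the enum values and NO_TARGET marker are stand-ins, not the kernel's).

/* Sketch only: step_wise target-state selection, simplified types. */
#include <stdio.h>

enum trend { RAISING, RAISE_FULL, DROPPING, DROP_FULL };
#define NO_TARGET (-1L)

static long next_state(long cur, long lower, long upper,
		       enum trend trend, int throttle)
{
	switch (trend) {
	case RAISING:
		if (throttle)
			cur = cur < upper ? cur + 1 : upper;
		break;
	case RAISE_FULL:
		if (throttle)
			cur = upper;
		break;
	case DROPPING:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;	/* deactivate instance */
		} else {
			cur -= 1;
		}
		break;
	case DROP_FULL:
		if (cur == lower) {
			if (!throttle)
				cur = NO_TARGET;
		} else {
			cur = lower;
		}
		break;
	}
	return cur;
}

int main(void)
{
	/* cooling states 0..3: step up while hot, drop while cooling */
	printf("%ld\n", next_state(1, 0, 3, RAISING, 1));	/* -> 2 */
	printf("%ld\n", next_state(2, 0, 3, DROP_FULL, 1));	/* -> 0 */
	printf("%ld\n", next_state(0, 0, 3, DROPPING, 0));	/* -> -1 (no target) */
	return 0;
}
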
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 8c8ce806180..5b7863a03f9 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,7 +32,6 @@
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/thermal.h>
-#include <linux/spinlock.h>
#include <linux/reboot.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -132,23 +131,16 @@ EXPORT_SYMBOL_GPL(thermal_unregister_governor);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
- int err;
-
-again:
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
+ int ret;
if (lock)
mutex_lock(lock);
- err = idr_get_new(idr, NULL, id);
+ ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
if (lock)
mutex_unlock(lock);
- if (unlikely(err == -EAGAIN))
- goto again;
- else if (unlikely(err))
- return err;
-
- *id = *id & MAX_IDR_MASK;
+ if (unlikely(ret < 0))
+ return ret;
+ *id = ret;
return 0;
}
@@ -355,8 +347,9 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->notify(tz, trip, trip_type);
if (trip_type == THERMAL_TRIP_CRITICAL) {
- pr_emerg("Critical temperature reached(%d C),shutting down\n",
- tz->temperature / 1000);
+ dev_emerg(&tz->device,
+			  "critical temperature reached (%d C), shutting down\n",
+ tz->temperature / 1000);
orderly_poweroff(true);
}
}
@@ -378,23 +371,57 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
monitor_thermal_zone(tz);
}
+static int thermal_zone_get_temp(struct thermal_zone_device *tz,
+ unsigned long *temp)
+{
+ int ret = 0;
+#ifdef CONFIG_THERMAL_EMULATION
+ int count;
+ unsigned long crit_temp = -1UL;
+ enum thermal_trip_type type;
+#endif
+
+ mutex_lock(&tz->lock);
+
+ ret = tz->ops->get_temp(tz, temp);
+#ifdef CONFIG_THERMAL_EMULATION
+ if (!tz->emul_temperature)
+ goto skip_emul;
+
+ for (count = 0; count < tz->trips; count++) {
+ ret = tz->ops->get_trip_type(tz, count, &type);
+ if (!ret && type == THERMAL_TRIP_CRITICAL) {
+ ret = tz->ops->get_trip_temp(tz, count, &crit_temp);
+ break;
+ }
+ }
+
+ if (ret)
+ goto skip_emul;
+
+ if (*temp < crit_temp)
+ *temp = tz->emul_temperature;
+skip_emul:
+#endif
+ mutex_unlock(&tz->lock);
+ return ret;
+}
+
static void update_temperature(struct thermal_zone_device *tz)
{
long temp;
int ret;
- mutex_lock(&tz->lock);
-
- ret = tz->ops->get_temp(tz, &temp);
+ ret = thermal_zone_get_temp(tz, &temp);
if (ret) {
- pr_warn("failed to read out thermal zone %d\n", tz->id);
- goto exit;
+ dev_warn(&tz->device, "failed to read out thermal zone %d\n",
+ tz->id);
+ return;
}
+ mutex_lock(&tz->lock);
tz->last_temperature = tz->temperature;
tz->temperature = temp;
-
-exit:
mutex_unlock(&tz->lock);
}
@@ -437,10 +464,7 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
long temperature;
int ret;
- if (!tz->ops->get_temp)
- return -EPERM;
-
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -700,6 +724,31 @@ policy_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%s\n", tz->governor->name);
}
+#ifdef CONFIG_THERMAL_EMULATION
+static ssize_t
+emul_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int ret = 0;
+ unsigned long temperature;
+
+ if (kstrtoul(buf, 10, &temperature))
+ return -EINVAL;
+
+ if (!tz->ops->set_emul_temp) {
+ mutex_lock(&tz->lock);
+ tz->emul_temperature = temperature;
+ mutex_unlock(&tz->lock);
+ } else {
+ ret = tz->ops->set_emul_temp(tz, temperature);
+ }
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+#endif /* CONFIG_THERMAL_EMULATION */
+
static DEVICE_ATTR(type, 0444, type_show, NULL);
static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
@@ -842,7 +891,7 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
temp_input);
struct thermal_zone_device *tz = temp->tz;
- ret = tz->ops->get_temp(tz, &temperature);
+ ret = thermal_zone_get_temp(tz, &temperature);
if (ret)
return ret;
@@ -1529,6 +1578,9 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (!ops || !ops->get_temp)
return ERR_PTR(-EINVAL);
+ if (trips > 0 && !ops->get_trip_type)
+ return ERR_PTR(-EINVAL);
+
tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1592,6 +1644,11 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
}
+#ifdef CONFIG_THERMAL_EMULATION
+ result = device_create_file(&tz->device, &dev_attr_emul_temp);
+ if (result)
+ goto unregister;
+#endif
/* Create policy attribute */
result = device_create_file(&tz->device, &dev_attr_policy);
if (result)
@@ -1711,7 +1768,8 @@ static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
-int thermal_generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event)
{
struct sk_buff *skb;
struct nlattr *attr;
@@ -1721,6 +1779,9 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
int result;
static unsigned int thermal_event_seqnum;
+ if (!tz)
+ return -EINVAL;
+
/* allocate memory */
size = nla_total_size(sizeof(struct thermal_genl_event)) +
nla_total_size(0);
@@ -1755,7 +1816,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
memset(thermal_event, 0, sizeof(struct thermal_genl_event));
- thermal_event->orig = orig;
+ thermal_event->orig = tz->id;
thermal_event->event = event;
/* send multicast genetlink message */
@@ -1767,7 +1828,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
if (result)
- pr_info("failed to send netlink event:%d\n", result);
+		dev_err(&tz->device, "Failed to send netlink event: %d\n", result);
return result;
}
@@ -1807,6 +1868,7 @@ static int __init thermal_init(void)
idr_destroy(&thermal_cdev_idr);
mutex_destroy(&thermal_idr_lock);
mutex_destroy(&thermal_list_lock);
+ return result;
}
result = genetlink_init();
return result;
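
The new thermal_zone_get_temp() wrapper above lets a value written through the emul_temp attribute override the sensor reading, but only while the real temperature stays below the critical trip, so emulation can never mask a genuine critical condition. A minimal standalone sketch of that override rule follows; plain longs stand in for the zone callbacks, so it is illustrative only.

/* Sketch only: emulated temperature never hides a critical reading. */
#include <stdio.h>

static long effective_temp(long sensor_temp, long emul_temp, long crit_temp)
{
	/* no emulated value set: report the sensor as-is */
	if (!emul_temp)
		return sensor_temp;

	/* below the critical trip, the emulated value wins */
	if (sensor_temp < crit_temp)
		return emul_temp;

	return sensor_temp;
}

int main(void)
{
	/* values in millidegree Celsius, like the thermal core uses */
	printf("%ld\n", effective_temp(45000, 0, 90000));	/* 45000: no emulation */
	printf("%ld\n", effective_temp(45000, 80000, 90000));	/* 80000: emulated */
	printf("%ld\n", effective_temp(95000, 80000, 90000));	/* 95000: real critical wins */
	return 0;
}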