Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig | 27
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 55
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 3
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 4
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 8
-rw-r--r--  drivers/clocksource/clksrc-dbx500-prcmu.c | 5
-rw-r--r--  drivers/clocksource/clksrc-of.c | 4
-rw-r--r--  drivers/clocksource/dw_apb_timer_of.c | 16
-rw-r--r--  drivers/clocksource/em_sti.c | 55
-rw-r--r--  drivers/clocksource/exynos_mct.c | 68
-rw-r--r--  drivers/clocksource/mxs_timer.c | 4
-rw-r--r--  drivers/clocksource/nomadik-mtu.c | 7
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 124
-rw-r--r--  drivers/clocksource/sh_cmt.c | 50
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 16
-rw-r--r--  drivers/clocksource/sh_tmu.c | 20
-rw-r--r--  drivers/clocksource/sun4i_timer.c | 12
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 61
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 8
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 227
-rw-r--r--  drivers/clocksource/time-efm32.c | 275
-rw-r--r--  drivers/clocksource/timer-marco.c | 98
-rw-r--r--  drivers/clocksource/timer-prima2.c | 6
-rw-r--r--  drivers/clocksource/vf_pit_timer.c | 4
-rw-r--r--  drivers/clocksource/vt8500_timer.c | 2
26 files changed, 829 insertions, 331 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b7b9b040a89..5c07a56962d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
config ARMADA_370_XP_TIMER
bool
+ select CLKSRC_OF
config ORION_TIMER
select CLKSRC_OF
@@ -33,6 +34,7 @@ config ORION_TIMER
bool
config SUN4I_TIMER
+ select CLKSRC_MMIO
bool
config VT8500_TIMER
@@ -70,10 +72,34 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
+config CLKSRC_EFM32
+ bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ default ARCH_EFM32
+ help
+ Support to use the timers of EFM32 SoCs as clock source and clock
+ event device.
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
+config ARM_ARCH_TIMER_EVTSTREAM
+ bool "Support for ARM architected timer event stream generation"
+ default y if ARM_ARCH_TIMER
+ depends on ARM_ARCH_TIMER
+ help
+ This option enables support for event stream generation based on
+ the ARM architected timer. It is used for waking up CPUs executing
+ the wfe instruction at a frequency represented as a power-of-2
+ divisor of the clock rate.
+ The main use of the event stream is wfe-based timeouts of userspace
+ locking implementations. It might also be useful for imposing timeout
+ on wfe to safeguard against any programming errors in case an expected
+ event is not generated.
+ This must be disabled for hardware validation purposes to detect any
+ hardware anomalies of missing events.
+
config ARM_GLOBAL_TIMER
bool
select CLKSRC_OF if OF
@@ -99,7 +125,6 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool
- select CLKSRC_MMIO
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 704d6d342ad..33621efb914 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
obj-$(CONFIG_ARCH_BCM) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
+obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fbd9ccd5e11..95fb944e15e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -13,12 +13,14 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/sched_clock.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -294,6 +296,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
+static void arch_timer_configure_evtstream(void)
+{
+ int evt_stream_div, pos;
+
+ /* Find the closest power of two to the divisor */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
+ pos = fls(evt_stream_div);
+ if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
+ pos--;
+ /* enable event stream */
+ arch_timer_evtstrm_enable(min(pos, 15));
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
@@ -307,6 +322,8 @@ static int arch_timer_setup(struct clock_event_device *clk)
}
arch_counter_set_user_access();
+ if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ arch_timer_configure_evtstream();
return 0;
}
@@ -389,7 +406,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static struct cyclecounter cyclecounter = {
@@ -419,6 +436,9 @@ static void __init arch_counter_register(unsigned type)
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter, start_count);
+
+ /* 56 bits minimum, so we assume worst case rollover */
+ sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -460,6 +480,33 @@ static struct notifier_block arch_timer_cpu_nb = {
.notifier_call = arch_timer_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_cntkctl;
+static int arch_timer_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER)
+ saved_cntkctl = arch_timer_get_cntkctl();
+ else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+ arch_timer_set_cntkctl(saved_cntkctl);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_pm_notifier = {
+ .notifier_call = arch_timer_cpu_pm_notify,
+};
+
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
+}
+#else
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return 0;
+}
+#endif
+
static int __init arch_timer_register(void)
{
int err;
@@ -499,11 +546,17 @@ static int __init arch_timer_register(void)
if (err)
goto out_free_irq;
+ err = arch_timer_cpu_pm_init();
+ if (err)
+ goto out_unreg_notify;
+
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
+out_unreg_notify:
+ unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
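The ARM_ARCH_TIMER_EVTSTREAM help text above describes the event stream frequency as a power-of-2 divisor of the counter rate, and arch_timer_configure_evtstream() picks the closest such divisor with fls(). A standalone sketch of that rounding, using a hypothetical 50 MHz counter and an assumed 10 kHz target (both values are illustrative, not taken from this patch):

/*
 * Illustrative only: mirrors the fls()-based rounding in
 * arch_timer_configure_evtstream(); the input values are assumptions.
 */
#include <stdio.h>

static int fls_u32(unsigned int x)              /* same semantics as the kernel's fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int rate = 50000000;           /* hypothetical 50 MHz counter */
        unsigned int target = 10000;            /* assumed event stream target */
        unsigned int evt_stream_div = rate / target;    /* 5000 */
        int pos = fls_u32(evt_stream_div);      /* 13, since 4096 <= 5000 < 8192 */

        /* Step down one bit if that lands closer to the requested divisor */
        if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
                pos--;
        if (pos > 15)                           /* the divider exponent is capped at 15 */
                pos = 15;

        /* Prints 12 -> divide by 4096 for these inputs */
        printf("event stream divider exponent = %d (divide by %u)\n",
               pos, 1u << pos);
        return 0;
}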
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index b66c1f36066..c639b1a9e99 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -169,7 +169,8 @@ static int gt_clockevents_init(struct clock_event_device *clk)
int cpu = smp_processor_id();
clk->name = "arm_global_timer";
- clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERCPU;
clk->set_mode = gt_clockevent_set_mode;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 07ea7ce900d..26ed331b1aa 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -49,7 +49,7 @@ struct bcm2835_timer {
static void __iomem *system_clock __read_mostly;
-static u32 notrace bcm2835_sched_read(void)
+static u64 notrace bcm2835_sched_read(void)
{
return readl_relaxed(system_clock);
}
@@ -110,7 +110,7 @@ static void __init bcm2835_timer_init(struct device_node *node)
panic("Can't read clock-frequency");
system_clock = base + REG_COUNTER_LO;
- setup_sched_clock(bcm2835_sched_read, 32, freq);
+ sched_clock_register(bcm2835_sched_read, 32, freq);
clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
freq, 300, 32, clocksource_mmio_readl_up);
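Several of the drivers touched here get the same two-line conversion seen in bcm2835_timer.c: the read hook now returns u64 and registration moves from setup_sched_clock() to sched_clock_register(). A minimal sketch of the new-style registration for a hypothetical free-running 32-bit up-counting MMIO register (the names and rate below are placeholders, not taken from any driver in this series):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_counter;           /* hypothetical counter register */

/* The callback now returns u64; the core masks it to the registered width. */
static u64 notrace example_sched_read(void)
{
        return readl_relaxed(example_counter);
}

static void __init example_sched_clock_init(void __iomem *base, unsigned long rate)
{
        example_counter = base;
        /* 32 valid bits at 'rate' Hz; wrap extension is handled by the core */
        sched_clock_register(example_sched_read, 32, rate);
}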
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index ba3d85904c9..0d7d8c3ed6b 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,7 +99,8 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
}
static const struct of_device_id bcm_timer_ids[] __initconst = {
- {.compatible = "bcm,kona-timer"},
+ {.compatible = "brcm,kona-timer"},
+ {.compatible = "bcm,kona-timer"}, /* deprecated name */
{},
};
@@ -201,4 +202,9 @@ static void __init kona_timer_init(struct device_node *node)
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
}
+CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
+/*
+ * bcm,kona-timer is deprecated by brcm,kona-timer
+ * being kept here for driver compatibility
+ */
CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer", kona_timer_init);
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index a9fd4ad2567..b375106844d 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -53,7 +53,7 @@ static struct clocksource clocksource_dbx500_prcmu = {
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static u32 notrace dbx500_prcmu_sched_clock_read(void)
+static u64 notrace dbx500_prcmu_sched_clock_read(void)
{
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
@@ -81,8 +81,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- setup_sched_clock(dbx500_prcmu_sched_clock_read,
- 32, RATE_32K);
+ sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 37f5325bec9..35639cf4e5a 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -30,7 +30,11 @@ void __init clocksource_of_init(void)
clocksource_of_init_fn init_func;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+ if (!of_device_is_available(np))
+ continue;
+
init_func = match->data;
init_func(np);
+ of_node_put(np);
}
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 4cbae4f762b..45ba8aecc72 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -23,7 +23,7 @@
#include <linux/clk.h>
#include <linux/sched_clock.h>
-static void timer_get_base_and_rate(struct device_node *np,
+static void __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
@@ -55,11 +55,11 @@ static void timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
- of_property_read_u32(np, "clock-frequency", rate))
+ of_property_read_u32(np, "clock-frequency", rate))
panic("No clock nor clock-frequency property for %s", np->name);
}
-static void add_clockevent(struct device_node *event_timer)
+static void __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
@@ -82,7 +82,7 @@ static void add_clockevent(struct device_node *event_timer)
static void __iomem *sched_io_base;
static u32 sched_rate;
-static void add_clocksource(struct device_node *source_timer)
+static void __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
@@ -106,7 +106,7 @@ static void add_clocksource(struct device_node *source_timer)
sched_rate = rate;
}
-static u32 read_sched_clock(void)
+static u64 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
@@ -117,7 +117,7 @@ static const struct of_device_id sptimer_ids[] __initconst = {
{ /* Sentinel */ },
};
-static void init_sched_clock(void)
+static void __init init_sched_clock(void)
{
struct device_node *sched_timer;
@@ -128,7 +128,7 @@ static void init_sched_clock(void)
of_node_put(sched_timer);
}
- setup_sched_clock(read_sched_clock, 32, sched_rate);
+ sched_clock_register(read_sched_clock, 32, sched_rate);
}
static int num_called;
@@ -138,12 +138,10 @@ static void __init dw_apb_timer_init(struct device_node *timer)
case 0:
pr_debug("%s: found clockevent timer\n", __func__);
add_clockevent(timer);
- of_node_put(timer);
break;
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
add_clocksource(timer);
- of_node_put(timer);
init_sched_clock();
break;
default:
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 4329a29a531..9d170834fcf 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -78,7 +78,7 @@ static int em_sti_enable(struct em_sti_priv *p)
int ret;
/* enable clock */
- ret = clk_enable(p->clk);
+ ret = clk_prepare_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
@@ -107,7 +107,7 @@ static void em_sti_disable(struct em_sti_priv *p)
em_sti_write(p, STI_INTENCLR, 3);
/* stop clock */
- clk_disable(p->clk);
+ clk_disable_unprepare(p->clk);
}
static cycle_t em_sti_count(struct em_sti_priv *p)
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
ced->name = dev_name(&p->pdev->dev);
ced->features = CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = em_sti_clock_event_next;
ced->set_mode = em_sti_clock_event_mode;
@@ -315,68 +315,47 @@ static int em_sti_probe(struct platform_device *pdev)
{
struct em_sti_priv *p;
struct resource *res;
- int irq, ret;
+ int irq;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
p->pdev = pdev;
platform_set_drvdata(pdev, p);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- ret = -EINVAL;
- goto err0;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- ret = -EINVAL;
- goto err0;
+ return -EINVAL;
}
/* map memory, let base point to the STI instance */
- p->base = ioremap_nocache(res->start, resource_size(res));
- if (p->base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
- goto err0;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
/* get hold of clock */
- p->clk = clk_get(&pdev->dev, "sclk");
+ p->clk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
- ret = PTR_ERR(p->clk);
- goto err1;
+ return PTR_ERR(p->clk);
}
- if (request_irq(irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p)) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- ret = -ENOENT;
- goto err2;
+ return -ENOENT;
}
raw_spin_lock_init(&p->lock);
em_sti_register_clockevent(p);
em_sti_register_clocksource(p);
return 0;
-
-err2:
- clk_put(p->clk);
-err1:
- iounmap(p->base);
-err0:
- kfree(p);
- return ret;
}
static int em_sti_remove(struct platform_device *pdev)
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b2bbc415f12..62b0de6a183 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
@@ -24,7 +25,6 @@
#include <linux/of_address.h>
#include <linux/clocksource.h>
-#include <asm/localtimer.h>
#include <asm/mach/time.h>
#define EXYNOS4_MCTREG(x) (x)
@@ -80,7 +80,7 @@ static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];
struct mct_clock_event_device {
- struct clock_event_device *evt;
+ struct clock_event_device evt;
unsigned long base;
char name[10];
};
@@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void)
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}
-#ifdef CONFIG_LOCAL_TIMERS
-
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
/* Clock event handling */
@@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
- struct clock_event_device *evt = mevt->evt;
+ struct clock_event_device *evt = &mevt->evt;
/*
* This is for supporting oneshot mode.
@@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
- struct clock_event_device *evt = mevt->evt;
+ struct clock_event_device *evt = &mevt->evt;
exynos4_mct_tick_clear(mevt);
@@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
struct mct_clock_event_device *mevt;
unsigned int cpu = smp_processor_id();
- mevt = this_cpu_ptr(&percpu_mct_tick);
- mevt->evt = evt;
+ mevt = container_of(evt, struct mct_clock_event_device, evt);
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
sprintf(mevt->name, "mct_tick%d", cpu);
@@ -431,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
evt->irq);
return -EIO;
}
- irq_set_affinity(evt->irq, cpumask_of(cpu));
} else {
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
}
@@ -448,14 +444,44 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
-static struct local_timer_ops exynos4_mct_tick_ops = {
- .setup = exynos4_local_timer_setup,
- .stop = exynos4_local_timer_stop,
+static int exynos4_mct_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct mct_clock_event_device *mevt;
+ unsigned int cpu;
+
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_setup(&mevt->evt);
+ break;
+ case CPU_ONLINE:
+ cpu = (unsigned long)hcpu;
+ if (mct_int_type == MCT_INT_SPI)
+ irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
+ cpumask_of(cpu));
+ break;
+ case CPU_DYING:
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_stop(&mevt->evt);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_mct_cpu_nb = {
+ .notifier_call = exynos4_mct_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
+ int err;
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
struct clk *mct_clk, *tick_clk;
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -473,19 +499,27 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
if (!reg_base)
panic("%s: unable to ioremap mct address space\n", __func__);
-#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
- int err;
err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
exynos4_mct_tick_isr, "MCT",
&percpu_mct_tick);
WARN(err, "MCT: can't request IRQ %d (%d)\n",
mct_irqs[MCT_L0_IRQ], err);
+ } else {
+ irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
}
- local_timer_register(&exynos4_mct_tick_ops);
-#endif /* CONFIG_LOCAL_TIMERS */
+ err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+ if (err)
+ goto out_irq;
+
+ /* Immediately configure the timer on the boot CPU */
+ exynos4_local_timer_setup(&mevt->evt);
+ return;
+
+out_irq:
+ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
}
void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
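The exynos_mct.c conversion above (and the Armada 370/XP one later in this series) drops the old local_timer_ops registration in favour of a plain CPU notifier: the per-cpu clockevent is set up from CPU_STARTING on the incoming CPU, torn down from CPU_DYING, and the boot CPU is configured directly since it never passes through the notifier. A stripped-down sketch of that shape, with placeholder setup/stop helpers standing in for the driver-specific ones:

#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, example_percpu_evt);

/* Stubs standing in for the driver's real per-cpu timer programming */
static int example_local_timer_setup(struct clock_event_device *evt) { return 0; }
static void example_local_timer_stop(struct clock_event_device *evt) { }

static int example_timer_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:      /* runs on the incoming CPU with irqs off */
                example_local_timer_setup(this_cpu_ptr(&example_percpu_evt));
                break;
        case CPU_DYING:         /* runs on the outgoing CPU before it goes away */
                example_local_timer_stop(this_cpu_ptr(&example_percpu_evt));
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block example_timer_cpu_nb = {
        .notifier_call = example_timer_cpu_notify,
};

static void __init example_timer_register(void)
{
        register_cpu_notifier(&example_timer_cpu_nb);
        /* The boot CPU is already online, so configure its timer here */
        example_local_timer_setup(this_cpu_ptr(&example_percpu_evt));
}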
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 0f5e65f74dc..445b68a01dc 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -222,7 +222,7 @@ static struct clocksource clocksource_mxs = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u32 notrace mxs_read_sched_clock_v2(void)
+static u64 notrace mxs_read_sched_clock_v2(void)
{
return ~readl_relaxed(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1));
}
@@ -236,7 +236,7 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
else {
clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);
- setup_sched_clock(mxs_read_sched_clock_v2, 32, c);
+ sched_clock_register(mxs_read_sched_clock_v2, 32, c);
}
return 0;
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 7d2c2c56f73..ed7b73b508e 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -76,7 +76,7 @@ static struct delay_timer mtu_delay_timer;
* local implementation which uses the clocksource to get some
* better resolution when scheduling the kernel.
*/
-static u32 notrace nomadik_read_sched_clock(void)
+static u64 notrace nomadik_read_sched_clock(void)
{
if (unlikely(!mtu_base))
return 0;
@@ -165,7 +165,8 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev)
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DYNIRQ,
.rating = 200,
.set_mode = nmdk_clkevt_mode,
.set_next_event = nmdk_clkevt_next,
@@ -230,7 +231,7 @@ static void __init __nmdk_timer_init(void __iomem *base, int irq,
"mtu_0");
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
- setup_sched_clock(nomadik_read_sched_clock, 32, rate);
+ sched_clock_register(nomadik_read_sched_clock, 32, rate);
#endif
/* Timer 1 is used for events, register irq and clockevents */
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 584b5472eea..85082e8d305 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -44,16 +44,28 @@
#define TCFG1_SHIFT(x) ((x) * 4)
#define TCFG1_MUX_MASK 0xf
+/*
+ * Each channel occupies 4 bits in TCON register, but there is a gap of 4
+ * bits (one channel) after channel 0, so channels have different numbering
+ * when accessing TCON register.
+ *
+ * In addition, the location of autoreload bit for channel 4 (TCON channel 5)
+ * in its set of bits is 2 as opposed to 3 for other channels.
+ */
#define TCON_START(chan) (1 << (4 * (chan) + 0))
#define TCON_MANUALUPDATE(chan) (1 << (4 * (chan) + 1))
#define TCON_INVERT(chan) (1 << (4 * (chan) + 2))
-#define TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3))
+#define _TCON_AUTORELOAD(chan) (1 << (4 * (chan) + 3))
+#define _TCON_AUTORELOAD4(chan) (1 << (4 * (chan) + 2))
+#define TCON_AUTORELOAD(chan) \
+ ((chan < 5) ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan))
DEFINE_SPINLOCK(samsung_pwm_lock);
EXPORT_SYMBOL(samsung_pwm_lock);
struct samsung_pwm_clocksource {
void __iomem *base;
+ void __iomem *source_reg;
unsigned int irq[SAMSUNG_PWM_NUM];
struct samsung_pwm_variant variant;
@@ -195,17 +207,6 @@ static int samsung_set_next_event(unsigned long cycles,
return 0;
}
-static void samsung_timer_resume(void)
-{
- /* event timer restart */
- samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
- samsung_time_start(pwm.event_id, true);
-
- /* source timer restart */
- samsung_time_setup(pwm.source_id, pwm.tcnt_max);
- samsung_time_start(pwm.source_id, true);
-}
-
static void samsung_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
@@ -222,20 +223,29 @@ static void samsung_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- break;
-
case CLOCK_EVT_MODE_RESUME:
- samsung_timer_resume();
break;
}
}
+static void samsung_clockevent_resume(struct clock_event_device *cev)
+{
+ samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
+ samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);
+
+ if (pwm.variant.has_tint_cstat) {
+ u32 mask = (1 << pwm.event_id);
+ writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
+ }
+}
+
static struct clock_event_device time_event_device = {
.name = "samsung_event_timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_next_event = samsung_set_next_event,
.set_mode = samsung_set_mode,
+ .resume = samsung_clockevent_resume,
};
static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
@@ -286,23 +296,34 @@ static void __init samsung_clockevent_init(void)
}
}
-static void __iomem *samsung_timer_reg(void)
+static void samsung_clocksource_suspend(struct clocksource *cs)
{
- switch (pwm.source_id) {
- case 0:
- case 1:
- case 2:
- case 3:
- return pwm.base + pwm.source_id * 0x0c + 0x14;
-
- case 4:
- return pwm.base + 0x40;
-
- default:
- BUG();
- }
+ samsung_time_stop(pwm.source_id);
}
+static void samsung_clocksource_resume(struct clocksource *cs)
+{
+ samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
+ samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);
+
+ samsung_time_setup(pwm.source_id, pwm.tcnt_max);
+ samsung_time_start(pwm.source_id, true);
+}
+
+static cycle_t samsung_clocksource_read(struct clocksource *c)
+{
+ return ~readl_relaxed(pwm.source_reg);
+}
+
+static struct clocksource samsung_clocksource = {
+ .name = "samsung_clocksource_timer",
+ .rating = 250,
+ .read = samsung_clocksource_read,
+ .suspend = samsung_clocksource_suspend,
+ .resume = samsung_clocksource_resume,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
@@ -310,19 +331,13 @@ static void __iomem *samsung_timer_reg(void)
* this wraps around for now, since it is just a relative time
* stamp. (Inspired by U300 implementation.)
*/
-static u32 notrace samsung_read_sched_clock(void)
+static u64 notrace samsung_read_sched_clock(void)
{
- void __iomem *reg = samsung_timer_reg();
-
- if (!reg)
- return 0;
-
- return ~__raw_readl(reg);
+ return samsung_clocksource_read(NULL);
}
static void __init samsung_clocksource_init(void)
{
- void __iomem *reg = samsung_timer_reg();
unsigned long pclk;
unsigned long clock_rate;
int ret;
@@ -337,22 +352,22 @@ static void __init samsung_clocksource_init(void)
samsung_time_setup(pwm.source_id, pwm.tcnt_max);
samsung_time_start(pwm.source_id, true);
- setup_sched_clock(samsung_read_sched_clock,
+ if (pwm.source_id == 4)
+ pwm.source_reg = pwm.base + 0x40;
+ else
+ pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
+
+ sched_clock_register(samsung_read_sched_clock,
pwm.variant.bits, clock_rate);
- ret = clocksource_mmio_init(reg, "samsung_clocksource_timer",
- clock_rate, 250, pwm.variant.bits,
- clocksource_mmio_readl_down);
+ samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
+ ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
if (ret)
panic("samsung_clocksource_timer: can't register clocksource\n");
}
static void __init samsung_timer_resources(void)
{
- pwm.timerclk = clk_get(NULL, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
-
clk_prepare_enable(pwm.timerclk);
pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
@@ -397,6 +412,10 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));
+ pwm.timerclk = clk_get(NULL, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
@@ -404,7 +423,6 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
static void __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
{
- struct resource res;
struct property *prop;
const __be32 *cur;
u32 val;
@@ -423,20 +441,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
pwm.variant.output_mask |= 1 << val;
}
- of_address_to_resource(np, 0, &res);
- if (!request_mem_region(res.start,
- resource_size(&res), "samsung-pwm")) {
- pr_err("%s: failed to request IO mem region\n", __func__);
- return;
- }
-
- pwm.base = ioremap(res.start, resource_size(&res));
+ pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
- release_mem_region(res.start, resource_size(&res));
return;
}
+ pwm.timerclk = of_clk_get_by_name(np, "timers");
+ if (IS_ERR(pwm.timerclk))
+ panic("failed to get timers clock for timer");
+
_samsung_pwm_clocksource_init();
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 08d0c418c94..0965e9848b3 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -37,6 +37,7 @@
struct sh_cmt_priv {
void __iomem *mapbase;
+ void __iomem *mapbase_str;
struct clk *clk;
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
@@ -79,6 +80,12 @@ struct sh_cmt_priv {
* CMCSR 0xffca0060 16-bit
* CMCNT 0xffca0064 32-bit
* CMCOR 0xffca0068 32-bit
+ *
+ * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
+ * CMSTR 0xffca0500 32-bit
+ * CMCSR 0xffca0510 32-bit
+ * CMCNT 0xffca0514 32-bit
+ * CMCOR 0xffca0518 32-bit
*/
static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
@@ -109,9 +116,7 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs,
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
- return p->read_control(p->mapbase - cfg->channel_offset, 0);
+ return p->read_control(p->mapbase_str, 0);
}
static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
@@ -127,9 +132,7 @@ static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
unsigned long value)
{
- struct sh_timer_config *cfg = p->pdev->dev.platform_data;
-
- p->write_control(p->mapbase - cfg->channel_offset, 0, value);
+ p->write_control(p->mapbase_str, 0, value);
}
static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
@@ -676,7 +679,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
struct sh_timer_config *cfg = pdev->dev.platform_data;
- struct resource *res;
+ struct resource *res, *res2;
int irq, ret;
ret = -ENXIO;
@@ -694,6 +697,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
+ /* optional resource for the shared timer start/stop register */
+ res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);
+
irq = platform_get_irq(p->pdev, 0);
if (irq < 0) {
dev_err(&p->pdev->dev, "failed to get irq\n");
@@ -707,6 +713,15 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
goto err0;
}
+ /* map second resource for CMSTR */
+ p->mapbase_str = ioremap_nocache(res2 ? res2->start :
+ res->start - cfg->channel_offset,
+ res2 ? resource_size(res2) : 2);
+ if (p->mapbase_str == NULL) {
+ dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
+ goto err1;
+ }
+
/* request irq using setup_irq() (too early for request_irq()) */
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
@@ -719,11 +734,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
if (IS_ERR(p->clk)) {
dev_err(&p->pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
- goto err1;
+ goto err2;
}
- p->read_control = sh_cmt_read16;
- p->write_control = sh_cmt_write16;
+ if (res2 && (resource_size(res2) == 4)) {
+ /* assume both CMSTR and CMCSR to be 32-bit */
+ p->read_control = sh_cmt_read32;
+ p->write_control = sh_cmt_write32;
+ } else {
+ p->read_control = sh_cmt_read16;
+ p->write_control = sh_cmt_write16;
+ }
if (resource_size(res) == 6) {
p->width = 16;
@@ -752,22 +773,23 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
cfg->clocksource_rating);
if (ret) {
dev_err(&p->pdev->dev, "registration failed\n");
- goto err2;
+ goto err3;
}
p->cs_enabled = false;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
- goto err2;
+ goto err3;
}
platform_set_drvdata(pdev, p);
return 0;
-err2:
+err3:
clk_put(p->clk);
-
+err2:
+ iounmap(p->mapbase_str);
err1:
iounmap(p->mapbase);
err0:
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c..3cf12834681 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
goto err1;
}
- return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating);
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err2;
+
+ ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating);
+ if (ret < 0)
+ goto err3;
+
+ return 0;
+ err3:
+ clk_unprepare(p->clk);
+ err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
err0:
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae4962..63557cda0a7 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
ret = PTR_ERR(p->clk);
goto err1;
}
+
+ ret = clk_prepare(p->clk);
+ if (ret < 0)
+ goto err2;
+
p->cs_enabled = false;
p->enable_count = 0;
- return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
- cfg->clockevent_rating,
- cfg->clocksource_rating);
+ ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
+ cfg->clockevent_rating,
+ cfg->clocksource_rating);
+ if (ret < 0)
+ goto err3;
+
+ return 0;
+
+ err3:
+ clk_unprepare(p->clk);
+ err2:
+ clk_put(p->clk);
err1:
iounmap(p->mapbase);
err0:
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 8ead0258740..2fb4695a28d 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -37,6 +37,8 @@
#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
+#define TIMER_SYNC_TICKS 3
+
static void __iomem *timer_base;
static u32 ticks_per_jiffy;
@@ -50,7 +52,7 @@ static void sun4i_clkevt_sync(void)
{
u32 old = readl(timer_base + TIMER_CNTVAL_REG(1));
- while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3)
+ while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
@@ -104,7 +106,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
sun4i_clkevt_time_stop(0);
- sun4i_clkevt_time_setup(0, evt);
+ sun4i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
sun4i_clkevt_time_start(0, false);
return 0;
@@ -131,7 +133,7 @@ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
static struct irqaction sun4i_timer_irq = {
.name = "sun4i_timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sun4i_timer_interrupt,
.dev_id = &sun4i_clockevent,
};
@@ -187,8 +189,8 @@ static void __init sun4i_timer_init(struct device_node *node)
sun4i_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
- 0xffffffff);
+ clockevents_config_and_register(&sun4i_clockevent, rate,
+ TIMER_SYNC_TICKS, 0xffffffff);
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8a6187225dd..00fdd117028 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ clk_disable_unprepare(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -180,15 +180,22 @@ static irqreturn_t ch2_irq(int irq, void *handle)
static struct irqaction tc_irqaction = {
.name = "tc_clkevt",
- .flags = IRQF_TIMER | IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.handler = ch2_irq,
};
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
+ int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
+ /* try to enable t2 clk to avoid future errors in mode change */
+ ret = clk_prepare_enable(t2_clk);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(t2_clk);
+
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
@@ -197,16 +204,21 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clkevt.cpumask = cpumask_of(0);
+ ret = setup_irq(irq, &tc_irqaction);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
- setup_irq(irq, &tc_irqaction);
+ return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
/* NOTHING */
+ return 0;
}
#endif
@@ -265,6 +277,7 @@ static int __init tcb_clksrc_init(void)
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
int i;
+ int ret;
tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
if (!tc) {
@@ -275,7 +288,11 @@ static int __init tcb_clksrc_init(void)
pdev = tc->pdev;
t0_clk = tc->clk[0];
- clk_enable(t0_clk);
+ ret = clk_prepare_enable(t0_clk);
+ if (ret) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_free_tc;
+ }
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
@@ -313,17 +330,39 @@ static int __init tcb_clksrc_init(void)
/* tclib will give us three clocks no matter what the
* underlying platform supports.
*/
- clk_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc->clk[1]);
+ if (ret) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_disable_t0;
+ }
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(tc, best_divisor_idx);
}
/* and away we go! */
- clocksource_register_hz(&clksrc, divided_rate);
+ ret = clocksource_register_hz(&clksrc, divided_rate);
+ if (ret)
+ goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
+ if (ret)
+ goto err_unregister_clksrc;
return 0;
+
+err_unregister_clksrc:
+ clocksource_unregister(&clksrc);
+
+err_disable_t1:
+ if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
+ clk_disable_unprepare(tc->clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(t0_clk);
+
+err_free_tc:
+ atmel_tc_free(tc);
+ return ret;
}
arch_initcall(tcb_clksrc_init);
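The tcb_clksrc.c changes are typical of the clk_enable() to clk_prepare_enable() conversions in this series: once clock setup can fail, each later failure needs a matching clk_disable_unprepare() on the unwind path. A compressed sketch of that init/unwind shape (all names here are placeholders):

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/init.h>

static int __init example_clksrc_register(struct clk *clk,
                                          struct clocksource *cs,
                                          unsigned long rate)
{
        int ret;

        ret = clk_prepare_enable(clk);          /* prepare may sleep and may fail */
        if (ret)
                return ret;

        ret = clocksource_register_hz(cs, rate);
        if (ret)
                goto err_disable_clk;

        return 0;

err_disable_clk:
        clk_disable_unprepare(clk);             /* undo both prepare and enable */
        return ret;
}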
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 93961703b88..642849256d8 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -98,7 +98,7 @@ static struct clock_event_device tegra_clockevent = {
.set_mode = tegra_timer_set_mode,
};
-static u32 notrace tegra_read_sched_clock(void)
+static u64 notrace tegra_read_sched_clock(void)
{
return timer_readl(TIMERUS_CNTR_1US);
}
@@ -181,8 +181,6 @@ static void __init tegra20_init_timer(struct device_node *np)
rate = clk_get_rate(clk);
}
- of_node_put(np);
-
switch (rate) {
case 12000000:
timer_writel(0x000b, TIMERUS_USEC_CFG);
@@ -200,7 +198,7 @@ static void __init tegra20_init_timer(struct device_node *np)
WARN(1, "Unknown clock rate");
}
- setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+ sched_clock_register(tegra_read_sched_clock, 32, 1000000);
if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
@@ -241,8 +239,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- of_node_put(np);
-
register_persistent_clock(NULL, tegra_read_persistent_clock);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 1b04b7e1d39..d8e47e50278 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -13,12 +13,26 @@
*
* Timer 0 is used as free-running clocksource, while timer 1 is
* used as clock_event_device.
+ *
+ * ---
+ * Clocksource driver for Armada 370 and Armada XP SoC.
+ * This driver implements one compatible string for each SoC, given
+ * each has its own characteristics:
+ *
+ * * Armada 370 has no 25 MHz fixed timer.
+ *
+ * * Armada XP cannot work properly without such 25 MHz fixed timer as
+ * doing otherwise leads to using a clocksource whose frequency varies
+ * when doing cpufreq frequency changes.
+ *
+ * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -28,20 +42,19 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
-
-#include <asm/localtimer.h>
#include <linux/percpu.h>
+
/*
* Timer block registers.
*/
#define TIMER_CTRL_OFF 0x0000
-#define TIMER0_EN 0x0001
-#define TIMER0_RELOAD_EN 0x0002
-#define TIMER0_25MHZ 0x0800
+#define TIMER0_EN BIT(0)
+#define TIMER0_RELOAD_EN BIT(1)
+#define TIMER0_25MHZ BIT(11)
#define TIMER0_DIV(div) ((div) << 19)
-#define TIMER1_EN 0x0004
-#define TIMER1_RELOAD_EN 0x0008
-#define TIMER1_25MHZ 0x1000
+#define TIMER1_EN BIT(2)
+#define TIMER1_RELOAD_EN BIT(3)
+#define TIMER1_25MHZ BIT(12)
#define TIMER1_DIV(div) ((div) << 22)
#define TIMER_EVENTS_STATUS 0x0004
#define TIMER0_CLR_MASK (~0x1)
@@ -69,9 +82,21 @@ static bool timer25Mhz = true;
*/
static u32 ticks_per_jiffy;
-static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+static struct clock_event_device __percpu *armada_370_xp_evt;
+
+static void timer_ctrl_clrset(u32 clr, u32 set)
+{
+ writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
+ timer_base + TIMER_CTRL_OFF);
+}
+
+static void local_timer_ctrl_clrset(u32 clr, u32 set)
+{
+ writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
+ local_base + TIMER_CTRL_OFF);
+}
-static u32 notrace armada_370_xp_read_sched_clock(void)
+static u64 notrace armada_370_xp_read_sched_clock(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
}
@@ -83,7 +108,6 @@ static int
armada_370_xp_clkevt_next_event(unsigned long delta,
struct clock_event_device *dev)
{
- u32 u;
/*
* Clear clockevent timer interrupt.
*/
@@ -97,11 +121,8 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
/*
* Enable the timer.
*/
- u = readl(local_base + TIMER_CTRL_OFF);
- u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT));
- writel(u, local_base + TIMER_CTRL_OFF);
-
+ local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
+ TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
return 0;
}
@@ -109,8 +130,6 @@ static void
armada_370_xp_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
- u32 u;
-
if (mode == CLOCK_EVT_MODE_PERIODIC) {
/*
@@ -122,18 +141,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
/*
* Enable timer.
*/
-
- u = readl(local_base + TIMER_CTRL_OFF);
-
- writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
- local_base + TIMER_CTRL_OFF);
+ local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
+ TIMER0_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
} else {
/*
* Disable timer.
*/
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
+ local_timer_ctrl_clrset(TIMER0_EN, 0);
/*
* ACK pending timer interrupt.
@@ -142,21 +157,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
}
}
-static struct clock_event_device armada_370_xp_clkevt = {
- .name = "armada_370_xp_per_cpu_tick",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .shift = 32,
- .rating = 300,
- .set_next_event = armada_370_xp_clkevt_next_event,
- .set_mode = armada_370_xp_clkevt_mode,
-};
+static int armada_370_xp_clkevt_irq;
static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
/*
* ACK timer interrupt and call event handler.
*/
- struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ struct clock_event_device *evt = dev_id;
writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
evt->event_handler(evt);
@@ -169,96 +177,88 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
*/
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
- u32 u;
+ u32 clr = 0, set = 0;
int cpu = smp_processor_id();
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
-
- u = readl(local_base + TIMER_CTRL_OFF);
if (timer25Mhz)
- writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+ set = TIMER0_25MHZ;
else
- writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
-
- evt->name = armada_370_xp_clkevt.name;
- evt->irq = armada_370_xp_clkevt.irq;
- evt->features = armada_370_xp_clkevt.features;
- evt->shift = armada_370_xp_clkevt.shift;
- evt->rating = armada_370_xp_clkevt.rating,
+ clr = TIMER0_25MHZ;
+ local_timer_ctrl_clrset(clr, set);
+
+ evt->name = "armada_370_xp_per_cpu_tick",
+ evt->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC;
+ evt->shift = 32,
+ evt->rating = 300,
evt->set_next_event = armada_370_xp_clkevt_next_event,
evt->set_mode = armada_370_xp_clkevt_mode,
+ evt->irq = armada_370_xp_clkevt_irq;
evt->cpumask = cpumask_of(cpu);
- *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
-
clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
enable_percpu_irq(evt->irq, 0);
return 0;
}
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static void armada_370_xp_timer_stop(struct clock_event_device *evt)
{
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
disable_percpu_irq(evt->irq);
}
-static struct local_timer_ops armada_370_xp_local_timer_ops = {
- .setup = armada_370_xp_timer_setup,
- .stop = armada_370_xp_timer_stop,
+static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ break;
+ case CPU_DYING:
+ armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_timer_cpu_nb = {
+ .notifier_call = armada_370_xp_timer_cpu_notify,
};
-void __init armada_370_xp_timer_init(void)
+static void __init armada_370_xp_timer_common_init(struct device_node *np)
{
- u32 u;
- struct device_node *np;
+ u32 clr = 0, set = 0;
int res;
- np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
timer_base = of_iomap(np, 0);
WARN_ON(!timer_base);
local_base = of_iomap(np, 1);
- if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
- /* The fixed 25MHz timer is available so let's use it */
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ,
- local_base + TIMER_CTRL_OFF);
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ,
- timer_base + TIMER_CTRL_OFF);
- timer_clk = 25000000;
- } else {
- unsigned long rate = 0;
- struct clk *clk = of_clk_get(np, 0);
- WARN_ON(IS_ERR(clk));
- rate = clk_get_rate(clk);
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ),
- local_base + TIMER_CTRL_OFF);
-
- u = readl(timer_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ),
- timer_base + TIMER_CTRL_OFF);
-
- timer_clk = rate / TIMER_DIVIDER;
- timer25Mhz = false;
- }
+ if (timer25Mhz)
+ set = TIMER0_25MHZ;
+ else
+ clr = TIMER0_25MHZ;
+ timer_ctrl_clrset(clr, set);
+ local_timer_ctrl_clrset(clr, set);
/*
* We use timer 0 as clocksource, and private(local) timer 0
* for clockevents
*/
- armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
+ armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
/*
* Set scale and timer for sched_clock.
*/
- setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
/*
* Setup free-running clocksource timer (interrupts
@@ -267,35 +267,52 @@ void __init armada_370_xp_timer_init(void)
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
- u = readl(timer_base + TIMER_CTRL_OFF);
-
- writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
- TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+ timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT));
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
- /* Register the clockevent on the private timer of CPU 0 */
- armada_370_xp_clkevt.cpumask = cpumask_of(0);
- clockevents_config_and_register(&armada_370_xp_clkevt,
- timer_clk, 1, 0xfffffffe);
+ register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
- percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+ armada_370_xp_evt = alloc_percpu(struct clock_event_device);
/*
* Setup clockevent timer (interrupt-driven).
*/
- *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
- res = request_percpu_irq(armada_370_xp_clkevt.irq,
+ res = request_percpu_irq(armada_370_xp_clkevt_irq,
armada_370_xp_timer_interrupt,
- armada_370_xp_clkevt.name,
- percpu_armada_370_xp_evt);
- if (!res) {
- enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&armada_370_xp_local_timer_ops);
-#endif
- }
+ "armada_370_xp_per_cpu_tick",
+ armada_370_xp_evt);
+ /* Immediately configure the timer on the boot CPU */
+ if (!res)
+ armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+}
+
+static void __init armada_xp_timer_init(struct device_node *np)
+{
+ struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+ /* The 25Mhz fixed clock is mandatory, and must always be available */
+ BUG_ON(IS_ERR(clk));
+ timer_clk = clk_get_rate(clk);
+
+ armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+ armada_xp_timer_init);
+
+static void __init armada_370_timer_init(struct device_node *np)
+{
+ struct clk *clk = of_clk_get(np, 0);
+
+ BUG_ON(IS_ERR(clk));
+ timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
+ timer25Mhz = false;
+
+ armada_370_xp_timer_common_init(np);
}
+CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+ armada_370_timer_init);
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
new file mode 100644
index 00000000000..1a6205b7bed
--- /dev/null
+++ b/drivers/clocksource/time-efm32.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+
+#define TIMERn_CTRL 0x00
+#define TIMERn_CTRL_PRESC(val) (((val) & 0xf) << 24)
+#define TIMERn_CTRL_PRESC_1024 TIMERn_CTRL_PRESC(10)
+#define TIMERn_CTRL_CLKSEL(val) (((val) & 0x3) << 16)
+#define TIMERn_CTRL_CLKSEL_PRESCHFPERCLK TIMERn_CTRL_CLKSEL(0)
+#define TIMERn_CTRL_OSMEN 0x00000010
+#define TIMERn_CTRL_MODE(val) (((val) & 0x3) << 0)
+#define TIMERn_CTRL_MODE_UP TIMERn_CTRL_MODE(0)
+#define TIMERn_CTRL_MODE_DOWN TIMERn_CTRL_MODE(1)
+
+#define TIMERn_CMD 0x04
+#define TIMERn_CMD_START 0x00000001
+#define TIMERn_CMD_STOP 0x00000002
+
+#define TIMERn_IEN 0x0c
+#define TIMERn_IF 0x10
+#define TIMERn_IFS 0x14
+#define TIMERn_IFC 0x18
+#define TIMERn_IRQ_UF 0x00000002
+
+#define TIMERn_TOP 0x1c
+#define TIMERn_CNT 0x24
+
+struct efm32_clock_event_ddata {
+ struct clock_event_device evtdev;
+ void __iomem *base;
+ unsigned periodic_top;
+};
+
+static void efm32_clock_event_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_OSMEN |
+ TIMERn_CTRL_MODE_DOWN,
+ ddata->base + TIMERn_CTRL);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static int efm32_clock_event_set_next_event(unsigned long evt,
+ struct clock_event_device *evtdev)
+{
+ struct efm32_clock_event_ddata *ddata =
+ container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+ writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+ writel_relaxed(evt, ddata->base + TIMERn_CNT);
+ writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+
+ return 0;
+}
+
+static irqreturn_t efm32_clock_event_handler(int irq, void *dev_id)
+{
+ struct efm32_clock_event_ddata *ddata = dev_id;
+
+ writel_relaxed(TIMERn_IRQ_UF, ddata->base + TIMERn_IFC);
+
+ ddata->evtdev.event_handler(&ddata->evtdev);
+
+ return IRQ_HANDLED;
+}
+
+static struct efm32_clock_event_ddata clock_event_ddata = {
+ .evtdev = {
+ .name = "efm32 clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_MODE_PERIODIC,
+ .set_mode = efm32_clock_event_set_mode,
+ .set_next_event = efm32_clock_event_set_next_event,
+ .rating = 200,
+ },
+};
+
+static struct irqaction efm32_clock_event_irq = {
+ .name = "efm32 clockevent",
+ .flags = IRQF_TIMER,
+ .handler = efm32_clock_event_handler,
+ .dev_id = &clock_event_ddata,
+};
+
+static int __init efm32_clocksource_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clocksource (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clocksource (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clocksource\n");
+ goto err_iomap;
+ }
+
+ writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+ TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+ TIMERn_CTRL_MODE_UP, base + TIMERn_CTRL);
+ writel_relaxed(TIMERn_CMD_START, base + TIMERn_CMD);
+
+ ret = clocksource_mmio_init(base + TIMERn_CNT, "efm32 timer",
+ DIV_ROUND_CLOSEST(rate, 1024), 200, 16,
+ clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("failed to init clocksource (%d)\n", ret);
+ goto err_clocksource_init;
+ }
+
+ return 0;
+
+err_clocksource_init:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+static int __init efm32_clockevent_init(struct device_node *np)
+{
+ struct clk *clk;
+ void __iomem *base;
+ unsigned long rate;
+ int irq;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clockevent (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable timer clock for clockevent (%d)\n",
+ ret);
+ goto err_clk_enable;
+ }
+ rate = clk_get_rate(clk);
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map registers for clockevent\n");
+ goto err_iomap;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENOENT;
+ pr_err("failed to get irq for clockevent\n");
+ goto err_get_irq;
+ }
+
+ writel_relaxed(TIMERn_IRQ_UF, base + TIMERn_IEN);
+
+ clock_event_ddata.base = base;
+ clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
+
+ setup_irq(irq, &efm32_clock_event_irq);
+
+ clockevents_config_and_register(&clock_event_ddata.evtdev,
+ DIV_ROUND_CLOSEST(rate, 1024),
+ 0xf, 0xffff);
+
+ return 0;
+
+err_get_irq:
+
+ iounmap(base);
+err_iomap:
+
+ clk_disable_unprepare(clk);
+err_clk_enable:
+
+ clk_put(clk);
+err_clk_get:
+
+ return ret;
+}
+
+/*
+ * Make sure we end up with exactly one clocksource and one
+ * clock_event_device: the first timer node that probes successfully
+ * becomes the clocksource, the next one the clock event device.
+ */
+static void __init efm32_timer_init(struct device_node *np)
+{
+ static int has_clocksource, has_clockevent;
+ int ret;
+
+ if (!has_clocksource) {
+ ret = efm32_clocksource_init(np);
+ if (!ret) {
+ has_clocksource = 1;
+ return;
+ }
+ }
+
+ if (!has_clockevent) {
+ ret = efm32_clockevent_init(np);
+ if (!ret) {
+ has_clockevent = 1;
+ return;
+ }
+ }
+}
+CLOCKSOURCE_OF_DECLARE(efm32, "efm32,timer", efm32_timer_init);
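The new efm32 driver clocks the timer from HFPERCLK through a fixed /1024 prescaler: the clocksource counts up at rate/1024 over 16 bits, while the clock event device counts down and reloads DIV_ROUND_CLOSEST(rate, 1024 * HZ) each period. A minimal userspace sketch of that arithmetic follows; the 48 MHz HFPERCLK and HZ=100 figures are assumptions for illustration, not values taken from the driver.

#include <stdio.h>

/* same rounding as the kernel macro, for unsigned operands */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long rate = 48000000UL;	/* assumed HFPERCLK rate */
	unsigned long hz = 100;			/* assumed CONFIG_HZ */
	unsigned long tick_rate = DIV_ROUND_CLOSEST(rate, 1024);
	unsigned long periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * hz);

	printf("clocksource rate: %lu Hz\n", tick_rate);	/* 46875 */
	printf("16-bit wrap time: %.2f s\n", 65536.0 / tick_rate);
	printf("periodic TOP:     %lu\n", periodic_top);	/* 469 */
	return 0;
}

With the /1024 prescaler the 16-bit counter wraps only after roughly 1.4 s at this assumed rate, which is why the plain clocksource_mmio_readl_up helper is sufficient for the clocksource side.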
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 62876baa3ab..09a17d9a659 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
@@ -18,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/localtimer.h>
#include <asm/mach/time.h>
#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
@@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
}
-static struct clock_event_device sirfsoc_clockevent = {
- .name = "sirfsoc_clockevent",
- .rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sirfsoc_timer_set_mode,
- .set_next_event = sirfsoc_timer_set_next_event,
-};
+static struct clock_event_device __percpu *sirfsoc_clockevent;
static struct clocksource sirfsoc_clocksource = {
.name = "sirfsoc_clocksource",
@@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = {
.name = "sirfsoc_timer0",
.flags = IRQF_TIMER | IRQF_NOBALANCING,
.handler = sirfsoc_timer_interrupt,
- .dev_id = &sirfsoc_clockevent,
};
-#ifdef CONFIG_LOCAL_TIMERS
-
static struct irqaction sirfsoc_timer1_irq = {
.name = "sirfsoc_timer1",
.flags = IRQF_TIMER | IRQF_NOBALANCING,
@@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = {
static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
{
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
+ int cpu = smp_processor_id();
+ struct irqaction *action;
+
+ if (cpu == 0)
+ action = &sirfsoc_timer_irq;
+ else
+ action = &sirfsoc_timer1_irq;
- ce->irq = sirfsoc_timer1_irq.irq;
+ ce->irq = action->irq;
ce->name = "local_timer";
- ce->features = sirfsoc_clockevent.features;
- ce->rating = sirfsoc_clockevent.rating;
+ ce->features = CLOCK_EVT_FEAT_ONESHOT;
+ ce->rating = 200;
ce->set_mode = sirfsoc_timer_set_mode;
ce->set_next_event = sirfsoc_timer_set_next_event;
- ce->shift = sirfsoc_clockevent.shift;
- ce->mult = sirfsoc_clockevent.mult;
- ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns;
- ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns;
+ clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);
+ ce->max_delta_ns = clockevent_delta2ns(-2, ce);
+ ce->min_delta_ns = clockevent_delta2ns(2, ce);
+ ce->cpumask = cpumask_of(cpu);
- sirfsoc_timer1_irq.dev_id = ce;
- BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq));
- irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1));
+ action->dev_id = ce;
+ BUG_ON(setup_irq(ce->irq, action));
+ irq_set_affinity(action->irq, cpumask_of(cpu));
clockevents_register_device(ce);
return 0;
@@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
{
+ int cpu = smp_processor_id();
+
sirfsoc_timer_count_disable(1);
- remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+ if (cpu == 0)
+ remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ else
+ remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
}
-static struct local_timer_ops sirfsoc_local_timer_ops = {
- .setup = sirfsoc_local_timer_setup,
- .stop = sirfsoc_local_timer_stop,
+static int sirfsoc_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ break;
+ case CPU_DYING:
+ sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sirfsoc_cpu_nb = {
+ .notifier_call = sirfsoc_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static void __init sirfsoc_clockevent_init(void)
{
- clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);
-
- sirfsoc_clockevent.max_delta_ns =
- clockevent_delta2ns(-2, &sirfsoc_clockevent);
- sirfsoc_clockevent.min_delta_ns =
- clockevent_delta2ns(2, &sirfsoc_clockevent);
-
- sirfsoc_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&sirfsoc_clockevent);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&sirfsoc_local_timer_ops);
-#endif
+ sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
+ BUG_ON(!sirfsoc_clockevent);
+
+ BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
+
+ /* Immediately configure the timer on the boot CPU */
+ sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}
/* initialize the kernel jiffy timer source */
@@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
- BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
-
sirfsoc_clockevent_init();
}
@@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
if (!sirfsoc_timer_irq.irq)
panic("No irq passed for timer0 via DT\n");
-#ifdef CONFIG_LOCAL_TIMERS
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
if (!sirfsoc_timer1_irq.irq)
panic("No irq passed for timer1 via DT\n");
-#endif
sirfsoc_marco_timer_init();
}
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index ef3cfb269d8..8a492d34ff9 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -165,9 +165,9 @@ static struct irqaction sirfsoc_timer_irq = {
};
/* Overwrite weak default sched_clock with more precise one */
-static u32 notrace sirfsoc_read_sched_clock(void)
+static u64 notrace sirfsoc_read_sched_clock(void)
{
- return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
+ return sirfsoc_timer_read(NULL);
}
static void __init sirfsoc_clockevent_init(void)
@@ -206,7 +206,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
- setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
+ sched_clock_register(sirfsoc_read_sched_clock, 64, CLOCK_TICK_RATE);
BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 587e0202a70..02821b06a39 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -52,7 +52,7 @@ static inline void pit_irq_acknowledge(void)
__raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
}
-static unsigned int pit_read_sched_clock(void)
+static u64 pit_read_sched_clock(void)
{
return __raw_readl(clksrc_base + PITCVAL);
}
@@ -64,7 +64,7 @@ static int __init pit_clocksource_init(unsigned long rate)
__raw_writel(~0UL, clksrc_base + PITLDVAL);
__raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
- setup_sched_clock(pit_read_sched_clock, 32, rate);
+ sched_clock_register(pit_read_sched_clock, 32, rate);
return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
300, 32, clocksource_mmio_readl_down);
}
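Both timer-prima2.c and vf_pit_timer.c above move from the 32-bit-only setup_sched_clock() interface to sched_clock_register(), whose read callback returns u64 and whose second argument gives the number of valid counter bits. A minimal sketch of the new registration, with demo_* names standing in for a driver's own counter read (illustrative only):

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/sched_clock.h>
#include <linux/types.h>

/* return the raw free-running counter; only the low 'bits' bits matter */
static u64 notrace demo_read_sched_clock(void)
{
	/* read the hardware counter register here */
	return 0;
}

static void __init demo_sched_clock_init(unsigned long rate)
{
	/* old API: setup_sched_clock(read, 32, rate) with a u32 read() */
	sched_clock_register(demo_read_sched_clock, 32, rate);
}

A driver would call demo_sched_clock_init(rate) from its timer init. A 64-bit counter such as the prima2 one can pass 64 bits and effectively never wraps, while the Vybrid PIT still declares 32 bits because it reads a 32-bit register.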
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 64f553f04fa..ad3c0e83a77 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -137,14 +137,12 @@ static void __init vt8500_timer_init(struct device_node *np)
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- of_node_put(np);
return;
}