author     jacob.jun.pan@linux.intel.com <jacob.jun.pan@linux.intel.com>  2011-02-18 13:42:54 -0800
committer  Thomas Gleixner <tglx@linutronix.de>  2011-02-18 23:14:54 +0100
commit     5df91509d324d44cfb11e55d9cb02fe18b53b045 (patch)
tree       3076ee0783e0228bffd58011ddc8d3396a15bd62 /arch/x86
parent     13884c6680973f0ce3483dc59b636b4962d6dafe (diff)
x86: mrst: Remove apb timer read workaround
APB timer current count was unreliable in the earlier silicon, which could result in time going backwards. This problem has been fixed in the current silicon stepping.

This patch removes the workaround that checked for and prevented the timer rolling back when the APB timer is used as a clocksource device.

The workaround code was also flawed by a potential race condition around the cached read value last_read. That could have been fixed by assigning last_read to a local variable at the beginning of apbt_read_clocksource(), but it is no longer necessary.

[ tglx: A sane timer on an Intel chip - I can't believe it ]

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Alan Cox <alan@linux.intel.com>
LKML-Reference: <1298065374-25532-1-git-send-email-jacob.jun.pan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
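For illustration, a minimal sketch of the alternative fix mentioned above (not the code that was merged): the shared static last_read is snapshotted into a local once, at the top of apbt_read_clocksource(), so the comparison and the later use cannot observe two different values. The helpers apbt_readl(), phy_cs_timer_id and the APBTMR_N_* offsets are taken from the file touched below; the simplified rollover handling is an assumption for brevity.

/*
 * Hypothetical sketch only -- NOT the change that was merged.
 * The racy pattern was re-reading the shared static last_read;
 * here it is copied into a local 'prev' exactly once.
 */
static cycle_t apbt_read_clocksource(struct clocksource *cs)
{
	static unsigned long last_read;
	unsigned long prev = last_read;		/* single snapshot of the static */
	unsigned long count;

	count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);

	/*
	 * The timer counts down, so a value above the previous snapshot
	 * means a rollover or a bad sample; re-read once, comparing only
	 * against the local 'prev' (simplified vs. the removed workaround).
	 */
	if (unlikely(count > prev))
		count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);

	last_read = count;
	return (cycle_t)~count;
}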
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/apb_timer.c | 60
1 file changed, 4 insertions, 56 deletions
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 7c9ab59653e..afc406498c9 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -506,64 +506,12 @@ static int apbt_next_event(unsigned long delta,
 		return 0;
 }
 
-/*
- * APB timer clock is not in sync with pclk on Langwell, which translates to
- * unreliable read value caused by sampling error. the error does not add up
- * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
- * would go backwards. the following code is trying to prevent time traveling
- * backwards. little bit paranoid.
- */
 static cycle_t apbt_read_clocksource(struct clocksource *cs)
 {
-	unsigned long t0, t1, t2;
-	static unsigned long last_read;
-
-bad_count:
-	t1 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	t2 = apbt_readl(phy_cs_timer_id,
-			APBTMR_N_CURRENT_VALUE);
-	if (unlikely(t1 < t2)) {
-		pr_debug("APBT: read current count error %lx:%lx:%lx\n",
-			 t1, t2, t2 - t1);
-		goto bad_count;
-	}
-	/*
-	 * check against cached last read, makes sure time does not go back.
-	 * it could be a normal rollover but we will do tripple check anyway
-	 */
-	if (unlikely(t2 > last_read)) {
-		/* check if we have a normal rollover */
-		unsigned long raw_intr_status =
-			apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
-		/*
-		 * cs timer interrupt is masked but raw intr bit is set if
-		 * rollover occurs. then we read EOI reg to clear it.
-		 */
-		if (raw_intr_status & (1 << phy_cs_timer_id)) {
-			apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
-			goto out;
-		}
-		pr_debug("APB CS going back %lx:%lx:%lx ",
-			 t2, last_read, t2 - last_read);
-bad_count_x3:
-		pr_debug("triple check enforced\n");
-		t0 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t1 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		udelay(1);
-		t2 = apbt_readl(phy_cs_timer_id,
-				APBTMR_N_CURRENT_VALUE);
-		if ((t2 > t1) || (t1 > t0)) {
-			printk(KERN_ERR "Error: APB CS tripple check failed\n");
-			goto bad_count_x3;
-		}
-	}
-out:
-	last_read = t2;
-	return (cycle_t)~t2;
+	unsigned long current_count;
+
+	current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
+	return (cycle_t)~current_count;
 }
 
 static int apbt_clocksource_register(void)
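One detail worth noting in the simplified read path above: the raw count is bitwise-inverted before being returned, which is consistent with a down-counting timer being mapped onto the monotonically increasing cycle value the clocksource core expects. A standalone illustration of that mapping follows (hypothetical, not kernel code; the 32-bit width and all-ones start value are assumptions).

/* Standalone sketch: why ~count turns a down-counter into an increasing value. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE) */
static uint32_t read_down_counter(uint32_t ticks_elapsed)
{
	return UINT32_C(0xffffffff) - ticks_elapsed;	/* counts down from all ones */
}

int main(void)
{
	for (uint32_t t = 0; t < 3; t++) {
		uint32_t count = read_down_counter(t);

		/* as the hardware count decreases, ~count increases by one per tick */
		printf("count=0x%08" PRIx32 "  cycles=0x%08" PRIx32 "\n",
		       count, (uint32_t)~count);
	}
	return 0;
}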