author		Tony Luck <tony.luck@intel.com>		2007-07-20 11:26:47 -0700
committer	Tony Luck <tony.luck@intel.com>		2007-07-20 11:26:47 -0700
commit		c36c282b88963d0957368a443168588e62301fda (patch)
tree		6343887ae42a65635a61b4ad99fd7f3e8dd24758 /arch/ia64/kernel
parent		f4fbfb0dda5577075a049eec7fb7ad38abca1912 (diff)
parent		1f564ad6d4182859612cbae452122e5eb2d62a76 (diff)
Pull ia64-clocksource into release branch
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--	arch/ia64/kernel/asm-offsets.c        |  35
-rw-r--r--	arch/ia64/kernel/cyclone.c            |  46
-rw-r--r--	arch/ia64/kernel/fsys.S               | 179
-rw-r--r--	arch/ia64/kernel/fsyscall_gtod_data.h |  23
-rw-r--r--	arch/ia64/kernel/time.c               |  96
5 files changed, 253 insertions, 126 deletions
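
Both new clocksources in this series register with .mult = 0 and fill it in at init time via clocksource_hz2mult(), after which a counter delta is converted to nanoseconds as (delta * mult) >> shift (shift is 16 in both cases). A minimal standalone C sketch of that arithmetic; hz2mult() mirrors the kernel helper's rounding, and the 100 MHz frequency and cycle delta are hypothetical example values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors clocksource_hz2mult(): choose mult so that
 * (cycles * mult) >> shift converts counter ticks to nanoseconds. */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest, as the kernel does */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	uint32_t shift = 16;		/* both clocksources in this patch use 16 */
	uint32_t hz = 100000000;	/* hypothetical 100 MHz counter */
	uint32_t mult = hz2mult(hz, shift);
	uint64_t delta = 12345;		/* cycles elapsed since clk_cycle_last */

	/* The same scaling fsys.S performs with xmpy.l and shr.u: */
	uint64_t ns = (delta * mult) >> shift;

	printf("mult=%u  %llu cycles -> %llu ns\n", mult,
	       (unsigned long long)delta, (unsigned long long)ns);
	return 0;
}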
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 2236fabbb3c..0aebc6f79e9 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1

 #include <linux/sched.h>
+#include <linux/clocksource.h>

 #include <asm-ia64/processor.h>
 #include <asm-ia64/ptrace.h>
@@ -15,6 +16,7 @@
 #include <asm-ia64/mca.h>

 #include "../kernel/sigframe.h"
+#include "../kernel/fsyscall_gtod_data.h"

 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -256,17 +258,24 @@ void foo(void)
 	BLANK();

 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
-	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
-	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
-	DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
-	DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
-	DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
-	DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
-	DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
-	DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
-	DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
-	DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
-	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
-	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
-	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+	DEFINE(IA64_GTOD_LOCK_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, lock));
+	DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, wall_time));
+	DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, monotonic_time));
+	DEFINE(IA64_CLKSRC_MASK_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, clk_mask));
+	DEFINE(IA64_CLKSRC_MULT_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, clk_mult));
+	DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, clk_shift));
+	DEFINE(IA64_CLKSRC_MMIO_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
+	DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
+		offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
+	DEFINE(IA64_ITC_JITTER_OFFSET,
+		offsetof (struct itc_jitter_data_t, itc_jitter));
+	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
+		offsetof (struct itc_jitter_data_t, itc_lastcycle));
 }

diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index e00b21514f7..2fd96d9062a 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -3,6 +3,7 @@
 #include <linux/time.h>
 #include <linux/errno.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
 #include <asm/io.h>

 /* IBM Summit (EXA) Cyclone counter code*/
@@ -18,13 +19,21 @@ void __init cyclone_setup(void)
 	use_cyclone = 1;
 }

+static void __iomem *cyclone_mc;

-struct time_interpolator cyclone_interpolator = {
-	.source = TIME_SOURCE_MMIO64,
-	.shift = 16,
-	.frequency = CYCLONE_TIMER_FREQ,
-	.drift = -100,
-	.mask = (1LL << 40) - 1
+static cycle_t read_cyclone(void)
+{
+	return (cycle_t)readq((void __iomem *)cyclone_mc);
+}
+
+static struct clocksource clocksource_cyclone = {
+	.name = "cyclone",
+	.rating = 300,
+	.read = read_cyclone,
+	.mask = (1LL << 40) - 1,
+	.mult = 0, /* to be calculated */
+	.shift = 16,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

 int __init init_cyclone_clock(void)
@@ -44,13 +53,15 @@ int __init init_cyclone_clock(void)
 	offset = (CYCLONE_CBAR_ADDR);
 	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
-		printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
+		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
+			" register.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
 	base = readq(reg);
 	if(!base){
-		printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
+		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
+			" value.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
@@ -60,7 +71,8 @@ int __init init_cyclone_clock(void)
 	offset = (base + CYCLONE_PMCC_OFFSET);
 	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
-		printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
+		printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
+			" register.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
@@ -71,7 +83,8 @@ int __init init_cyclone_clock(void)
 	offset = (base + CYCLONE_MPCS_OFFSET);
 	reg = (u64*)ioremap_nocache(offset, sizeof(u64));
 	if(!reg){
-		printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
+		printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
+			" register.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
@@ -82,7 +95,8 @@ int __init init_cyclone_clock(void)
 	offset = (base + CYCLONE_MPMC_OFFSET);
 	cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
 	if(!cyclone_timer){
-		printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
+		printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
+			" register.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
@@ -93,7 +107,8 @@ int __init init_cyclone_clock(void)
 		int stall = 100;
 		while(stall--) barrier();
 		if(readl(cyclone_timer) == old){
-			printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
+			printk(KERN_ERR "Summit chipset: Counter not counting!"
+ " DISABLED\n"); iounmap(cyclone_timer); cyclone_timer = 0; use_cyclone = 0; @@ -101,8 +116,11 @@ int __init init_cyclone_clock(void) } } /* initialize last tick */ - cyclone_interpolator.addr = cyclone_timer; - register_time_interpolator(&cyclone_interpolator); + cyclone_mc = cyclone_timer; + clocksource_cyclone.fsys_mmio = cyclone_timer; + clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, + clocksource_cyclone.shift); + clocksource_register(&clocksource_cyclone); return 0; } diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 3f926c2dc70..44841971f07 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -147,12 +147,11 @@ ENTRY(fsys_set_tid_address) FSYS_RETURN END(fsys_set_tid_address) -/* - * Ensure that the time interpolator structure is compatible with the asm code - */ -#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \ - || IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4 -#error fsys_gettimeofday incompatible with changes to struct time_interpolator +#if IA64_GTOD_LOCK_OFFSET !=0 +#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t +#endif +#if IA64_ITC_JITTER_OFFSET !=0 +#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t #endif #define CLOCK_REALTIME 0 #define CLOCK_MONOTONIC 1 @@ -179,126 +178,124 @@ ENTRY(fsys_gettimeofday) // r11 = preserved: saved ar.pfs // r12 = preserved: memory stack // r13 = preserved: thread pointer - // r14 = address of mask / mask + // r14 = address of mask / mask value // r15 = preserved: system call number // r16 = preserved: current task pointer - // r17 = wall to monotonic use - // r18 = time_interpolator->offset - // r19 = address of wall_to_monotonic - // r20 = pointer to struct time_interpolator / pointer to time_interpolator->address - // r21 = shift factor - // r22 = address of time interpolator->last_counter - // r23 = address of time_interpolator->last_cycle - // r24 = adress of time_interpolator->offset - // r25 = last_cycle value - // r26 = last_counter value - // r27 = pointer to xtime + // r17 = (not used) + // r18 = (not used) + // r19 = address of itc_lastcycle + // r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence) + // r21 = address of mmio_ptr + // r22 = address of wall_time or monotonic_time + // r23 = address of shift / value + // r24 = address mult factor / cycle_last value + // r25 = itc_lastcycle value + // r26 = address clocksource cycle_last + // r27 = (not used) // r28 = sequence number at the beginning of critcal section - // r29 = address of seqlock + // r29 = address of itc_jitter // r30 = time processing flags / memory address // r31 = pointer to result // Predicates // p6,p7 short term use // p8 = timesource ar.itc // p9 = timesource mmio64 - // p10 = timesource mmio32 + // p10 = timesource mmio32 - not used // p11 = timesource not to be handled by asm code - // p12 = memory time source ( = p9 | p10) - // p13 = do cmpxchg with time_interpolator_last_cycle + // p12 = memory time source ( = p9 | p10) - not used + // p13 = do cmpxchg with itc_lastcycle // p14 = Divide by 1000 // p15 = Add monotonic // - // Note that instructions are optimized for McKinley. McKinley can process two - // bundles simultaneously and therefore we continuously try to feed the CPU - // two bundles and then a stop. 
-	tnat.nz p6,p0 = r31	// branch deferred since it does not fit into bundle structure
+	// Note that instructions are optimized for McKinley. McKinley can
+	// process two bundles simultaneously and therefore we continuously
+	// try to feed the CPU two bundles and then a stop.
+	//
+	// Additional note that code has changed a lot. Optimization is TBD.
+	// Comments beginning with "?" may be outdated.
+	tnat.nz p6,p0 = r31	// ? branch deferred to fit later bundle
 	mov pr = r30,0xc000	// Set predicates according to function
 	add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
-	movl r20 = time_interpolator
+	movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
 	;;
-	ld8 r20 = [r20]		// get pointer to time_interpolator structure
-	movl r29 = xtime_lock
+	movl r29 = itc_jitter_data	// itc_jitter
+	add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20	// wall_time
 	ld4 r2 = [r2]		// process work pending flags
-	movl r27 = xtime
-	;;	// only one bundle here
-	ld8 r21 = [r20]		// first quad with control information
+	;;
+(p15)	add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20	// monotonic_time
+	add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
+	add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
 	and r2 = TIF_ALLWORK_MASK,r2
-(p6)	br.cond.spnt.few .fail_einval	// deferred branch
+(p6)	br.cond.spnt.few .fail_einval	// ? deferred branch
 	;;
-	add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20
-	extr r3 = r21,32,32	// time_interpolator->nsec_per_cyc
-	extr r8 = r21,0,16	// time_interpolator->source
+	add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20	// clksrc_cycle_last
 	cmp.ne p6, p0 = 0, r2	// Fallback if work is scheduled
 (p6)	br.cond.spnt.many fsys_fallback_syscall
 	;;
-	cmp.eq p8,p12 = 0,r8	// Check for cpu timer
-	cmp.eq p9,p0 = 1,r8	// MMIO64 ?
-	extr r2 = r21,24,8	// time_interpolator->jitter
-	cmp.eq p10,p0 = 2,r8	// MMIO32 ?
-	cmp.ltu p11,p0 = 2,r8	// function or other clock
-(p11)	br.cond.spnt.many fsys_fallback_syscall
+	// Begin critical section
+.time_redo:
+	ld4.acq r28 = [r20]	// gtod_lock.sequence, Must take first
+	;;
+	and r28 = ~1,r28	// And make sequence even to force retry if odd
 	;;
-	setf.sig f7 = r3	// Setup for scaling of counter
-(p15)	movl r19 = wall_to_monotonic
-(p12)	ld8 r30 = [r10]
-	cmp.ne p13,p0 = r2,r0	// need jitter compensation?
-	extr r21 = r21,16,8	// shift factor
+	ld8 r30 = [r21]		// clocksource->mmio_ptr
+	add r24 = IA64_CLKSRC_MULT_OFFSET,r20
+	ld4 r2 = [r29]		// itc_jitter value
+	add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
+	add r14 = IA64_CLKSRC_MASK_OFFSET,r20
 	;;
-.time_redo:
-	.pred.rel.mutex p8,p9,p10
-	ld4.acq r28 = [r29]	// xtime_lock.sequence. Must come first for locking purposes
+	ld4 r3 = [r24]		// clocksource mult value
+	ld8 r14 = [r14]		// clocksource mask value
+	cmp.eq p8,p9 = 0,r30	// use cpu timer if no mmio_ptr
 	;;
-	and r28 = ~1,r28	// Make sequence even to force retry if odd
+	setf.sig f7 = r3	// Setup for mult scaling of counter
+(p8)	cmp.ne p13,p0 = r2,r0	// need itc_jitter compensation, set p13
+	ld4 r23 = [r23]		// clocksource shift value
+	ld8 r24 = [r26]		// get clksrc_cycle_last value
+(p9)	cmp.eq p13,p0 = 0,r30	// if mmio_ptr, clear p13 jitter control
 	;;
+	.pred.rel.mutex p8,p9
 (p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
-	add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
-(p9)	ld8 r2 = [r30]		// readq(ti->address). Could also have latency issues..
-(p10)	ld4 r2 = [r30]		// readw(ti->address)
-(p13)	add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
-	;;	// could be removed by moving the last add upward
-	ld8 r26 = [r22]		// time_interpolator->last_counter
-(p13)	ld8 r25 = [r23]		// time interpolator->last_cycle
-	add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
-(p15)	ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET
-	ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET
-	add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20
-	;;
-	ld8 r18 = [r24]		// time_interpolator->offset
-	ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET	// xtime.tv_nsec
-(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
-	;;
-	ld8 r14 = [r14]		// time_interpolator->mask
-(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
-	sub r10 = r2,r26	// current_counter - last_counter
-	;;
-(p6)	sub r10 = r25,r26	// time we got was less than last_cycle
+(p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
+(p13)	ld8 r25 = [r19]		// get itc_lastcycle value
+	;;	// ? could be removed by moving the last add upward
+	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
+	;;
+	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
+(p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
+	;;
+(p13)	cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
+	sub r10 = r2,r24	// current_cycle - last_cycle
+	;;
+(p6)	sub r10 = r25,r24	// time we got was less than last_cycle
 (p7)	mov ar.ccv = r25	// more than last_cycle. Prep for cmpxchg
 	;;
+(p7)	cmpxchg8.rel r3 = [r19],r2,ar.ccv
+	;;
+(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful
+	;;
+(p7)	sub r10 = r3,r24	// then use new last_cycle instead
+	;;
 	and r10 = r10,r14	// Apply mask
 	;;
 	setf.sig f8 = r10
 	nop.i 123
 	;;
-(p7)	cmpxchg8.rel r3 = [r23],r2,ar.ccv
-EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare time
+	// fault check takes 5 cycles and we have spare time
+EX(.fail_efault, probe.w.fault r31, 3)
 	xmpy.l f8 = f8,f7	// nsec_per_cyc*(counter-last_counter)
-(p15)	add r9 = r9,r17		// Add wall to monotonic.secs to result secs
 	;;
-(p15)	ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
-(p7)	cmp.ne p7,p0 = r25,r3	// if cmpxchg not successful redo
-	// simulate tbit.nz.or p7,p0 = r28,0
+	// ? simulate tbit.nz.or p7,p0 = r28,0
 	getf.sig r2 = f8
 	mf
-	add r8 = r8,r18		// Add time interpolator offset
 	;;
-	ld4 r10 = [r29]		// xtime_lock.sequence
-(p15)	add r8 = r8, r17	// Add monotonic.nsecs to nsecs
-	shr.u r2 = r2,r21
-	;;	// overloaded 3 bundles!
-	// End critical section.
+	ld4 r10 = [r20]		// gtod_lock.sequence
+	shr.u r2 = r2,r23	// shift by factor
+	;;	// ? overloaded 3 bundles!
 	add r8 = r8,r2		// Add xtime.nsecs
-	cmp4.ne.or p7,p0 = r28,r10
-(p7)	br.cond.dpnt.few .time_redo	// sequence number changed ?
+	cmp4.ne p7,p0 = r28,r10
+(p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
+	// End critical section.
 	// Now r8=tv->tv_nsec and r9=tv->tv_sec
 	mov r10 = r0
 	movl r2 = 1000000000
@@ -308,19 +305,19 @@ EX(.fail_efault, probe.w.fault r31, 3)	// This takes 5 cycles and we have spare
 .time_normalize:
 	mov r21 = r8
 	cmp.ge p6,p0 = r8,r2
-(p14)	shr.u r20 = r8, 3	// We can repeat this if necessary just wasting some time
+(p14)	shr.u r20 = r8, 3	// We can repeat this if necessary just wasting time
 	;;
 (p14)	setf.sig f8 = r20
 (p6)	sub r8 = r8,r2
-(p6)	add r9 = 1,r9		// two nops before the branch.
-(p14)	setf.sig f7 = r3	// Chances for repeats are 1 in 10000 for gettod
+(p6)	add r9 = 1,r9		// two nops before the branch.
+(p14)	setf.sig f7 = r3	// Chances for repeats are 1 in 10000 for gettod
 (p6)	br.cond.dpnt.few .time_normalize
 	;;
 	// Divided by 8 though shift. Now divide by 125
 	// The compiler was able to do that with a multiply
 	// and a shift and we do the same
-EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
-(p14)	xmpy.hu f8 = f8, f7	// xmpy has 5 cycles latency so use it...
+EX(.fail_efault, probe.w.fault r23, 3)	// This also costs 5 cycles
+(p14)	xmpy.hu f8 = f8, f7	// xmpy has 5 cycles latency so use it
 	;;
 	mov r8 = r0
 (p14)	getf.sig r2 = f8

diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
new file mode 100644
index 00000000000..490dab55fba
--- /dev/null
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -0,0 +1,23 @@
+/*
+ * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *	Contributed by Peter Keilty <peter.keilty@hp.com>
+ *
+ * fsyscall gettimeofday data
+ */
+
+struct fsyscall_gtod_data_t {
+	seqlock_t	lock;
+	struct timespec	wall_time;
+	struct timespec	monotonic_time;
+	cycle_t		clk_mask;
+	u32		clk_mult;
+	u32		clk_shift;
+	void		*clk_fsys_mmio;
+	cycle_t		clk_cycle_last;
+} __attribute__ ((aligned (L1_CACHE_BYTES)));
+
+struct itc_jitter_data_t {
+	int		itc_jitter;
+	cycle_t		itc_lastcycle;
+} __attribute__ ((aligned (L1_CACHE_BYTES)));
+

diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3486fe7d6e6..627785c48ea 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/efi.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>

 #include <asm/machvec.h>
 #include <asm/delay.h>
@@ -28,6 +29,16 @@
 #include <asm/sections.h>
 #include <asm/system.h>

+#include "fsyscall_gtod_data.h"
+
+static cycle_t itc_get_cycles(void);
+
+struct fsyscall_gtod_data_t fsyscall_gtod_data = {
+	.lock = SEQLOCK_UNLOCKED,
+};
+
+struct itc_jitter_data_t itc_jitter_data;
+
 volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

 #ifdef CONFIG_IA64_DEBUG_IRQ
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);

 #endif

-static struct time_interpolator itc_interpolator = {
-	.shift = 16,
-	.mask = 0xffffffffffffffffLL,
-	.source = TIME_SOURCE_CPU
+static struct clocksource clocksource_itc = {
+	.name = "itc",
+	.rating = 350,
+	.read = itc_get_cycles,
+	.mask = 0xffffffffffffffff,
+	.mult = 0, /* to be calculated */
+	.shift = 16,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
+static struct clocksource *itc_clocksource;

 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
@@ -210,8 +226,6 @@ ia64_init_itm (void)
 					+ itc_freq/2)/itc_freq;

 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-		itc_interpolator.frequency = local_cpu_data->itc_freq;
-		itc_interpolator.drift = itc_drift;
 #ifdef CONFIG_SMP
 		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
 		 * Jitter compensation requires a cmpxchg which may limit
@@ -223,15 +237,50 @@ ia64_init_itm (void)
 		 * even going backward) if the ITC offsets between the individual CPUs
 		 * are too large.
 		 */
-		if (!nojitter) itc_interpolator.jitter = 1;
+		if (!nojitter)
+			itc_jitter_data.itc_jitter = 1;
 #endif
-		register_time_interpolator(&itc_interpolator);
 	}

 	/* Setup the CPU local timer tick */
 	ia64_cpu_local_tick();
+
+	if (!itc_clocksource) {
+		/* Sort out mult/shift values: */
+		clocksource_itc.mult =
+			clocksource_hz2mult(local_cpu_data->itc_freq,
+						clocksource_itc.shift);
+		clocksource_register(&clocksource_itc);
+		itc_clocksource = &clocksource_itc;
+	}
 }

+static cycle_t itc_get_cycles()
+{
+	u64 lcycle, now, ret;
+
+	if (!itc_jitter_data.itc_jitter)
+		return get_cycles();
+
+	lcycle = itc_jitter_data.itc_lastcycle;
+	now = get_cycles();
+	if (lcycle && time_after(lcycle, now))
+		return lcycle;
+
+	/*
+	 * Keep track of the last timer value returned.
+	 * In an SMP environment, you could lose out in contention of
+	 * cmpxchg. If so, your cmpxchg returns new value which the
+	 * winner of contention updated to. Use the new value instead.
+	 */
+	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
+	if (unlikely(ret != lcycle))
+		return ret;
+
+	return now;
+}
+
+
 static struct irqaction timer_irqaction = {
 	.handler =	timer_interrupt,
 	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
@@ -307,3 +356,34 @@ ia64_setup_printk_clock(void)
 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
 		ia64_printk_clock = ia64_itc_printk_clock;
 }
+
+void update_vsyscall(struct timespec *wall, struct clocksource *c)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+
+	/* copy fsyscall clock data */
+	fsyscall_gtod_data.clk_mask = c->mask;
+	fsyscall_gtod_data.clk_mult = c->mult;
+	fsyscall_gtod_data.clk_shift = c->shift;
+	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+
+	/* copy kernel time structures */
+	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
+	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
+	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+							+ wall->tv_sec;
+	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+							+ wall->tv_nsec;
+
+	/* normalize */
+	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
+		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
+		fsyscall_gtod_data.monotonic_time.tv_sec++;
+	}
+
+	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+}
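
update_vsyscall() is the writer half of a seqlock: it bumps gtod_lock.sequence around every update of the published clock data, while the .time_redo loop in fsys.S is the lockless reader half. Below is a C sketch of that reader protocol, simplified: the itc_jitter cmpxchg path and memory barriers are omitted, and gtod, read_counter() and all the numbers are hypothetical stand-ins for fsyscall_gtod_data, the ar.itc/MMIO read and live clock state:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Trimmed-down mirror of struct fsyscall_gtod_data_t from the patch. */
struct gtod_data {
	volatile uint32_t seq;		/* gtod_lock.sequence */
	struct timespec   wall_time;
	uint64_t          clk_mask;
	uint32_t          clk_mult;
	uint32_t          clk_shift;
	uint64_t          clk_cycle_last;
};

/* Hypothetical published state: 100 MHz counter, so mult = 655360, shift = 16
 * (the values the earlier hz2mult() sketch produces). */
static struct gtod_data gtod = {
	.seq = 2,			/* even: no writer active */
	.wall_time = { .tv_sec = 1184955407, .tv_nsec = 900000000 },
	.clk_mask = ~0ULL,
	.clk_mult = 655360,
	.clk_shift = 16,
	.clk_cycle_last = 1000000,
};

/* Stand-in for ar.itc / the MMIO readq() in fsys.S. */
static uint64_t read_counter(void)
{
	return 1000000 + 15000000;	/* 15e6 cycles past cycle_last = 150 ms */
}

/* Lockless reader, same shape as the .time_redo loop in the assembly. */
static void do_gettimeofday(struct timespec *ts)
{
	uint32_t seq;
	uint64_t delta, nsec;

	do {
		/* Clearing bit 0 forces a retry if a writer holds the lock
		 * (odd sequence): the same trick as "and r28 = ~1,r28". */
		seq = gtod.seq & ~1u;
		delta = (read_counter() - gtod.clk_cycle_last) & gtod.clk_mask;
		nsec = ((delta * gtod.clk_mult) >> gtod.clk_shift)
			+ gtod.wall_time.tv_nsec;
		ts->tv_sec = gtod.wall_time.tv_sec;
	} while (seq != gtod.seq);	/* sequence moved: writer ran, redo */

	while (nsec >= NSEC_PER_SEC) {	/* same job as .time_normalize */
		nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
	ts->tv_nsec = nsec;
}

int main(void)
{
	struct timespec ts;

	do_gettimeofday(&ts);
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}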