author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-08-20 17:02:19 -0700
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-21 13:52:58 +0200
commit | 994025caba3e6beade9bde84dd1b70d9d250f27b (patch) |
tree | 0fe4d6f08d252af1db6a631182725085bb76442b /arch/x86/xen/spinlock.c |
parent | 168d2f464ab9860f0d1e66cf1f9684973222f1c6 (diff) |
xen: add debugfs support
Add support for exporting statistics on mmu updates, multicall
batching and pv spinlocks into debugfs. The base path is xen/ and
each subsystem adds its own directory: mmu, multicalls, spinlocks.
In each directory, writing 1 to "zero_stats" will cause the
corresponding stats to be zeroed the next time they're updated.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
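As context for how these files are meant to be consumed, here is a small userspace sketch (not part of the patch) that arms `zero_stats` and reads a few of the spinlock counters back. It assumes debugfs is mounted at the conventional `/sys/kernel/debug` and that the kernel was built with `CONFIG_XEN_DEBUG_FS`; the directory and file names come from the patch below.

```c
/*
 * Userspace sketch (not part of this patch): request a reset of the Xen
 * spinlock statistics and read a few counters back.  Assumes debugfs is
 * mounted at /sys/kernel/debug, which is the usual location but is not
 * mandated by the patch itself.
 */
#include <stdio.h>

#define SPIN_DIR "/sys/kernel/debug/xen/spinlocks/"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

static void dump_counter(const char *name)
{
	char path[256];
	unsigned long long val;
	FILE *f;

	snprintf(path, sizeof(path), SPIN_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fscanf(f, "%llu", &val) == 1)
		printf("%-24s %llu\n", name, val);
	fclose(f);
}

int main(void)
{
	/* Writing 1 to zero_stats requests a reset on the next stats update. */
	if (write_str(SPIN_DIR "zero_stats", "1"))
		perror("zero_stats");

	dump_counter("taken");
	dump_counter("taken_slow");
	dump_counter("released");
	dump_counter("released_slow");
	return 0;
}
```

Note that, per the commit message, the reset is deferred until the next time the stats are updated, so a read immediately after writing `zero_stats` may still show the old values.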
Diffstat (limited to 'arch/x86/xen/spinlock.c')
-rw-r--r-- | arch/x86/xen/spinlock.c | 165
---|---|---
1 files changed, 162 insertions, 3 deletions
```diff
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 4884bc603aa..0d8f3b2d9be 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -4,6 +4,8 @@
  */
 #include <linux/kernel_stat.h>
 #include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
 
 #include <asm/paravirt.h>
 
@@ -11,6 +13,93 @@
 #include <xen/events.h>
 
 #include "xen-ops.h"
+#include "debugfs.h"
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static struct xen_spinlock_stats
+{
+	u64 taken;
+	u32 taken_slow;
+	u32 taken_slow_nested;
+	u32 taken_slow_pickup;
+	u32 taken_slow_spurious;
+
+	u64 released;
+	u32 released_slow;
+	u32 released_slow_kicked;
+
+#define HISTO_BUCKETS	20
+	u32 histo_spin_fast[HISTO_BUCKETS+1];
+	u32 histo_spin[HISTO_BUCKETS+1];
+
+	u64 spinning_time;
+	u64 total_time;
+} spinlock_stats;
+
+static u8 zero_stats;
+
+static unsigned lock_timeout = 1 << 10;
+#define TIMEOUT lock_timeout
+
+static inline void check_zero(void)
+{
+	if (unlikely(zero_stats)) {
+		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
+		zero_stats = 0;
+	}
+}
+
+#define ADD_STATS(elem, val)			\
+	do { check_zero(); spinlock_stats.elem += (val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+	return xen_clocksource_read();
+}
+
+static void __spin_time_accum(u64 delta, u32 *array)
+{
+	unsigned index = ilog2(delta);
+
+	check_zero();
+
+	if (index < HISTO_BUCKETS)
+		array[index]++;
+	else
+		array[HISTO_BUCKETS]++;
+}
+
+static inline void spin_time_accum_fast(u64 start)
+{
+	u32 delta = xen_clocksource_read() - start;
+
+	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
+	spinlock_stats.spinning_time += delta;
+}
+
+static inline void spin_time_accum(u64 start)
+{
+	u32 delta = xen_clocksource_read() - start;
+
+	__spin_time_accum(delta, spinlock_stats.histo_spin);
+	spinlock_stats.total_time += delta;
+}
+#else  /* !CONFIG_XEN_DEBUG_FS */
+#define TIMEOUT			(1 << 10)
+#define ADD_STATS(elem, val)	do { (void)(val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+	return 0;
+}
+
+static inline void spin_time_accum_fast(u64 start)
+{
+}
+static inline void spin_time_accum(u64 start)
+{
+}
+#endif  /* CONFIG_XEN_DEBUG_FS */
 
 struct xen_spinlock {
 	unsigned char lock;		/* 0 -> free; 1 -> locked */
@@ -92,6 +181,9 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
+	ADD_STATS(taken_slow, 1);
+	ADD_STATS(taken_slow_nested, prev != NULL);
+
 	do {
 		/* clear pending */
 		xen_clear_irq_pending(irq);
@@ -100,6 +192,8 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 		   we weren't looking  */
 		ret = xen_spin_trylock(lock);
 		if (ret) {
+			ADD_STATS(taken_slow_pickup, 1);
+
 			/*
 			 * If we interrupted another spinlock while it
 			 * was blocking, make sure it doesn't block
@@ -120,6 +214,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_this_cpu.irqs[irq]++;
@@ -132,11 +227,18 @@ out:
 static void xen_spin_lock(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int timeout;
+	unsigned timeout;
 	u8 oldval;
+	u64 start_spin;
+
+	ADD_STATS(taken, 1);
+
+	start_spin = spin_time_start();
 
 	do {
-		timeout = 1 << 10;
+		u64 start_spin_fast = spin_time_start();
+
+		timeout = TIMEOUT;
 
 		asm("1: xchgb %1,%0\n"
 		    "   testb %1,%1\n"
@@ -151,16 +253,22 @@ static void xen_spin_lock(struct raw_spinlock *lock)
 		    : "1" (1)
 		    : "memory");
 
-	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+		spin_time_accum_fast(start_spin_fast);
+	} while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock))));
+
+	spin_time_accum(start_spin);
 }
 
 static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 {
 	int cpu;
 
+	ADD_STATS(released_slow, 1);
+
 	for_each_online_cpu(cpu) {
 		/* XXX should mix up next cpu selection */
 		if (per_cpu(lock_spinners, cpu) == xl) {
+			ADD_STATS(released_slow_kicked, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
 		}
@@ -171,6 +279,8 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
+	ADD_STATS(released, 1);
+
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
@@ -216,3 +326,52 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.spin_trylock = xen_spin_trylock;
 	pv_lock_ops.spin_unlock = xen_spin_unlock;
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_spin_debug;
+
+static int __init xen_spinlock_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
+
+	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
+
+	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
+	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow);
+	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_nested);
+	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_pickup);
+	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_spurious);
+
+	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
+	debugfs_create_u32("released_slow", 0444, d_spin_debug,
+			   &spinlock_stats.released_slow);
+	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
+			   &spinlock_stats.released_slow_kicked);
+
+	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
+			   &spinlock_stats.spinning_time);
+	debugfs_create_u64("time_total", 0444, d_spin_debug,
+			   &spinlock_stats.total_time);
+
+	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
+	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);
+
+	return 0;
+}
+fs_initcall(xen_spinlock_debugfs);
+
+#endif	/* CONFIG_XEN_DEBUG_FS */
```
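The two histogram arrays exported above (`histo_spinning` and `histo_total`) bucket each measured delay by its integer log2: bucket i counts delays whose `ilog2()` is i, and anything at or past `HISTO_BUCKETS` falls into a final overflow slot, which covers a very wide dynamic range with a fixed 21-entry array. A standalone userspace sketch of that bucketing scheme (mirroring `__spin_time_accum()`, not kernel code; the sample deltas are purely illustrative) might look like:

```c
/*
 * Standalone illustration (not from the patch) of the log2 histogram
 * bucketing used by __spin_time_accum(): each delay lands in bucket
 * ilog2(delta), and anything at or beyond HISTO_BUCKETS goes into the
 * final overflow bucket.
 */
#include <stdio.h>

#define HISTO_BUCKETS 20

static unsigned histo[HISTO_BUCKETS + 1];

/* Integer log2 of a non-zero value, standing in for the kernel's ilog2(). */
static unsigned ilog2_u64(unsigned long long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static void accum(unsigned long long delta)
{
	unsigned index = delta ? ilog2_u64(delta) : 0;

	if (index < HISTO_BUCKETS)
		histo[index]++;
	else
		histo[HISTO_BUCKETS]++;
}

int main(void)
{
	unsigned long long samples[] = { 3, 70, 900, 40000, 5000000 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		accum(samples[i]);

	for (i = 0; i <= HISTO_BUCKETS; i++)
		if (histo[i])
			printf("bucket %2u: %u\n", i, histo[i]);
	return 0;
}
```

In the patch itself the deltas come from differences of `xen_clocksource_read()`, so each bucket corresponds to a power-of-two range of spin durations rather than a fixed-width interval.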