author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 09:02:01 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 09:02:01 -0800
commit	8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree	b907c35c79caadafff6ad46a91614e30afd2f967	/kernel/trace/ring_buffer.c
parent	050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent	b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
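For context, this series splits the old lock naming into three layers; a minimal sketch of the resulting types (an illustrative fragment, not code from the patch; demo_lock is a hypothetical name):

	#include <linux/spinlock.h>

	/*
	 * spinlock_t      - high-level lock; may become a sleeping lock on -rt
	 * raw_spinlock_t  - always a spinning lock; new name from this series
	 * arch_spinlock_t - architecture-level lock, formerly raw_spinlock_t
	 */
	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;	/* hypothetical lock */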
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	| 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5..f58c9ad1583 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
spinlock_t reader_lock; /* serialize readers */
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
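A note on the initializer above: __ARCH_SPIN_LOCK_UNLOCKED, like the old __RAW_SPIN_LOCK_UNLOCKED, expands to a brace initializer, so assigning it to an already-declared lock needs the compound-literal cast; a bare

	cpu_buffer->lock = __ARCH_SPIN_LOCK_UNLOCKED;	/* would not compile */

is only valid at the point of declaration.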
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
int ret;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
again:
/*
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto again;
out:
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
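As these two hunks show, the conversion is a pure rename at the call sites: arch_spin_lock(), like __raw_spin_lock() before it, does no irq or preempt handling of its own, so the caller keeps the open-coded discipline. A minimal sketch of the pattern, reusing the hypothetical demo_lock from above:

	unsigned long flags;

	local_irq_save(flags);		/* arch-level locks leave irq state to the caller */
	arch_spin_lock(&demo_lock);
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);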
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
synchronize_sched();
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
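Here, unlike in rb_get_reader_page(), no separate local_irq_save() is needed: spin_lock_irqsave() on the lockdep-tracked reader_lock has already disabled interrupts before the inner arch_spin_lock() is taken, so the arch lock simply nests inside it.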
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);