Diffstat (limited to 'kernel/locking')
 kernel/locking/locktorture.c  | 529
 kernel/locking/mcs_spinlock.h |   3
 kernel/locking/mutex.c        | 416
 kernel/locking/mutex.h        |   2
 kernel/locking/rtmutex.c      |   2
 kernel/locking/rwsem-xadd.c   |  27
 kernel/locking/semaphore.c    |  12
 7 files changed, 695 insertions(+), 296 deletions(-)
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0955b885d0d..ec8cce25977 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -20,30 +20,20 @@
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
-#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
-#include <linux/err.h>
#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/freezer.h>
-#include <linux/cpu.h>
#include <linux/delay.h>
-#include <linux/stat.h>
#include <linux/slab.h>
-#include <linux/trace_clock.h>
-#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
@@ -51,6 +41,8 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
"Number of write-locking stress-test threads");
+torture_param(int, nreaders_stress, -1,
+ "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (s), 0=disable");
@@ -66,30 +58,28 @@ torture_param(bool, verbose, true,
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
- "Type of lock to torture (spin_lock, spin_lock_irq, ...)");
-
-static atomic_t n_lock_torture_errors;
+ "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
+static struct task_struct **reader_tasks;
-static int nrealwriters_stress;
static bool lock_is_write_held;
+static bool lock_is_read_held;
-struct lock_writer_stress_stats {
- long n_write_lock_fail;
- long n_write_lock_acquired;
+struct lock_stress_stats {
+ long n_lock_fail;
+ long n_lock_acquired;
};
-static struct lock_writer_stress_stats *lwsa;
#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
-int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
-module_param(locktorture_runnable, int, 0444);
-MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at module init");
+int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+module_param(torture_runnable, int, 0444);
+MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
/* Forward reference. */
static void lock_torture_cleanup(void);
@@ -102,12 +92,25 @@ struct lock_torture_ops {
int (*writelock)(void);
void (*write_delay)(struct torture_random_state *trsp);
void (*writeunlock)(void);
+ int (*readlock)(void);
+ void (*read_delay)(struct torture_random_state *trsp);
+ void (*readunlock)(void);
unsigned long flags;
const char *name;
};
-static struct lock_torture_ops *cur_ops;
-
+struct lock_torture_cxt {
+ int nrealwriters_stress;
+ int nrealreaders_stress;
+ bool debug_lock;
+ atomic_t n_lock_torture_errors;
+ struct lock_torture_ops *cur_ops;
+ struct lock_stress_stats *lwsa; /* writer statistics */
+ struct lock_stress_stats *lrsa; /* reader statistics */
+};
+static struct lock_torture_cxt cxt = { 0, 0, false,
+ ATOMIC_INIT(0),
+ NULL, NULL};
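
Each lockable primitive is described purely by a lock_torture_ops table like the ones that follow; a writer-only type simply leaves the three read-side hooks NULL, and no reader kthreads are spawned for it. As an illustrative-only sketch (not part of this patch), a hypothetical new type would look like this and would also need an entry in the torture_ops[] array in lock_torture_init() further down:

/* Hypothetical writer-only lock type, for illustration only. */
static DEFINE_SPINLOCK(example_lock);

static int torture_example_write_lock(void) __acquires(example_lock)
{
	spin_lock(&example_lock);
	return 0;
}

static void torture_example_write_delay(struct torture_random_state *trsp)
{
	/* Occasionally hold the lock long enough to force contention. */
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000)))
		mdelay(100);
}

static void torture_example_write_unlock(void) __releases(example_lock)
{
	spin_unlock(&example_lock);
}

static struct lock_torture_ops example_lock_ops = {
	.writelock	= torture_example_write_lock,
	.write_delay	= torture_example_write_delay,
	.writeunlock	= torture_example_write_unlock,
	.readlock	= NULL,		/* writer-only: no reader threads */
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "example_lock"
};
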
/*
* Definitions for lock torture testing.
*/
@@ -123,10 +126,10 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2000 * longdelay_us)))
+ (cxt.nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
- if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
@@ -140,6 +143,9 @@ static struct lock_torture_ops lock_busted_ops = {
.writelock = torture_lock_busted_write_lock,
.write_delay = torture_lock_busted_write_delay,
.writeunlock = torture_lock_busted_write_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "lock_busted"
};
@@ -160,13 +166,13 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2000 * longdelay_us)))
+ (cxt.nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2 * shortdelay_us)))
+ (cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
- if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
@@ -180,39 +186,253 @@ static struct lock_torture_ops spin_lock_ops = {
.writelock = torture_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_spin_lock_write_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
-__acquires(torture_spinlock_irq)
+__acquires(torture_spinlock)
{
unsigned long flags;
spin_lock_irqsave(&torture_spinlock, flags);
- cur_ops->flags = flags;
+ cxt.cur_ops->flags = flags;
return 0;
}
static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
- spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
+ spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
.writelock = torture_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_lock_spin_write_unlock_irq,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "spin_lock_irq"
};
+static DEFINE_RWLOCK(torture_rwlock);
+
+static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+{
+ write_lock(&torture_rwlock);
+ return 0;
+}
+
+static void torture_rwlock_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long shortdelay_us = 2;
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a short delay mostly to emulate likely code, and
+ * we want a long delay occasionally to force massive contention.
+ */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms);
+ else
+ udelay(shortdelay_us);
+}
+
+static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+{
+ write_unlock(&torture_rwlock);
+}
+
+static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
+{
+ read_lock(&torture_rwlock);
+ return 0;
+}
+
+static void torture_rwlock_read_delay(struct torture_random_state *trsp)
+{
+ const unsigned long shortdelay_us = 10;
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a short delay mostly to emulate likely code, and
+ * we want a long delay occasionally to force massive contention.
+ */
+ if (!(torture_random(trsp) %
+ (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms);
+ else
+ udelay(shortdelay_us);
+}
+
+static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
+{
+ read_unlock(&torture_rwlock);
+}
+
+static struct lock_torture_ops rw_lock_ops = {
+ .writelock = torture_rwlock_write_lock,
+ .write_delay = torture_rwlock_write_delay,
+ .writeunlock = torture_rwlock_write_unlock,
+ .readlock = torture_rwlock_read_lock,
+ .read_delay = torture_rwlock_read_delay,
+ .readunlock = torture_rwlock_read_unlock,
+ .name = "rw_lock"
+};
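
Worked example for the delay heuristic used throughout these ops: with the constants above (2 us / 10 us short delays, 100 ms long delay) and, say, 8 writer and 8 reader threads, the long mdelay() fires only when torture_random() % (8 * 2000 * 100) == 0, i.e. roughly once every 1,600,000 acquisitions per thread (assuming torture_random() is close to uniform), while every other acquisition takes the short udelay() path: occasional massive contention, cheap the rest of the time.
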
+
+static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&torture_rwlock, flags);
+ cxt.cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_rwlock_write_unlock_irq(void)
+__releases(torture_rwlock)
+{
+ write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&torture_rwlock, flags);
+ cxt.cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_rwlock_read_unlock_irq(void)
+__releases(torture_rwlock)
+{
+ read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops rw_lock_irq_ops = {
+ .writelock = torture_rwlock_write_lock_irq,
+ .write_delay = torture_rwlock_write_delay,
+ .writeunlock = torture_rwlock_write_unlock_irq,
+ .readlock = torture_rwlock_read_lock_irq,
+ .read_delay = torture_rwlock_read_delay,
+ .readunlock = torture_rwlock_read_unlock_irq,
+ .name = "rw_lock_irq"
+};
+
+static DEFINE_MUTEX(torture_mutex);
+
+static int torture_mutex_lock(void) __acquires(torture_mutex)
+{
+ mutex_lock(&torture_mutex);
+ return 0;
+}
+
+static void torture_mutex_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 5);
+ else
+ mdelay(longdelay_ms / 5);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_mutex_unlock(void) __releases(torture_mutex)
+{
+ mutex_unlock(&torture_mutex);
+}
+
+static struct lock_torture_ops mutex_lock_ops = {
+ .writelock = torture_mutex_lock,
+ .write_delay = torture_mutex_delay,
+ .writeunlock = torture_mutex_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
+ .name = "mutex_lock"
+};
+
+static DECLARE_RWSEM(torture_rwsem);
+static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
+{
+ down_write(&torture_rwsem);
+ return 0;
+}
+
+static void torture_rwsem_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 10);
+ else
+ mdelay(longdelay_ms / 10);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_write(void) __releases(torture_rwsem)
+{
+ up_write(&torture_rwsem);
+}
+
+static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
+{
+ down_read(&torture_rwsem);
+ return 0;
+}
+
+static void torture_rwsem_read_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 2);
+ else
+ mdelay(longdelay_ms / 2);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_read(void) __releases(torture_rwsem)
+{
+ up_read(&torture_rwsem);
+}
+
+static struct lock_torture_ops rwsem_lock_ops = {
+ .writelock = torture_rwsem_down_write,
+ .write_delay = torture_rwsem_write_delay,
+ .writeunlock = torture_rwsem_up_write,
+ .readlock = torture_rwsem_down_read,
+ .read_delay = torture_rwsem_read_delay,
+ .readunlock = torture_rwsem_up_read,
+ .name = "rwsem_lock"
+};
+
/*
* Lock torture writer kthread. Repeatedly acquires and releases
* the lock, checking for duplicate acquisitions.
*/
static int lock_torture_writer(void *arg)
{
- struct lock_writer_stress_stats *lwsp = arg;
+ struct lock_stress_stats *lwsp = arg;
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
@@ -221,14 +441,19 @@ static int lock_torture_writer(void *arg)
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
- cur_ops->writelock();
+
+ cxt.cur_ops->writelock();
if (WARN_ON_ONCE(lock_is_write_held))
- lwsp->n_write_lock_fail++;
+ lwsp->n_lock_fail++;
lock_is_write_held = 1;
- lwsp->n_write_lock_acquired++;
- cur_ops->write_delay(&rand);
+ if (WARN_ON_ONCE(lock_is_read_held))
+ lwsp->n_lock_fail++; /* rare, but... */
+
+ lwsp->n_lock_acquired++;
+ cxt.cur_ops->write_delay(&rand);
lock_is_write_held = 0;
- cur_ops->writeunlock();
+ cxt.cur_ops->writeunlock();
+
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_writer");
@@ -236,32 +461,66 @@ static int lock_torture_writer(void *arg)
}
/*
+ * Lock torture reader kthread. Repeatedly acquires and releases
+ * the reader lock.
+ */
+static int lock_torture_reader(void *arg)
+{
+ struct lock_stress_stats *lrsp = arg;
+ static DEFINE_TORTURE_RANDOM(rand);
+
+ VERBOSE_TOROUT_STRING("lock_torture_reader task started");
+ set_user_nice(current, MAX_NICE);
+
+ do {
+ if ((torture_random(&rand) & 0xfffff) == 0)
+ schedule_timeout_uninterruptible(1);
+
+ cxt.cur_ops->readlock();
+ lock_is_read_held = 1;
+ if (WARN_ON_ONCE(lock_is_write_held))
+ lrsp->n_lock_fail++; /* rare, but... */
+
+ lrsp->n_lock_acquired++;
+ cxt.cur_ops->read_delay(&rand);
+ lock_is_read_held = 0;
+ cxt.cur_ops->readunlock();
+
+ stutter_wait("lock_torture_reader");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("lock_torture_reader");
+ return 0;
+}
+
+/*
 * Create a lock-torture-statistics message in the specified buffer.
*/
-static void lock_torture_printk(char *page)
+static void __torture_print_stats(char *page,
+ struct lock_stress_stats *statp, bool write)
{
bool fail = 0;
- int i;
+ int i, n_stress;
long max = 0;
- long min = lwsa[0].n_write_lock_acquired;
+ long min = statp[0].n_lock_acquired;
long long sum = 0;
- for (i = 0; i < nrealwriters_stress; i++) {
- if (lwsa[i].n_write_lock_fail)
+ n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
+ for (i = 0; i < n_stress; i++) {
+ if (statp[i].n_lock_fail)
fail = true;
- sum += lwsa[i].n_write_lock_acquired;
- if (max < lwsa[i].n_write_lock_fail)
- max = lwsa[i].n_write_lock_fail;
- if (min > lwsa[i].n_write_lock_fail)
- min = lwsa[i].n_write_lock_fail;
+ sum += statp[i].n_lock_acquired;
+ if (max < statp[i].n_lock_fail)
+ max = statp[i].n_lock_fail;
+ if (min > statp[i].n_lock_fail)
+ min = statp[i].n_lock_fail;
}
- page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
page += sprintf(page,
- "Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
+ "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
+ write ? "Writes" : "Reads ",
sum, max, min, max / 2 > min ? "???" : "",
fail, fail ? "!!!" : "");
if (fail)
- atomic_inc(&n_lock_torture_errors);
+ atomic_inc(&cxt.n_lock_torture_errors);
}
/*
@@ -274,18 +533,35 @@ static void lock_torture_printk(char *page)
*/
static void lock_torture_stats_print(void)
{
- int size = nrealwriters_stress * 200 + 8192;
+ int size = cxt.nrealwriters_stress * 200 + 8192;
char *buf;
+ if (cxt.cur_ops->readlock)
+ size += cxt.nrealreaders_stress * 200 + 8192;
+
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
- lock_torture_printk(buf);
+
+ __torture_print_stats(buf, cxt.lwsa, true);
pr_alert("%s", buf);
kfree(buf);
+
+ if (cxt.cur_ops->readlock) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("lock_torture_stats_print: Out of memory, need: %d",
+ size);
+ return;
+ }
+
+ __torture_print_stats(buf, cxt.lrsa, false);
+ pr_alert("%s", buf);
+ kfree(buf);
+ }
}
/*
@@ -312,9 +588,10 @@ lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
- "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
- torture_type, tag, nrealwriters_stress, stat_interval, verbose,
- shuffle_interval, stutter, shutdown_secs,
+ "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
+ torture_type, tag, cxt.debug_lock ? " [debug]": "",
+ cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
+ verbose, shuffle_interval, stutter, shutdown_secs,
onoff_interval, onoff_holdoff);
}
@@ -322,46 +599,59 @@ static void lock_torture_cleanup(void)
{
int i;
- if (torture_cleanup())
+ if (torture_cleanup_begin())
return;
if (writer_tasks) {
- for (i = 0; i < nrealwriters_stress; i++)
+ for (i = 0; i < cxt.nrealwriters_stress; i++)
torture_stop_kthread(lock_torture_writer,
writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
+ if (reader_tasks) {
+ for (i = 0; i < cxt.nrealreaders_stress; i++)
+ torture_stop_kthread(lock_torture_reader,
+ reader_tasks[i]);
+ kfree(reader_tasks);
+ reader_tasks = NULL;
+ }
+
torture_stop_kthread(lock_torture_stats, stats_task);
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
- if (atomic_read(&n_lock_torture_errors))
- lock_torture_print_module_parms(cur_ops,
+ if (atomic_read(&cxt.n_lock_torture_errors))
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: FAILURE");
else if (torture_onoff_failures())
- lock_torture_print_module_parms(cur_ops,
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: LOCK_HOTPLUG");
else
- lock_torture_print_module_parms(cur_ops,
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
- int i;
+ int i, j;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
- &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+ &lock_busted_ops,
+ &spin_lock_ops, &spin_lock_irq_ops,
+ &rw_lock_ops, &rw_lock_irq_ops,
+ &mutex_lock_ops,
+ &rwsem_lock_ops,
};
- if (!torture_init_begin(torture_type, verbose, &locktorture_runnable))
+ if (!torture_init_begin(torture_type, verbose, &torture_runnable))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
- cur_ops = torture_ops[i];
- if (strcmp(torture_type, cur_ops->name) == 0)
+ cxt.cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cxt.cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
@@ -374,31 +664,69 @@ static int __init lock_torture_init(void)
torture_init_end();
return -EINVAL;
}
- if (cur_ops->init)
- cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+ if (cxt.cur_ops->init)
+ cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */
if (nwriters_stress >= 0)
- nrealwriters_stress = nwriters_stress;
+ cxt.nrealwriters_stress = nwriters_stress;
else
- nrealwriters_stress = 2 * num_online_cpus();
- lock_torture_print_module_parms(cur_ops, "Start of test");
+ cxt.nrealwriters_stress = 2 * num_online_cpus();
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ if (strncmp(torture_type, "mutex", 5) == 0)
+ cxt.debug_lock = true;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if ((strncmp(torture_type, "spin", 4) == 0) ||
+ (strncmp(torture_type, "rw_lock", 7) == 0))
+ cxt.debug_lock = true;
+#endif
/* Initialize the statistics so that each run gets its own numbers. */
lock_is_write_held = 0;
- lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
- if (lwsa == NULL) {
- VERBOSE_TOROUT_STRING("lwsa: Out of memory");
+ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ if (cxt.lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
- for (i = 0; i < nrealwriters_stress; i++) {
- lwsa[i].n_write_lock_fail = 0;
- lwsa[i].n_write_lock_acquired = 0;
+ for (i = 0; i < cxt.nrealwriters_stress; i++) {
+ cxt.lwsa[i].n_lock_fail = 0;
+ cxt.lwsa[i].n_lock_acquired = 0;
}
- /* Start up the kthreads. */
+ if (cxt.cur_ops->readlock) {
+ if (nreaders_stress >= 0)
+ cxt.nrealreaders_stress = nreaders_stress;
+ else {
+ /*
+ * By default, distribute readers and writers evenly. We still
+ * run the same total number of threads as the writer-only default.
+ */
+ if (nwriters_stress < 0) /* user doesn't care */
+ cxt.nrealwriters_stress = num_online_cpus();
+ cxt.nrealreaders_stress = cxt.nrealwriters_stress;
+ }
+
+ lock_is_read_held = 0;
+ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ if (cxt.lrsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
+ firsterr = -ENOMEM;
+ kfree(cxt.lwsa);
+ goto unwind;
+ }
+
+ for (i = 0; i < cxt.nrealreaders_stress; i++) {
+ cxt.lrsa[i].n_lock_fail = 0;
+ cxt.lrsa[i].n_lock_acquired = 0;
+ }
+ }
+ lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
+ /* Prepare torture context. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
onoff_interval * HZ);
@@ -422,18 +750,51 @@ static int __init lock_torture_init(void)
goto unwind;
}
- writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
+ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
- for (i = 0; i < nrealwriters_stress; i++) {
- firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
+
+ if (cxt.cur_ops->readlock) {
+ reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
+ GFP_KERNEL);
+ if (reader_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ }
+
+ /*
+ * Create the kthreads and start torturing (oh, those poor little locks).
+ *
+ * TODO: Note that we interleave writers with readers, giving writers a
+ * slight advantage by creating their kthreads first. This can be modified
+ * for very specific needs, or even let the user choose the policy, if
+ * ever wanted.
+ */
+ for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
+ j < cxt.nrealreaders_stress; i++, j++) {
+ if (i >= cxt.nrealwriters_stress)
+ goto create_reader;
+
+ /* Create writer. */
+ firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
writer_tasks[i]);
if (firsterr)
goto unwind;
+
+ create_reader:
+ if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
+ continue;
+ /* Create reader. */
+ firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
+ reader_tasks[j]);
+ if (firsterr)
+ goto unwind;
}
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL,
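
To summarize the thread-count defaults introduced above: with neither nwriters_stress nor nreaders_stress given, a writer-only type gets 2 * num_online_cpus() writers, while a type with a read side gets the same total split evenly between readers and writers. A condensed, self-contained restatement of that policy (a sketch of the logic, not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

/* Condensed restatement of the defaults in lock_torture_init(): how many
 * writer/reader kthreads to run.  ncpus stands in for num_online_cpus();
 * a parameter of -1 means "user didn't set it".
 */
static void default_thread_counts(int ncpus, bool has_readlock,
				  int nwriters_param, int nreaders_param,
				  int *nwriters, int *nreaders)
{
	*nwriters = (nwriters_param >= 0) ? nwriters_param : 2 * ncpus;
	*nreaders = 0;

	if (!has_readlock)
		return;

	if (nreaders_param >= 0) {
		*nreaders = nreaders_param;
	} else {
		/* Split the default total evenly between readers and writers. */
		if (nwriters_param < 0)
			*nwriters = ncpus;
		*nreaders = *nwriters;
	}
}

int main(void)
{
	int w, r;

	default_thread_counts(4, true, -1, -1, &w, &r);
	printf("read/write lock on 4 CPUs, no overrides: %d writers, %d readers\n", w, r);
	return 0;
}
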
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 23e89c5930e..4d60986fcbe 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -56,9 +56,6 @@ do { \
* If the lock has already been acquired, then this will proceed to spin
* on this node->locked until the previous lock holder sets the node->locked
* in mcs_spin_unlock().
- *
- * We don't inline mcs_spin_lock() so that perf can correctly account for the
- * time spent in this lock function.
*/
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ae712b25e49..dadbf88c22c 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -15,7 +15,7 @@
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
* and Sven Dietrich.
*
- * Also see Documentation/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.txt.
*/
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
@@ -106,6 +106,92 @@ void __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+ struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /*
+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+ * but released with a normal mutex_unlock in this call.
+ *
+ * This should never happen, always use ww_mutex_unlock.
+ */
+ DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+ /*
+ * Not quite done after calling ww_acquire_done() ?
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+ if (ww_ctx->contending_lock) {
+ /*
+ * After -EDEADLK you tried to
+ * acquire a different ww_mutex? Bad!
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+ /*
+ * You called ww_mutex_lock after receiving -EDEADLK,
+ * but 'forgot' to unlock everything else first?
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+ ww_ctx->contending_lock = NULL;
+ }
+
+ /*
+ * Naughty, using a different class will lead to undefined behavior!
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+ ww_ctx->acquired++;
+}
+
+/*
+ * after acquiring lock with fastpath or when we lost out in contested
+ * slowpath, set ctx and wake up any waiters so they can recheck.
+ *
+ * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
+ * as the fastpath and opportunistic spinning are disabled in that case.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx)
+{
+ unsigned long flags;
+ struct mutex_waiter *cur;
+
+ ww_mutex_lock_acquired(lock, ctx);
+
+ lock->ctx = ctx;
+
+ /*
+ * The lock->ctx update should be visible on all cores before
+ * the atomic read is done, otherwise contended waiters might be
+ * missed. The contended waiters will either see ww_ctx == NULL
+ * and keep spinning, or it will acquire wait_lock, add itself
+ * to waiter list and sleep.
+ */
+ smp_mb(); /* ^^^ */
+
+ /*
+ * Check if lock is contended, if not there is nobody to wake up
+ */
+ if (likely(atomic_read(&lock->base.count) == 0))
+ return;
+
+ /*
+ * Uh oh, we raced in fastpath, wake up everyone in this case,
+ * so they can see the new lock->ctx.
+ */
+ spin_lock_mutex(&lock->base.wait_lock, flags);
+ list_for_each_entry(cur, &lock->base.wait_list, list) {
+ debug_mutex_wake_waiter(&lock->base, cur);
+ wake_up_process(cur->task);
+ }
+ spin_unlock_mutex(&lock->base.wait_lock, flags);
+}
+
+
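The DEBUG_LOCKS_WARN_ON() checks above encode the wait/wound usage rules: one ww_acquire_ctx per acquisition sequence, every lock from the same ww_class, and a full backoff on -EDEADLK. A condensed sketch of a caller following those rules (illustrative only; names like example_ww_class are made up, the two ww_mutexes are assumed to have been set up with ww_mutex_init() against that class, and a real caller retries in a loop and also handles -EALREADY):

static DEFINE_WW_CLASS(example_ww_class);

static void lock_a_and_b(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &example_ww_class);

	err = ww_mutex_lock(a, &ctx);	/* cannot be -EDEADLK: nothing held yet */
	err = ww_mutex_lock(b, &ctx);
	if (err == -EDEADLK) {
		/* Back off: drop everything held, sleep on the contended
		 * lock, then reacquire the rest (simplified to one round). */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		err = ww_mutex_lock(a, &ctx);
	}
	ww_acquire_done(&ctx);

	/* ... both objects are now stably held ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}
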
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* In order to avoid a stampede of mutex spinners from acquiring the mutex
@@ -180,6 +266,129 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/
return retval;
}
+
+/*
+ * Atomically try to take the lock when it is available
+ */
+static inline bool mutex_try_to_acquire(struct mutex *lock)
+{
+ return !mutex_is_locked(lock) &&
+ (atomic_cmpxchg(&lock->count, 1, 0) == 1);
+}
+
+/*
+ * Optimistic spinning.
+ *
+ * We try to spin for acquisition when we find that the lock owner
+ * is currently running on a (different) CPU and while we don't
+ * need to reschedule. The rationale is that if the lock owner is
+ * running, it is likely to release the lock soon.
+ *
+ * Since this needs the lock owner, and this mutex implementation
+ * doesn't track the owner atomically in the lock field, we need to
+ * track it non-atomically.
+ *
+ * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+ * to serialize everything.
+ *
+ * The mutex spinners are queued up using MCS lock so that only one
+ * spinner can compete for the mutex. However, if mutex spinning isn't
+ * going to happen, there is no point in going through the lock/unlock
+ * overhead.
+ *
+ * Returns true when the lock was taken, otherwise false, indicating
+ * that we need to jump to the slowpath and sleep.
+ */
+static bool mutex_optimistic_spin(struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+{
+ struct task_struct *task = current;
+
+ if (!mutex_can_spin_on_owner(lock))
+ goto done;
+
+ if (!osq_lock(&lock->osq))
+ goto done;
+
+ while (true) {
+ struct task_struct *owner;
+
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
+ struct ww_mutex *ww;
+
+ ww = container_of(lock, struct ww_mutex, base);
+ /*
+ * If ww->ctx is set the contents are undefined, only
+ * by acquiring wait_lock there is a guarantee that
+ * they are not invalid when reading.
+ *
+ * As such, when deadlock detection needs to be
+ * performed the optimistic spinning cannot be done.
+ */
+ if (ACCESS_ONCE(ww->ctx))
+ break;
+ }
+
+ /*
+ * If there's an owner, wait for it to either
+ * release the lock or go to sleep.
+ */
+ owner = ACCESS_ONCE(lock->owner);
+ if (owner && !mutex_spin_on_owner(lock, owner))
+ break;
+
+ /* Try to acquire the mutex if it is unlocked. */
+ if (mutex_try_to_acquire(lock)) {
+ lock_acquired(&lock->dep_map, ip);
+
+ if (use_ww_ctx) {
+ struct ww_mutex *ww;
+ ww = container_of(lock, struct ww_mutex, base);
+
+ ww_mutex_set_context_fastpath(ww, ww_ctx);
+ }
+
+ mutex_set_owner(lock);
+ osq_unlock(&lock->osq);
+ return true;
+ }
+
+ /*
+ * When there's no owner, we might have preempted between the
+ * owner acquiring the lock and setting the owner field. If
+ * we're an RT task that will live-lock because we won't let
+ * the owner complete.
+ */
+ if (!owner && (need_resched() || rt_task(task)))
+ break;
+
+ /*
+ * The cpu_relax() call is a compiler barrier which forces
+ * everything in this loop to be re-loaded. We don't need
+ * memory barriers as we'll eventually observe the right
+ * values at the cost of a few extra spins.
+ */
+ cpu_relax_lowlatency();
+ }
+
+ osq_unlock(&lock->osq);
+done:
+ /*
+ * If we fell out of the spin path because of need_resched(),
+ * reschedule now, before we try-lock the mutex. This avoids getting
+ * scheduled out right after we obtained the mutex.
+ */
+ if (need_resched())
+ schedule_preempt_disabled();
+
+ return false;
+}
+#else
+static bool mutex_optimistic_spin(struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+{
+ return false;
+}
#endif
__visible __used noinline
@@ -277,91 +486,6 @@ __mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
return 0;
}
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
- struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
- /*
- * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
- * but released with a normal mutex_unlock in this call.
- *
- * This should never happen, always use ww_mutex_unlock.
- */
- DEBUG_LOCKS_WARN_ON(ww->ctx);
-
- /*
- * Not quite done after calling ww_acquire_done() ?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-
- if (ww_ctx->contending_lock) {
- /*
- * After -EDEADLK you tried to
- * acquire a different ww_mutex? Bad!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
- /*
- * You called ww_mutex_lock after receiving -EDEADLK,
- * but 'forgot' to unlock everything else first?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
- ww_ctx->contending_lock = NULL;
- }
-
- /*
- * Naughty, using a different class will lead to undefined behavior!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
- ww_ctx->acquired++;
-}
-
-/*
- * after acquiring lock with fastpath or when we lost out in contested
- * slowpath, set ctx and wake up any waiters so they can recheck.
- *
- * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
- * as the fastpath and opportunistic spinning are disabled in that case.
- */
-static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx)
-{
- unsigned long flags;
- struct mutex_waiter *cur;
-
- ww_mutex_lock_acquired(lock, ctx);
-
- lock->ctx = ctx;
-
- /*
- * The lock->ctx update should be visible on all cores before
- * the atomic read is done, otherwise contended waiters might be
- * missed. The contended waiters will either see ww_ctx == NULL
- * and keep spinning, or it will acquire wait_lock, add itself
- * to waiter list and sleep.
- */
- smp_mb(); /* ^^^ */
-
- /*
- * Check if lock is contended, if not there is nobody to wake up
- */
- if (likely(atomic_read(&lock->base.count) == 0))
- return;
-
- /*
- * Uh oh, we raced in fastpath, wake up everyone in this case,
- * so they can see the new lock->ctx.
- */
- spin_lock_mutex(&lock->base.wait_lock, flags);
- list_for_each_entry(cur, &lock->base.wait_list, list) {
- debug_mutex_wake_waiter(&lock->base, cur);
- wake_up_process(cur->task);
- }
- spin_unlock_mutex(&lock->base.wait_lock, flags);
-}
-
/*
* Lock a mutex (possibly interruptible), slowpath:
*/
@@ -378,104 +502,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- /*
- * Optimistic spinning.
- *
- * We try to spin for acquisition when we find that the lock owner
- * is currently running on a (different) CPU and while we don't
- * need to reschedule. The rationale is that if the lock owner is
- * running, it is likely to release the lock soon.
- *
- * Since this needs the lock owner, and this mutex implementation
- * doesn't track the owner atomically in the lock field, we need to
- * track it non-atomically.
- *
- * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
- * to serialize everything.
- *
- * The mutex spinners are queued up using MCS lock so that only one
- * spinner can compete for the mutex. However, if mutex spinning isn't
- * going to happen, there is no point in going through the lock/unlock
- * overhead.
- */
- if (!mutex_can_spin_on_owner(lock))
- goto slowpath;
-
- if (!osq_lock(&lock->osq))
- goto slowpath;
-
- for (;;) {
- struct task_struct *owner;
-
- if (use_ww_ctx && ww_ctx->acquired > 0) {
- struct ww_mutex *ww;
-
- ww = container_of(lock, struct ww_mutex, base);
- /*
- * If ww->ctx is set the contents are undefined, only
- * by acquiring wait_lock there is a guarantee that
- * they are not invalid when reading.
- *
- * As such, when deadlock detection needs to be
- * performed the optimistic spinning cannot be done.
- */
- if (ACCESS_ONCE(ww->ctx))
- break;
- }
-
- /*
- * If there's an owner, wait for it to either
- * release the lock or go to sleep.
- */
- owner = ACCESS_ONCE(lock->owner);
- if (owner && !mutex_spin_on_owner(lock, owner))
- break;
-
- /* Try to acquire the mutex if it is unlocked. */
- if (!mutex_is_locked(lock) &&
- (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
- lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx) {
- struct ww_mutex *ww;
- ww = container_of(lock, struct ww_mutex, base);
-
- ww_mutex_set_context_fastpath(ww, ww_ctx);
- }
-
- mutex_set_owner(lock);
- osq_unlock(&lock->osq);
- preempt_enable();
- return 0;
- }
-
- /*
- * When there's no owner, we might have preempted between the
- * owner acquiring the lock and setting the owner field. If
- * we're an RT task that will live-lock because we won't let
- * the owner complete.
- */
- if (!owner && (need_resched() || rt_task(task)))
- break;
-
- /*
- * The cpu_relax() call is a compiler barrier which forces
- * everything in this loop to be re-loaded. We don't need
- * memory barriers as we'll eventually observe the right
- * values at the cost of a few extra spins.
- */
- cpu_relax_lowlatency();
+ if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+ /* got the lock, yay! */
+ preempt_enable();
+ return 0;
}
- osq_unlock(&lock->osq);
-slowpath:
- /*
- * If we fell out of the spin path because of need_resched(),
- * reschedule now, before we try-lock the mutex. This avoids getting
- * scheduled out right after we obtained the mutex.
- */
- if (need_resched())
- schedule_preempt_disabled();
-#endif
+
spin_lock_mutex(&lock->wait_lock, flags);
/*
@@ -679,15 +711,21 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
* Release the lock, slowpath:
*/
static inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
+__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
unsigned long flags;
/*
- * some architectures leave the lock unlocked in the fastpath failure
+ * As a performance measure, release the lock before doing other
+ * wakeup related duties to follow. This allows other tasks to acquire
+ * the lock sooner, while still handling cleanups in past unlock calls.
+ * This can be done as we do not enforce strict equivalence between the
+ * mutex counter and wait_list.
+ *
+ *
+ * Some architectures leave the lock unlocked in the fastpath failure
+ * case, others need to leave it locked. In the latter case we have to
- * unlock it here
+ * unlock it here - as the lock counter is currently 0 or negative.
*/
if (__mutex_slowpath_needs_to_unlock())
atomic_set(&lock->count, 1);
@@ -716,7 +754,9 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
- __mutex_unlock_common_slowpath(lock_count, 1);
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_unlock_common_slowpath(lock, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
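
The __mutex_unlock_slowpath() wrapper above now performs the container_of() conversion itself, so the common slowpath takes a struct mutex * directly. For readers unfamiliar with the idiom, container_of() simply subtracts the member offset to recover the enclosing object; a tiny standalone sketch (struct and names invented purely for illustration):

#include <stddef.h>
#include <stdio.h>

struct foo {
	int a;
	int counter;	/* stands in for the mutex's embedded count field */
};

/* Hand-rolled equivalent of container_of(c, struct foo, counter). */
static struct foo *foo_from_counter(int *c)
{
	return (struct foo *)((char *)c - offsetof(struct foo, counter));
}

int main(void)
{
	struct foo f = { .a = 1, .counter = 2 };

	printf("%d\n", foo_from_counter(&f.counter)->a);	/* prints 1 */
	return 0;
}
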
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 4115fbf83b1..5cda397607f 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -16,7 +16,7 @@
#define mutex_remove_waiter(lock, waiter, ti) \
__list_del((waiter)->list.prev, (waiter)->list.next)
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline void mutex_set_owner(struct mutex *lock)
{
lock->owner = current;
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a0ea2a141b3..7c98873a307 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -8,7 +8,7 @@
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
- * See Documentation/rt-mutex-design.txt for details.
+ * See Documentation/locking/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
#include <linux/export.h>
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index d6203faf2eb..7628c3fc37c 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -246,19 +246,22 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_down_read_failed);
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
- if (!(count & RWSEM_ACTIVE_MASK)) {
- /* try acquiring the write lock */
- if (sem->count == RWSEM_WAITING_BIAS &&
- cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
- RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
- if (!list_is_singular(&sem->wait_list))
- rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
- return true;
- }
+ /*
+ * Try acquiring the write lock. Check count first in order
+ * to reduce unnecessary expensive cmpxchg() operations.
+ */
+ if (count == RWSEM_WAITING_BIAS &&
+ cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+ RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
+ if (!list_is_singular(&sem->wait_list))
+ rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+ return true;
}
+
return false;
}
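
The rewritten rwsem_try_write_lock() applies a common pattern: sample the count first and only issue the cmpxchg() when the sampled value says it can succeed, since even a failing cmpxchg() pulls the cache line in exclusively. A generic sketch of the same guard in kernel style (the helper name and types are illustrative, not part of this patch):

/* Sketch: skip the expensive cmpxchg() when a plain read already shows
 * that it would fail; 'old' is the only value from which we may claim.
 */
static inline bool try_claim(long *count, long old, long new)
{
	if (ACCESS_ONCE(*count) != old)
		return false;		/* cheap failure, no RMW issued */
	return cmpxchg(count, old, new) == old;
}
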
@@ -465,6 +468,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_down_write_failed);
/*
* handle waking up a waiter on the semaphore
@@ -485,6 +489,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_wake);
/*
* downgrade a write lock into a read lock
@@ -506,8 +511,4 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
return sem;
}
-
-EXPORT_SYMBOL(rwsem_down_read_failed);
-EXPORT_SYMBOL(rwsem_down_write_failed);
-EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index 6815171a4ff..b8120abe594 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -36,7 +36,7 @@
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
-static noinline int __down_timeout(struct semaphore *sem, long jiffies);
+static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
/**
@@ -145,14 +145,14 @@ EXPORT_SYMBOL(down_trylock);
/**
* down_timeout - acquire the semaphore within a specified time
* @sem: the semaphore to be acquired
- * @jiffies: how long to wait before failing
+ * @timeout: how long to wait before failing
*
* Attempts to acquire the semaphore. If no more tasks are allowed to
* acquire the semaphore, calling this function will put the task to sleep.
* If the semaphore is not released within the specified number of jiffies,
* this function returns -ETIME. It returns 0 if the semaphore was acquired.
*/
-int down_timeout(struct semaphore *sem, long jiffies)
+int down_timeout(struct semaphore *sem, long timeout)
{
unsigned long flags;
int result = 0;
@@ -161,7 +161,7 @@ int down_timeout(struct semaphore *sem, long jiffies)
if (likely(sem->count > 0))
sem->count--;
else
- result = __down_timeout(sem, jiffies);
+ result = __down_timeout(sem, timeout);
raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
@@ -248,9 +248,9 @@ static noinline int __sched __down_killable(struct semaphore *sem)
return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
-static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
+static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
- return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
+ return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
static noinline void __sched __up(struct semaphore *sem)
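
With the parameter renamed, the semantics are unchanged: the value passed to down_timeout() is still a timeout in jiffies, so callers typically convert from milliseconds. A hedged usage sketch (the semaphore and the 100 ms bound are illustrative):

/* Illustrative caller: wait at most ~100 ms for the semaphore. */
static int example_wait(struct semaphore *sem)
{
	int ret = down_timeout(sem, msecs_to_jiffies(100));

	if (ret == -ETIME)
		return ret;	/* timed out without acquiring */

	/* ... semaphore held here ... */
	up(sem);
	return 0;
}
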