author:    Arnd Bergmann <arnd.bergmann@de.ibm.com>  2007-04-23 21:08:10 +0200
committer: Arnd Bergmann <arnd@klappe.arndb.de>      2007-04-23 21:18:54 +0200
commit:    390c53430498c9973e015432806edd53b2efe6c6
tree:      931970a9d56ecc7e1663d3a4ae64c68606f4b08a /arch/powerpc/platforms/cell
parent:    e097b513285e616215b23af234d127298bb8d89a
[POWERPC] spufs: add memory barriers after set_bit
set_bit does not guarantee ordering on powerpc, so using it for communication between threads requires explicit mb() calls.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
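For context, the ordering problem the message describes can be sketched as below. This is an illustrative example only, not code from this patch: the flag word and helper names are made up, and it assumes the kernel's set_bit()/test_bit() bitops and the mb() full barrier.

/* Illustrative sketch, not spufs code. */
#include <linux/bitops.h>	/* set_bit(), test_bit() */
#include <asm/system.h>		/* mb() in kernels of this era */

/* Hypothetical flag word shared between two threads. */
static unsigned long sketch_flags;
#define SKETCH_STOP	0

/* Writer: publish a stop request before acting on it. */
static void sketch_request_stop(void)
{
	set_bit(SKETCH_STOP, &sketch_flags);	/* atomic, but no ordering guarantee on powerpc */
	mb();					/* full barrier: the bit is visible to other
						 * CPUs before anything that follows */
}

/* Reader: keep going only while no stop was requested. */
static int sketch_may_continue(void)
{
	return !test_bit(SKETCH_STOP, &sketch_flags);
}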
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	3
1 file changed, 3 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 405a0555d75..1582d764523 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -76,6 +76,7 @@ void spu_start_tick(struct spu_context *ctx)
 		 * Make sure the exiting bit is cleared.
 		 */
 		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
 	}
 }
@@ -88,6 +89,7 @@ void spu_stop_tick(struct spu_context *ctx)
 		 * makes sure it does not rearm itself anymore.
 		 */
 		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		cancel_delayed_work(&ctx->sched_work);
 	}
 }
@@ -239,6 +241,7 @@ static void spu_add_to_rq(struct spu_context *ctx)
 	spin_lock(&spu_prio->runq_lock);
 	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 	set_bit(ctx->prio, spu_prio->bitmap);
+	mb();
 	spin_unlock(&spu_prio->runq_lock);
 }
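The set_bit()/mb() in spu_stop_tick() pairs with the scheduler tick worker on the other side of the handshake, which rearms itself only while the exiting bit is clear. A rough sketch of that consumer side follows; the function name here is hypothetical, while SPU_SCHED_EXITING, sched_flags, spu_sched_wq, sched_work and SPU_TIMESLICE are the identifiers visible in the diff above.

/* Illustrative sketch only, not the actual spufs tick handler. */
static void sketch_sched_tick(struct spu_context *ctx)
{
	/* ... per-timeslice scheduling work ... */

	/* Rearm only while nobody has asked the tick to stop;
	 * spu_stop_tick() sets the bit and issues mb() before
	 * calling cancel_delayed_work(). */
	if (!test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		queue_delayed_work(spu_sched_wq, &ctx->sched_work,
				   SPU_TIMESLICE);
}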