author     Julio M. Merino Vidal <jmerino@ac.upc.edu>  2007-12-20 16:39:59 +0900
committer  Paul Mackerras <paulus@samba.org>  2007-12-21 19:46:18 +1100
commit     9b1d21f858e8bad750ab19cac23dcbf79d099be3 (patch)
tree       6ba3e1365fcd2c1fdd94dcec590e139e4907d1f7 /arch/powerpc/platforms/cell
parent     c25620d7663fef41c373d42c4923c1d6b9847684 (diff)
[POWERPC] spufs: fix typos in sched.c comments
Fix a few typos in the spufs scheduler comments.

Signed-off-by: Julio M. Merino Vidal <jmerino@ac.upc.edu>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4d257b3f933..0117eb8f6a9 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,15 +105,15 @@ void spu_set_timeslice(struct spu_context *ctx)
void __spu_update_sched_info(struct spu_context *ctx)
{
/*
- * 32-Bit assignment are atomic on powerpc, and we don't care about
- * memory ordering here because retriving the controlling thread is
- * per defintion racy.
+ * 32-Bit assignments are atomic on powerpc, and we don't care about
+ * memory ordering here because retrieving the controlling thread is
+ * per definition racy.
*/
ctx->tid = current->pid;
/*
* We do our own priority calculations, so we normally want
- * ->static_prio to start with. Unfortunately thies field
+ * ->static_prio to start with. Unfortunately this field
* contains junk for threads with a realtime scheduling
* policy so we have to look at ->prio in this case.
*/
@@ -127,7 +127,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
* A lot of places that don't hold list_mutex poke into
* cpus_allowed, including grab_runnable_context which
* already holds the runq_lock. So abuse runq_lock
- * to protect this field aswell.
+ * to protect this field as well.
*/
spin_lock(&spu_prio->runq_lock);
ctx->cpus_allowed = current->cpus_allowed;
@@ -182,7 +182,7 @@ static void notify_spus_active(void)
* Wake up the active spu_contexts.
*
* When the awakened processes see their "notify_active" flag is set,
- * they will call spu_switch_notify();
+ * they will call spu_switch_notify().
*/
for_each_online_node(node) {
struct spu *spu;
@@ -579,7 +579,7 @@ static struct spu *find_victim(struct spu_context *ctx)
/*
* Look for a possible preemption candidate on the local node first.
* If there is no candidate look at the other nodes. This isn't
- * exactly fair, but so far the whole spu schedule tries to keep
+ * exactly fair, but so far the whole spu scheduler tries to keep
* a strong node affinity. We might want to fine-tune this in
* the future.
*/
@@ -905,7 +905,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
/*
* Note that last_pid doesn't really make much sense for the
- * SPU loadavg (it even seems very odd on the CPU side..),
+ * SPU loadavg (it even seems very odd on the CPU side...),
* but we include it here to have a 100% compatible interface.
*/
seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",