Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c        | 4
-rw-r--r--  kernel/panic.c          | 2
-rw-r--r--  kernel/power/snapshot.c | 9
-rw-r--r--  kernel/sched.c          | 5
-rw-r--r--  kernel/taskstats.c      | 2
5 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b2b2c2b0a49..b6d2ff7e37e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -412,7 +412,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
/*
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
- * the callback is executed in the hrtimer_interupt context. The
+ * the callback is executed in the hrtimer_interrupt context. The
* reprogramming is handled either by the softirq, which called the
* callback or at the end of the hrtimer_interrupt.
*/
@@ -638,7 +638,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
#endif
/*
- * Counterpart to lock_timer_base above:
+ * Counterpart to lock_hrtimer_base above:
*/
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
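The comment fix above matters because hrtimer.c pairs unlock_hrtimer_base() with lock_hrtimer_base(); lock_timer_base() is the analogous helper in the plain timer code (kernel/timer.c), not here. The idiom both lock functions share is lock-then-recheck: the timer's base pointer can change while we wait for the lock, so it must be re-read after acquisition. Below is a minimal userspace sketch of that idiom, assuming POSIX threads; every name in it is invented for illustration, and the real kernel code uses spin_lock_irqsave() on the base lock rather than a mutex.

#include <pthread.h>
#include <stddef.h>

struct base {
	pthread_mutex_t lock;
};

struct timer {
	struct base *base;	/* may be switched by another thread */
};

static struct base *lock_timer_base_sketch(struct timer *t)
{
	for (;;) {
		struct base *b = t->base;	/* snapshot the pointer */
		if (b != NULL) {
			pthread_mutex_lock(&b->lock);
			if (b == t->base)
				return b;	/* still the right base */
			/* Base changed while we slept on the lock: retry. */
			pthread_mutex_unlock(&b->lock);
		}
		/* NULL base: the timer is being migrated; spin and retry. */
	}
}

/* Counterpart to lock_timer_base_sketch() above. */
static void unlock_timer_base_sketch(struct timer *t)
{
	pthread_mutex_unlock(&t->base->lock);
}

int main(void)
{
	struct base b = { PTHREAD_MUTEX_INITIALIZER };
	struct timer t = { &b };

	lock_timer_base_sketch(&t);
	unlock_timer_base_sketch(&t);
	return 0;
}

(In portable C the unsynchronized read of t->base would need atomics; the kernel relies on its own memory-ordering primitives.)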
diff --git a/kernel/panic.c b/kernel/panic.c
index 3886bd8230f..6f6e03e9159 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -148,7 +148,7 @@ EXPORT_SYMBOL(panic);
* 'F' - Module has been forcibly loaded.
* 'S' - SMP with CPUs not designed for SMP.
* 'R' - User forced a module unload.
- * 'M' - Machine had a machine check experience.
+ * 'M' - System experienced a machine check exception.
* 'B' - System has hit bad_page.
* 'U' - Userspace-defined naughtiness.
*
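For context, each taint letter above is derived from one bit in a kernel-wide taint mask. Here is a self-contained sketch of that rendering, loosely modeled on print_tainted(); the bit names and values below are made up for the example and do not match the kernel's definitions.

#include <stdio.h>

/* Hypothetical taint bits; the kernel defines its own set. */
#define TAINT_FORCED_MODULE	(1U << 0)	/* 'F' */
#define TAINT_UNSAFE_SMP	(1U << 1)	/* 'S' */
#define TAINT_FORCED_RMMOD	(1U << 2)	/* 'R' */
#define TAINT_MACHINE_CHECK	(1U << 3)	/* 'M' */
#define TAINT_BAD_PAGE		(1U << 4)	/* 'B' */

static const char *tainted_string(unsigned int tainted)
{
	static char buf[32];

	snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c",
		 tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
		 tainted & TAINT_UNSAFE_SMP    ? 'S' : ' ',
		 tainted & TAINT_FORCED_RMMOD  ? 'R' : ' ',
		 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
		 tainted & TAINT_BAD_PAGE      ? 'B' : ' ');
	return buf;
}

int main(void)
{
	/* A machine check exception sets the 'M' bit. */
	printf("%s\n", tainted_string(TAINT_MACHINE_CHECK));
	return 0;
}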
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ccc95ac07be..78039b477d2 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1005,11 +1005,12 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
- do {
+ for(;;) {
pfn = memory_bm_next_pfn(orig_bm);
- if (likely(pfn != BM_END_OF_MAP))
- copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
- } while (pfn != BM_END_OF_MAP);
+ if (unlikely(pfn == BM_END_OF_MAP))
+ break;
+ copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
+ }
}
/* Total number of image pages */
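The snapshot.c hunk above replaces a do/while that tested the end-of-map condition twice per iteration (once in the body's if, once in the loop condition) with a for (;;) that breaks at the sentinel, so the copy call runs exactly once per valid PFN and the two bitmaps advance in lockstep. The same restructuring in standalone, runnable form; next() and process() are stand-ins invented here for memory_bm_next_pfn() and copy_data_page().

#include <stdio.h>

#define END_OF_MAP 0UL	/* sentinel, standing in for BM_END_OF_MAP */

/* Hypothetical PFN stream terminated by the sentinel. */
static unsigned long stream[] = { 3, 5, 8, END_OF_MAP };

static unsigned long next(void)
{
	static int i;
	return stream[i++];
}

static void process(unsigned long pfn)
{
	printf("copying pfn %lu\n", pfn);
}

int main(void)
{
	unsigned long pfn;

	/*
	 * Before: do { pfn = next(); if (pfn != END) process(pfn); }
	 *         while (pfn != END);
	 * After: one test per iteration, break at the sentinel.
	 */
	for (;;) {
		pfn = next();
		if (pfn == END_OF_MAP)
			break;
		process(pfn);
	}
	return 0;
}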
diff --git a/kernel/sched.c b/kernel/sched.c
index afe76ec2e7f..7581e331b13 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3310,7 +3310,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in user space since the last update
*/
void account_user_time(struct task_struct *p, cputime_t cputime)
@@ -5146,7 +5145,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
}
/*
- * Figure out where task on dead CPU should go, use force if neccessary.
+ * Figure out where task on dead CPU should go, use force if necessary.
* NOTE: interrupts should be disabled by the caller
*/
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
@@ -5525,7 +5524,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- /* Strictly unneccessary, as first user will wake it. */
+ /* Strictly unnecessary, as first user will wake it. */
wake_up_process(cpu_rq(cpu)->migration_thread);
break;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9f360f68aad..354e74bc17c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -263,7 +263,7 @@ out:
stats->version = TASKSTATS_VERSION;
/*
- * Accounting subsytems can also add calls here to modify
+ * Accounting subsystems can also add calls here to modify
* fields of taskstats.
*/
return rc;
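The corrected comment marks the extension point where accounting subsystems fill their slice of the reply before it is returned. A hedged sketch of that pattern follows; all struct and function names below are invented for illustration and are not the taskstats API.

#include <stdio.h>

/* Hypothetical per-task sample and reply buffer. */
struct task_sample {
	unsigned long long run_ns;	/* time on CPU */
	unsigned long long wait_ns;	/* time runnable but queued */
};

struct stats_reply {
	int version;
	unsigned long long run_ns;
	unsigned long long wait_ns;
};

/* Each subsystem contributes a fill call like this one at the
 * point the comment marks. */
static void fill_sched_stats(const struct task_sample *t,
			     struct stats_reply *s)
{
	s->run_ns = t->run_ns;
	s->wait_ns = t->wait_ns;
}

int main(void)
{
	struct task_sample t = { 1000000ULL, 250000ULL };
	struct stats_reply s = { .version = 1 };

	fill_sched_stats(&t, &s);
	printf("v%d run=%llu wait=%llu\n", s.version, s.run_ns, s.wait_ns);
	return 0;
}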