-rw-r--r--   arch/ia64/kernel/time.c     | 14
-rw-r--r--   arch/x86/kernel/time_64.c   |  2
-rw-r--r--   include/linux/hrtimer.h     |  2
-rw-r--r--   include/linux/jiffies.h     |  6
-rw-r--r--   kernel/time.c               |  4
-rw-r--r--   kernel/time/clockevents.c   |  2
-rw-r--r--   kernel/time/timekeeping.c   |  2
7 files changed, 16 insertions, 16 deletions
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3ab04272097..17fda5293c6 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -49,13 +49,13 @@ EXPORT_SYMBOL(last_cli_ip);
#endif
static struct clocksource clocksource_itc = {
- .name = "itc",
- .rating = 350,
- .read = itc_get_cycles,
- .mask = CLOCKSOURCE_MASK(64),
- .mult = 0, /*to be caluclated*/
- .shift = 16,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .name = "itc",
+ .rating = 350,
+ .read = itc_get_cycles,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 0, /*to be calculated*/
+ .shift = 16,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;
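
The .mult field left at 0 above ("to be calculated") is filled in at runtime from the measured ITC frequency. As a rough standalone illustration (not the ia64 code itself), a conversion in the spirit of the kernel's clocksource_hz2mult() helper looks like the sketch below; hz_to_mult() and the userspace types are assumptions for the sketch.

#include <stdint.h>

/*
 * Sketch: derive .mult for a given .shift so that
 *     nanoseconds = (cycles * mult) >> shift
 * holds for a counter running at `hz` cycles per second.
 */
static uint32_t hz_to_mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = (uint64_t)1000000000 << shift;	/* NSEC_PER_SEC << shift */

	tmp += hz / 2;					/* round to nearest */
	return (uint32_t)(tmp / hz);
}

/* e.g. hz_to_mult(1000000000, 16) == 1 << 16 for a 1 GHz counter */
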
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 0380795121a..c737849e2ef 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -77,7 +77,7 @@ unsigned long __init native_calculate_cpu_khz(void)
reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
}
local_irq_save(flags);
- /* start meauring cycles, incrementing from 0 */
+ /* start measuring cycles, incrementing from 0 */
wrmsrl(MSR_K7_PERFCTR0 + i, 0);
wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
rdtscl(tsc_start);
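
For context on the hunk above: the literal written into MSR_K7_EVNTSEL0 programs an AMD K7-family performance counter to count unhalted CPU clocks, which the calibration code then compares against the TSC. A rough breakdown of the bits follows (my reading; the macro names are illustrative and not in the patch, so verify against AMD's documentation).

#include <stdio.h>

#define K7_EVNTSEL_ENABLE	(1UL << 22)	/* EN: enable the counter */
#define K7_EVNTSEL_OS_USR	(3UL << 16)	/* count in kernel and user mode */
#define K7_EVENT_CPU_CLOCKS	0x76UL		/* "CPU clocks not halted" event */

int main(void)
{
	/* Same value as the literal 1 << 22 | 3 << 16 | 0x76 in the hunk. */
	printf("0x%lx\n", K7_EVNTSEL_ENABLE | K7_EVNTSEL_OS_USR | K7_EVENT_CPU_CLOCKS);
	return 0;
}
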
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 203591e2321..600fc3bcf63 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -78,7 +78,7 @@ enum hrtimer_cb_mode {
* as otherwise the timer could be removed before the softirq code finishes the
* the handling of the timer.
*
- * The HRTIMER_STATE_ENQUEUE bit is always or'ed to the current state to
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
* preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
*
* All state transitions are protected by cpu_base->lock.
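
The or'ing the comment above describes can be seen in a small standalone sketch; the numeric values mirror the HRTIMER_STATE_* definitions of this era, but treat them as illustrative.

#include <stdio.h>

#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01
#define HRTIMER_STATE_CALLBACK	0x02

int main(void)
{
	unsigned long state = HRTIMER_STATE_CALLBACK;	/* handler currently running */

	/* Re-arming from the callback: or'ing keeps the CALLBACK bit set. */
	state |= HRTIMER_STATE_ENQUEUED;
	printf("state = 0x%02lx\n", state);		/* 0x03 */

	/* A plain assignment (state = HRTIMER_STATE_ENQUEUED) would drop
	 * CALLBACK and allow removal before the handler has finished. */
	return 0;
}
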
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 7ba9e47bf06..e0b5b684d83 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -42,7 +42,7 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, the we can
+/* Suppose we want to devide two numbers NOM and DEN: NOM/DEN, then we can
* improve accuracy by shifting LSH bits, hence calculating:
* (NOM << LSH) / DEN
* This however means trouble for large NOM, because (NOM << LSH) may no
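
The comment in the hunk above describes the scaled division that jiffies.h implements with its SH_DIV() macro: split NOM into quotient and remainder so the left shift cannot overflow. A standalone sketch under that reading, where sh_div() is an illustrative stand-in rather than the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* (nom << lsh) / den, rounded, without shifting the full nom first:
 * shift quotient and remainder separately so a large nom cannot overflow. */
static uint64_t sh_div(uint64_t nom, uint64_t den, unsigned lsh)
{
	return ((nom / den) << lsh)
	     + (((nom % den) << lsh) + den / 2) / den;
}

int main(void)
{
	/* e.g. a 1193182 Hz rate scaled against HZ = 100 with 8 extra bits */
	printf("%llu\n", (unsigned long long)sh_div(1193182, 100, 8));
	return 0;
}
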
@@ -160,7 +160,7 @@ extern unsigned long preset_lpj;
* We want to do realistic conversions of time so we need to use the same
* values the update wall clock code uses as the jiffies size. This value
* is: TICK_NSEC (which is defined in timex.h). This
- * is a constant and is in nanoseconds. We will used scaled math
+ * is a constant and is in nanoseconds. We will use scaled math
* with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
* NSEC_JIFFIE_SC. Note that these defines contain nothing but
* constants and so are computed at compile time. SHIFT_HZ (computed in
@@ -204,7 +204,7 @@ extern unsigned long preset_lpj;
* operator if the result is a long long AND at least one of the
* operands is cast to long long (usually just prior to the "*" so as
* not to confuse it into thinking it really has a 64-bit operand,
- * which, buy the way, it can do, but it take more code and at least 2
+ * which, buy the way, it can do, but it takes more code and at least 2
* mpys).
* We also need to be aware that one second in nanoseconds is only a
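
The cast that the comment above talks about matters on 32-bit machines, where a plain unsigned long product silently truncates before it ever reaches the 64-bit destination. A minimal standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long sec = 5;

	/* With 32-bit longs this multiply is done in 32 bits and truncates. */
	unsigned long long wrong = sec * 1000000000UL;

	/* Casting one operand just before the "*" forces a 64-bit multiply. */
	unsigned long long right = (unsigned long long)sec * 1000000000UL;

	printf("%llu vs %llu\n", wrong, right);
	return 0;
}
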
diff --git a/kernel/time.c b/kernel/time.c
index 33af3e55570..3b705ecc3fb 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -267,7 +267,7 @@ EXPORT_SYMBOL(jiffies_to_usecs);
*
* This function should be only used for timestamps returned by
* current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
- * it doesn't handle the better resolution of the later.
+ * it doesn't handle the better resolution of the latter.
*/
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(getnstimeofday);
* This algorithm was first published by Gauss (I think).
*
* WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines were long is 32-bit! (However, as time_t is signed, we
+ * machines where long is 32-bit! (However, as time_t is signed, we
* will already get problems at other places on 2038-01-19 03:14:08)
*/
unsigned long
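
The Gauss-style calculation the comment refers to is the mktime() that follows it; the standalone sketch below is modelled on that routine (months rotated so the leap day ends the counted year, leap years handled by the year/4 - year/100 + year/400 terms) and, like it, overflows in 2106 when unsigned long is 32-bit.

#include <stdio.h>

/* Seconds since 1970-01-01 00:00:00 UTC, Gauss-style. */
static unsigned long my_mktime(unsigned int year, unsigned int mon,
			       unsigned int day, unsigned int hour,
			       unsigned int min, unsigned int sec)
{
	if (0 >= (int)(mon -= 2)) {	/* 1..12 -> 11,12,1..10 */
		mon += 12;		/* puts Feb last, since it has the leap day */
		year -= 1;
	}

	return ((((unsigned long)
		  (year / 4 - year / 100 + year / 400 + 367 * mon / 12 + day) +
		  year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	printf("%lu\n", my_mktime(2009, 2, 13, 23, 31, 30));	/* 1234567890 */
	return 0;
}
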
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1d327f6db42..3d1e3e1a197 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -133,7 +133,7 @@ static void clockevents_do_notify(unsigned long reason, void *dev)
}
/*
- * Called after a notify add to make devices availble which were
+ * Called after a notify add to make devices available which were
* released from the notifier call.
*/
static void clockevents_notify_released(void)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4f2637eed0f..1af9fb050fe 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -364,7 +364,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
* with losing too many ticks, otherwise we would overadjust and
* produce an even larger error. The smaller the adjustment the
* faster we try to adjust for it, as lost ticks can do less harm
- * here. This is tuned so that an error of about 1 msec is adusted
+ * here. This is tuned so that an error of about 1 msec is adjusted
* within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
*/
error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
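
A quick check of the numbers in that comment, taking HZ = 250 (hence SHIFT_HZ = 8) as an illustrative configuration:

#include <stdio.h>

int main(void)
{
	/* "2^20 nsec in 2^SHIFT_HZ ticks": 2^20 ns is about 1 ms, and with
	 * HZ = 250 (SHIFT_HZ = 8) the 2^8 = 256 ticks come to about 1 s,
	 * i.e. an error of roughly 1 msec is worked off over roughly 1 second. */
	printf("%.2f ms over %.2f s\n", (1 << 20) / 1e6, (1 << 8) / 250.0);
	return 0;
}
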