author    Paul Mackerras <paulus@samba.org>    2006-02-24 14:05:47 +1100
committer Paul Mackerras <paulus@samba.org>    2006-02-24 14:05:47 +1100
commit    a00428f5b149e36b8225b2a0812742a6dfb07b8c (patch)
tree      a78869cd67cf78a0eb091fb0ea5d397734bd6738 /arch/powerpc/kernel
parent    774fee58c465ea1c7e9775e347ec307bcf2deeb3 (diff)
parent    fb5c594c2acc441f0d2d8f457484a0e0e9285db3 (diff)
Merge ../powerpc-merge
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/crash.c               4
-rw-r--r--  arch/powerpc/kernel/head_32.S             1
-rw-r--r--  arch/powerpc/kernel/head_64.S           111
-rw-r--r--  arch/powerpc/kernel/lparcfg.c             6
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c   10
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c          16
-rw-r--r--  arch/powerpc/kernel/process.c            32
-rw-r--r--  arch/powerpc/kernel/prom_init.c          38
-rw-r--r--  arch/powerpc/kernel/setup_64.c            4
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c           4
-rw-r--r--  arch/powerpc/kernel/time.c              282
11 files changed, 150 insertions, 358 deletions
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 8c21d378f5d..778f22fd85d 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -134,8 +134,10 @@ static void crash_kexec_prepare_cpus(void)
* the crash CPU will send an IPI and wait for other CPUs to
* respond. If not, proceed the kexec boot even though we failed to
* capture other CPU states.
+ * Delay of at least 10 seconds.
*/
- msecs = 1000000;
+ printk(KERN_ALERT "Sending IPI to other cpus...\n");
+ msecs = 10000;
while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
barrier();
mdelay(1);
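
[Note: the hunk above replaces an ~1000-second wait (msecs = 1000000 with mdelay(1)) by a bounded ~10-second wait. A minimal standalone model of that control flow follows; pending_cpus and delay_1ms() are stand-ins for atomic_read(&waiting_for_crash_ipi) and mdelay(1), not kernel APIs.]

#include <stdio.h>

/* Models how many other CPUs have not yet answered the crash IPI. */
static int pending_cpus = 3;

/* Stand-in for mdelay(1); here we also pretend one CPU checks in per ms. */
static void delay_1ms(void)
{
    if (pending_cpus > 0)
        pending_cpus--;
}

int main(void)
{
    long msecs = 10000;                /* bounded wait: at most ~10 seconds */

    while (pending_cpus > 0 && --msecs > 0)
        delay_1ms();

    if (pending_cpus > 0)
        printf("timed out, proceeding with kexec anyway\n");
    else
        printf("all CPUs responded with %ld ms to spare\n", msecs);
    return 0;
}
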
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 03b25f9359f..a0579e859b2 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -714,6 +714,7 @@ AltiVecUnavailable:
#ifdef CONFIG_ALTIVEC
bne load_up_altivec /* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
+ addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
PerformanceMonitor:
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 7ebb73665e9..2b21ec49928 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -137,7 +137,7 @@ _GLOBAL(__secondary_hold)
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
- /* Grab our linux cpu number */
+ /* Grab our physical cpu number */
mr r24,r3
/* Tell the master cpu we're here */
@@ -151,12 +151,7 @@ _GLOBAL(__secondary_hold)
cmpdi 0,r4,1
bne 100b
-#ifdef CONFIG_HMT
- SET_REG_IMMEDIATE(r4, .hmt_init)
- mtctr r4
- bctr
-#else
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
mtctr r4
mr r3,r24
@@ -164,7 +159,6 @@ _GLOBAL(__secondary_hold)
#else
BUG_OPCODE
#endif
-#endif
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
@@ -319,7 +313,6 @@ exception_marker:
label##_pSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
@@ -327,7 +320,6 @@ label##_pSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common
@@ -337,7 +329,6 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROCENABLED(r13); \
cmpwi 0,r10,0; \
@@ -390,6 +381,7 @@ label##_common: \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
DISABLE_INTS; \
+ bl .ppc64_runlatch_on; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite
@@ -407,7 +399,6 @@ __start_interrupts:
_machine_check_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
. = 0x300
@@ -434,7 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
data_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
- RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
@@ -460,7 +450,6 @@ data_access_slb_pSeries:
instruction_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
- RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
@@ -491,7 +480,6 @@ instruction_access_slb_pSeries:
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
- RUNLATCH_ON(r9)
mr r9,r13
mfmsr r10
mfspr r13,SPRN_SPRG3
@@ -575,7 +563,6 @@ slb_miss_user_pseries:
system_reset_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
.globl machine_check_fwnmi
@@ -583,7 +570,6 @@ system_reset_fwnmi:
machine_check_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
#ifdef CONFIG_PPC_ISERIES
@@ -894,7 +880,6 @@ unrecov_fer:
.align 7
.globl data_access_common
data_access_common:
- RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
mfspr r10,SPRN_DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_DSISR
@@ -1042,6 +1027,7 @@ hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
DISABLE_INTS
+ bl .ppc64_runlatch_on
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except_lite
@@ -1816,22 +1802,6 @@ _STATIC(start_here_multiplatform)
ori r6,r6,MSR_RI
mtmsrd r6 /* RI on */
-#ifdef CONFIG_HMT
- /* Start up the second thread on cpu 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x34 /* Pulsar */
- beq 90f
- cmpwi r3,0x36 /* Icestar */
- beq 90f
- cmpwi r3,0x37 /* SStar */
- beq 90f
- b 91f /* HMT not supported */
-90: li r3,0
- bl .hmt_start_secondary
-91:
-#endif
-
/* The following gets the stack and TOC set up with the regs */
/* pointing to the real addr of the kernel stack. This is */
/* all done to support the C function call below which sets */
@@ -1945,77 +1915,8 @@ _STATIC(start_here_common)
bl .start_kernel
-_GLOBAL(hmt_init)
-#ifdef CONFIG_HMT
- LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
- mfspr r7,SPRN_PVR
- srwi r7,r7,16
- cmpwi r7,0x34 /* Pulsar */
- beq 90f
- cmpwi r7,0x36 /* Icestar */
- beq 91f
- cmpwi r7,0x37 /* SStar */
- beq 91f
- b 101f
-90: mfspr r6,SPRN_PIR
- andi. r6,r6,0x1f
- b 92f
-91: mfspr r6,SPRN_PIR
- andi. r6,r6,0x3ff
-92: sldi r4,r24,3
- stwx r6,r5,r4
- bl .hmt_start_secondary
- b 101f
-
-__hmt_secondary_hold:
- LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
- clrldi r5,r5,4
- li r7,0
- mfspr r6,SPRN_PIR
- mfspr r8,SPRN_PVR
- srwi r8,r8,16
- cmpwi r8,0x34
- bne 93f
- andi. r6,r6,0x1f
- b 103f
-93: andi. r6,r6,0x3f
-
-103: lwzx r8,r5,r7
- cmpw r8,r6
- beq 104f
- addi r7,r7,8
- b 103b
-
-104: addi r7,r7,4
- lwzx r9,r5,r7
- mr r24,r9
-101:
-#endif
- mr r3,r24
- b .pSeries_secondary_smp_init
-
-#ifdef CONFIG_HMT
-_GLOBAL(hmt_start_secondary)
- LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
- clrldi r4,r4,4
- mtspr SPRN_NIADORM, r4
- mfspr r4, SPRN_MSRDORM
- li r5, -65
- and r4, r4, r5
- mtspr SPRN_MSRDORM, r4
- lis r4,0xffef
- ori r4,r4,0x7403
- mtspr SPRN_TSC, r4
- li r4,0x1f4
- mtspr SPRN_TST, r4
- mfspr r4, SPRN_HID0
- ori r4, r4, 0x1
- mtspr SPRN_HID0, r4
- mfspr r4, SPRN_CTRLF
- oris r4, r4, 0x40
- mtspr SPRN_CTRLT, r4
- blr
-#endif
+ /* Not reached */
+ BUG_OPCODE
/*
* We put a few things here that have to be page-aligned.
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1ae96a8ed7e..e789fef4eb8 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
const char *system_id = "";
unsigned int *lp_index_ptr, lp_index = 0;
struct device_node *rtas_node;
- int *lrdrp;
+ int *lrdrp = NULL;
rootdn = find_path_device("/");
if (rootdn) {
@@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "partition_id=%d\n", (int)lp_index);
rtas_node = find_path_device("/rtas");
- lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
+ if (rtas_node)
+ lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity",
+ NULL);
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;
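
[Note: the lparcfg.c hunk initializes lrdrp and only queries the property when the /rtas node was actually found, so a missing node falls through to the vdso_data fallback instead of dereferencing NULL. A tiny standalone sketch of the same guard pattern; find_node() and get_prop() are hypothetical stand-ins for find_path_device()/get_property().]

#include <stddef.h>
#include <stdio.h>

struct node { const int *lrdr_capacity; };

/* Hypothetical stand-ins for the firmware/device-tree helpers. */
static struct node *find_node(int present)
{
    static struct node n = { NULL };
    return present ? &n : NULL;
}

static const int *get_prop(struct node *n)
{
    return n->lrdr_capacity;           /* would crash if called with n == NULL */
}

int main(void)
{
    const int *lrdrp = NULL;               /* initialized, as in the fix */
    struct node *rtas_node = find_node(0); /* simulate a missing /rtas node */

    if (rtas_node)
        lrdrp = get_prop(rtas_node);
    if (lrdrp == NULL)
        printf("no ibm,lrdr-capacity: fall back to vdso processorCount\n");
    return 0;
}
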
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index d6431440c54..ee166c58664 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -26,8 +26,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
-#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */
-
int default_machine_kexec_prepare(struct kimage *image)
{
int i;
@@ -61,7 +59,7 @@ int default_machine_kexec_prepare(struct kimage *image)
*/
if (htab_address) {
low = __pa(htab_address);
- high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
+ high = low + htab_size_bytes;
for (i = 0; i < image->nr_segments; i++) {
begin = image->segment[i].mem;
@@ -294,7 +292,7 @@ void default_machine_kexec(struct kimage *image)
}
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base, htab_size, kernel_end;
+static unsigned long htab_base, kernel_end;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -305,7 +303,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = (unsigned char *)&htab_size,
+ .value = (unsigned char *)&htab_size_bytes,
};
static struct property kernel_end_prop = {
@@ -331,8 +329,6 @@ static void __init export_htab_values(void)
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
-
- htab_size = 1UL << ppc64_pft_size;
prom_add_property(node, &htab_size_prop);
out:
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index b212d3e64b8..af32922db65 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -78,15 +78,8 @@ EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strcasecmp);
EXPORT_SYMBOL(csum_partial);
@@ -184,9 +177,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_PPC_PMAC
-EXPORT_SYMBOL(sys_ctrler);
-#endif
#ifdef CONFIG_VT
EXPORT_SYMBOL(kd_mksound);
#endif
@@ -204,7 +194,6 @@ EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
@@ -213,7 +202,6 @@ EXPORT_SYMBOL(screen_info);
#endif
#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -221,10 +209,6 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(cacheable_memcpy);
#endif
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-
#ifdef CONFIG_8xx
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1201880cab4..dd774c3c930 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -886,3 +886,35 @@ void dump_stack(void)
show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PPC64
+void ppc64_runlatch_on(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+
+ set_thread_flag(TIF_RUNLATCH);
+ }
+}
+
+void ppc64_runlatch_off(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ clear_thread_flag(TIF_RUNLATCH);
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+#endif
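
[Note: together with the head_64.S hunks above, which add "bl .ppc64_runlatch_on" to the common interrupt paths, these helpers keep the CTRL run latch set while real work is happening and use TIF_RUNLATCH to skip redundant SPR writes. A standalone model of that caching behaviour; fake_ctrl_spr and tif_runlatch stand in for SPRN_CTRLF/CTRLT and the thread flag, they are not kernel interfaces.]

#include <stdbool.h>
#include <stdio.h>

#define CTRL_RUNLATCH 0x1

static unsigned long fake_ctrl_spr;        /* models the CTRL register */
static bool tif_runlatch;                  /* models TIF_RUNLATCH */
static int spr_writes;

static void runlatch_on(void)
{
    if (!tif_runlatch) {
        fake_ctrl_spr |= CTRL_RUNLATCH;
        spr_writes++;
        tif_runlatch = true;
    }
}

static void runlatch_off(void)
{
    if (tif_runlatch) {
        tif_runlatch = false;
        fake_ctrl_spr &= ~CTRL_RUNLATCH;
        spr_writes++;
    }
}

int main(void)
{
    /* many interrupts while busy: only the first one pays for the SPR write */
    for (int i = 0; i < 1000; i++)
        runlatch_on();
    runlatch_off();                        /* an idle loop would drop the latch */
    printf("SPR writes: %d (vs 1001 without the cached flag)\n", spr_writes);
    return 0;
}
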
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec7153f4d47..d34fe537400 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,14 +205,6 @@ static cell_t __initdata regbuf[1024];
#define MAX_CPU_THREADS 2
-/* TO GO */
-#ifdef CONFIG_HMT
-struct {
- unsigned int pir;
- unsigned int threadid;
-} hmt_thread_data[NR_CPUS];
-#endif /* CONFIG_HMT */
-
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
@@ -1319,10 +1311,6 @@ static void __init prom_hold_cpus(void)
*/
*spinloop = 0;
-#ifdef CONFIG_HMT
- for (i = 0; i < NR_CPUS; i++)
- RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
-#endif
/* look for cpus */
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -1389,32 +1377,6 @@ static void __init prom_hold_cpus(void)
/* Reserve cpu #s for secondary threads. They start later. */
cpuid += cpu_threads;
}
-#ifdef CONFIG_HMT
- /* Only enable HMT on processors that provide support. */
- if (__is_processor(PV_PULSAR) ||
- __is_processor(PV_ICESTAR) ||
- __is_processor(PV_SSTAR)) {
- prom_printf(" starting secondary threads\n");
-
- for (i = 0; i < NR_CPUS; i += 2) {
- if (!cpu_online(i))
- continue;
-
- if (i == 0) {
- unsigned long pir = mfspr(SPRN_PIR);
- if (__is_processor(PV_PULSAR)) {
- RELOC(hmt_thread_data)[i].pir =
- pir & 0x1f;
- } else {
- RELOC(hmt_thread_data)[i].pir =
- pir & 0x3ff;
- }
- }
- }
- } else {
- prom_printf("Processor is not HMT capable\n");
- }
-#endif
if (cpuid > NR_CPUS)
prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a717dff695e..f96c49b03ba 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -311,8 +311,6 @@ void smp_release_cpus(void)
DBG(" <- smp_release_cpus()\n");
}
-#else
-#define smp_release_cpus()
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
@@ -473,10 +471,12 @@ void __init setup_system(void)
check_smt_enabled();
smp_setup_cpu_maps();
+#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
smp_release_cpus();
+#endif
printk("Starting Linux PPC64 %s\n", system_utsname.version);
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 475249dc235..cd75ab2908f 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -176,7 +176,6 @@ struct timex32 {
};
extern int do_adjtimex(struct timex *);
-extern void ppc_adjtimex(void);
asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
{
@@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
ret = do_adjtimex(&txc);
- /* adjust the conversion of TB to time of day to track adjtimex */
- ppc_adjtimex();
-
if(put_user(txc.modes, &utp->modes) ||
__put_user(txc.offset, &utp->offset) ||
__put_user(txc.freq, &utp->freq) ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045a2fd..2a7ddc57937 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,7 @@
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
+#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
u64 tb_to_xs;
unsigned tb_to_us;
-unsigned long processor_freq;
+
+#define TICKLEN_SCALE (SHIFT_SCALE - 10)
+u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
+u64 ticklen_to_xs; /* 0.64 fraction */
+
+/* If last_tick_len corresponds to about 1/HZ seconds, then
+ last_tick_len << TICKLEN_SHIFT will be about 2^63. */
+#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
+
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
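
[Note: a quick standalone check of the TICKLEN comment above, using assumed values SHIFT_SCALE = 22 and HZ = 250 (so SHIFT_HZ = 8) from kernels of this era; treat those constants as assumptions, not part of the patch.]

#include <stdio.h>
#include <stdint.h>

#define SHIFT_SCALE   22                  /* assumed, include/linux/timex.h */
#define SHIFT_HZ       8                  /* assumed, for HZ = 250 */
#define HZ           250

#define TICKLEN_SCALE (SHIFT_SCALE - 10)
#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

int main(void)
{
    /* last_tick_len is the tick length in ns, scaled by 2^TICKLEN_SCALE */
    uint64_t tick_ns = 1000000000ULL / HZ;
    uint64_t last_tick_len = tick_ns << TICKLEN_SCALE;
    uint64_t shifted = last_tick_len << TICKLEN_SHIFT;

    printf("TICKLEN_SCALE = %d, TICKLEN_SHIFT = %d\n", TICKLEN_SCALE, TICKLEN_SHIFT);
    printf("last_tick_len << TICKLEN_SHIFT = %.3g, 2^63 = %.3g\n",
           (double)shifted, (double)(1ULL << 63));
    return 0;
}
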
@@ -113,10 +122,6 @@ extern unsigned long wall_jiffies;
extern struct timezone sys_tz;
static long timezone_offset;
-void ppc_adjtimex(void);
-
-static unsigned adjusting_time = 0;
-
unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;
@@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void)
*/
if (ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
- jiffies - wall_jiffies == 1) {
+ abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
struct rtc_time tm;
to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
@@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv)
if (__USE_RTC()) {
/* do this the old way */
unsigned long flags, seq;
- unsigned int sec, nsec, usec, lost;
+ unsigned int sec, nsec, usec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
- lost = jiffies - wall_jiffies;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
- usec = nsec / 1000 + lost * (1000000 / HZ);
+ usec = nsec / 1000;
while (usec >= 1000000) {
usec -= 1000000;
++sec;
@@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday);
-/* Synchronize xtime with do_gettimeofday */
-
-static inline void timer_sync_xtime(unsigned long cur_tb)
-{
-#ifdef CONFIG_PPC64
- /* why do we do this? */
- struct timeval my_tv;
-
- __do_gettimeofday(&my_tv, cur_tb);
-
- if (xtime.tv_sec <= my_tv.tv_sec) {
- xtime.tv_sec = my_tv.tv_sec;
- xtime.tv_nsec = my_tv.tv_usec * 1000;
- }
-#endif
-}
-
/*
* There are two copies of tb_to_xs and stamp_xsec so that no
* lock is needed to access and use these values in
@@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
{
unsigned long offset;
u64 new_stamp_xsec;
+ u64 tlen, t2x;
if (__USE_RTC())
return;
+ tlen = current_tick_length();
offset = cur_tb - do_gtod.varp->tb_orig_stamp;
- if ((offset & 0x80000000u) == 0)
- return;
- new_stamp_xsec = do_gtod.varp->stamp_xsec
- + mulhdu(offset, do_gtod.varp->tb_to_xs);
- update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
+ if (tlen == last_tick_len && offset < 0x80000000u) {
+ /* check that we're still in sync; if not, resync */
+ struct timeval tv;
+ __do_gettimeofday(&tv, cur_tb);
+ if (tv.tv_sec <= xtime.tv_sec &&
+ (tv.tv_sec < xtime.tv_sec ||
+ tv.tv_usec * 1000 <= xtime.tv_nsec))
+ return;
+ }
+ if (tlen != last_tick_len) {
+ t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
+ last_tick_len = tlen;
+ } else
+ t2x = do_gtod.varp->tb_to_xs;
+ new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+ do_div(new_stamp_xsec, 1000000000);
+ new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+ update_gtod(cur_tb, new_stamp_xsec, t2x);
}
#ifdef CONFIG_SMP
@@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs)
write_seqlock(&xtime_lock);
tb_last_jiffy += tb_ticks_per_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
- timer_recalc_offset(tb_last_jiffy);
do_timer(regs);
- timer_sync_xtime(tb_last_jiffy);
+ timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
write_sequnlock(&xtime_lock);
- if (adjusting_time && (time_adjust == 0))
- ppc_adjtimex();
}
next_dec = tb_ticks_per_jiffy - ticks;
@@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs)
void wakeup_decrementer(void)
{
- int i;
+ unsigned long ticks;
- set_dec(tb_ticks_per_jiffy);
/*
- * We don't expect this to be called on a machine with a 601,
- * so using get_tbl is fine.
+ * The timebase gets saved on sleep and restored on wakeup,
+ * so all we need to do is to reset the decrementer.
*/
- tb_last_stamp = tb_last_jiffy = get_tb();
- for_each_cpu(i)
- per_cpu(last_jiffy, i) = tb_last_stamp;
+ ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
+ if (ticks < tb_ticks_per_jiffy)
+ ticks = tb_ticks_per_jiffy - ticks;
+ else
+ ticks = 1;
+ set_dec(ticks);
}
#ifdef CONFIG_SMP
@@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv)
time_t wtm_sec, new_sec = tv->tv_sec;
long wtm_nsec, new_nsec = tv->tv_nsec;
unsigned long flags;
- long int tb_delta;
- u64 new_xsec, tb_delta_xs;
+ u64 new_xsec;
+ unsigned long tb_delta;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv)
first_settimeofday = 0;
}
#endif
+
+ /*
+ * Subtract off the number of nanoseconds since the
+ * beginning of the last tick.
+ * Note that since we don't increment jiffies_64 anywhere other
+ * than in do_timer (since we don't have a lost tick problem),
+ * wall_jiffies will always be the same as jiffies,
+ * and therefore the (jiffies - wall_jiffies) computation
+ * has been removed.
+ */
tb_delta = tb_ticks_since(tb_last_stamp);
- tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
- tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
+ tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
+ new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
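
[Note on the units in the hunk above: tb_to_xs is a 0.64 fixed-point count of xsec (1 xsec = 2^-20 s) per timebase tick, so the conversion is ticks -> xsec via a 64x64 high multiply, then xsec -> ns via SCALE_XSEC. A minimal sketch of that arithmetic, assuming a 512 MHz timebase; mulhdu64() and xsec_to_ns() are local helpers written for this sketch, not the kernel's.]

#include <stdio.h>
#include <stdint.h>

/* High 64 bits of a 64x64-bit product, like the kernel's mulhdu(). */
static uint64_t mulhdu64(uint64_t a, uint64_t b)
{
    return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/* xsec -> ns: multiply by 1e9 and divide by XSEC_PER_SEC (2^20). */
static uint64_t xsec_to_ns(uint64_t xsec)
{
    return (uint64_t)(((unsigned __int128)xsec * 1000000000ULL) >> 20);
}

int main(void)
{
    uint64_t tb_freq = 512000000ULL;              /* assumed timebase frequency */
    /* tb_to_xs as a 0.64 fraction: (2^20 / tb_freq) * 2^64 = 2^84 / tb_freq */
    uint64_t tb_to_xs = (uint64_t)((((unsigned __int128)1) << 84) / tb_freq);
    uint64_t tb_delta = tb_freq / 1000;           /* about 1 ms worth of ticks */

    uint64_t xsec = mulhdu64(tb_delta, tb_to_xs);
    printf("%llu ticks -> %llu xsec -> %llu ns (expect ~1000000)\n",
           (unsigned long long)tb_delta,
           (unsigned long long)xsec,
           (unsigned long long)xsec_to_ns(xsec));
    return 0;
}
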
@@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv)
ntp_clear();
- new_xsec = 0;
- if (new_nsec != 0) {
- new_xsec = (u64)new_nsec * XSEC_PER_SEC;
+ new_xsec = xtime.tv_nsec;
+ if (new_xsec != 0) {
+ new_xsec *= XSEC_PER_SEC;
do_div(new_xsec, NSEC_PER_SEC);
}
- new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
+ new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -671,7 +681,7 @@ void __init time_init(void)
unsigned long flags;
unsigned long tm = 0;
struct div_result res;
- u64 scale;
+ u64 scale, x;
unsigned shift;
if (ppc_md.time_init != NULL)
@@ -693,11 +703,36 @@ void __init time_init(void)
}
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
- tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+ tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
- div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
- tb_to_xs = res.result_low;
+
+ /*
+ * Calculate the length of each tick in ns. It will not be
+ * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
+ * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
+ * rounded up.
+ */
+ x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
+ do_div(x, ppc_tb_freq);
+ tick_nsec = x;
+ last_tick_len = x << TICKLEN_SCALE;
+
+ /*
+ * Compute ticklen_to_xs, which is a factor which gets multiplied
+ * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
+ * It is computed as:
+ * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
+ * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
+ * so as to give the result as a 0.64 fixed-point fraction.
+ */
+ div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
+ tb_ticks_per_jiffy, &res);
+ div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
+ ticklen_to_xs = res.result_low;
+
+ /* Compute tb_to_xs from tick_nsec */
+ tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
/*
* Compute scale factor for sched_clock.
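
[Note: to see that the ticklen_to_xs route added above reproduces the old tb_to_xs = 2^84 / tb_ticks_per_sec value when the tick length is nominal, here is a standalone check using the same assumed constants as the earlier sketch (SHIFT_SCALE = 22, HZ = 250, a 512 MHz timebase). div128_by_32's two-word dividend is modelled with unsigned __int128, so the 0.64-stored ticklen_to_xs comes out as 2^(64+N) / (tb_ticks_per_jiffy * 1e9).]

#include <stdio.h>
#include <stdint.h>

#define SHIFT_SCALE   22                  /* assumed */
#define SHIFT_HZ       8                  /* assumed, HZ = 250 */
#define HZ           250
#define TICKLEN_SCALE (SHIFT_SCALE - 10)
#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

static uint64_t mulhdu64(uint64_t a, uint64_t b)
{
    return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
    uint64_t tb_freq = 512000000ULL;                       /* assumed */
    uint64_t tb_ticks_per_jiffy = tb_freq / HZ;

    /* tick_nsec rounded up, then scaled, as in time_init() above */
    uint64_t tick_nsec = (1000000000ULL * tb_ticks_per_jiffy + tb_freq - 1) / tb_freq;
    uint64_t last_tick_len = tick_nsec << TICKLEN_SCALE;

    /* dividend high word is 1 << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT),
     * so the full 128-bit dividend is 2^(128 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT) */
    unsigned __int128 dividend =
        (unsigned __int128)1 << (64 + 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT);
    uint64_t ticklen_to_xs =
        (uint64_t)(dividend / ((unsigned __int128)tb_ticks_per_jiffy * 1000000000ULL));

    uint64_t tb_to_xs_new = mulhdu64(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
    uint64_t tb_to_xs_old = (uint64_t)((((unsigned __int128)1) << 84) / tb_freq);

    printf("tb_to_xs via ticklen_to_xs = %#llx\n", (unsigned long long)tb_to_xs_new);
    printf("tb_to_xs = 2^84 / tb_freq  = %#llx\n", (unsigned long long)tb_to_xs_old);
    return 0;
}
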
@@ -724,6 +759,14 @@ void __init time_init(void)
tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
+
+ /* If platform provided a timezone (pmac), we correct the time */
+ if (timezone_offset) {
+ sys_tz.tz_minuteswest = -timezone_offset / 60;
+ sys_tz.tz_dsttime = 0;
+ tm -= timezone_offset;
+ }
+
xtime.tv_sec = tm;
xtime.tv_nsec = 0;
do_gtod.varp = &do_gtod.vars[0];
@@ -738,18 +781,11 @@ void __init time_init(void)
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
- vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+ vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
time_freq = 0;
- /* If platform provided a timezone (pmac), we correct the time */
- if (timezone_offset) {
- sys_tz.tz_minuteswest = -timezone_offset / 60;
- sys_tz.tz_dsttime = 0;
- xtime.tv_sec -= timezone_offset;
- }
-
last_rtc_update = xtime.tv_sec;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
@@ -759,126 +795,6 @@ void __init time_init(void)
set_dec(tb_ticks_per_jiffy);
}
-/*
- * After adjtimex is called, adjust the conversion of tb ticks
- * to microseconds to keep do_gettimeofday synchronized
- * with ntpd.
- *
- * Use the time_adjust, time_freq and time_offset computed by adjtimex to
- * adjust the frequency.
- */
-
-/* #define DEBUG_PPC_ADJTIMEX 1 */
-
-void ppc_adjtimex(void)
-{
-#ifdef CONFIG_PPC64
- unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
- new_tb_to_xs, new_xsec, new_stamp_xsec;
- unsigned long tb_ticks_per_sec_delta;
- long delta_freq, ltemp;
- struct div_result divres;
- unsigned long flags;
- long singleshot_ppm = 0;
-
- /*
- * Compute parts per million frequency adjustment to
- * accomplish the time adjustment implied by time_offset to be
- * applied over the elapsed time indicated by time_constant.
- * Use SHIFT_USEC to get it into the same units as
- * time_freq.
- */
- if ( time_offset < 0 ) {
- ltemp = -time_offset;
- ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
- ltemp >>= SHIFT_KG + time_constant;
- ltemp = -ltemp;
- } else {
- ltemp = time_offset;
- ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
- ltemp >>= SHIFT_KG + time_constant;
- }
-
- /* If there is a single shot time adjustment in progress */
- if ( time_adjust ) {
-#ifdef DEBUG_PPC_ADJTIMEX
- printk("ppc_adjtimex: ");
- if ( adjusting_time == 0 )
- printk("starting ");
- printk("single shot time_adjust = %ld\n", time_adjust);
-#endif
-
- adjusting_time = 1;
-
- /*
- * Compute parts per million frequency adjustment
- * to match time_adjust
- */
- singleshot_ppm = tickadj * HZ;
- /*
- * The adjustment should be tickadj*HZ to match the code in
- * linux/kernel/timer.c, but experiments show that this is too
- * large. 3/4 of tickadj*HZ seems about right
- */
- singleshot_ppm -= singleshot_ppm / 4;
- /* Use SHIFT_USEC to get it into the same units as time_freq */
- singleshot_ppm <<= SHIFT_USEC;
- if ( time_adjust < 0 )
- singleshot_ppm = -singleshot_ppm;
- }
- else {
-#ifdef DEBUG_PPC_ADJTIMEX
- if ( adjusting_time )
- printk("ppc_adjtimex: ending single shot time_adjust\n");
-#endif
- adjusting_time = 0;
- }
-
- /* Add up all of the frequency adjustments */
- delta_freq = time_freq + ltemp + singleshot_ppm;
-
- /*
- * Compute a new value for tb_ticks_per_sec based on
- * the frequency adjustment
- */
- den = 1000000 * (1 << (SHIFT_USEC - 8));
- if ( delta_freq < 0 ) {
- tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
- new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
- }
- else {
- tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
- new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
- }
-
-#ifdef DEBUG_PPC_ADJTIMEX
- printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
- printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
-#endif
-
- /*
- * Compute a new value of tb_to_xs (used to convert tb to
- * microseconds) and a new value of stamp_xsec which is the
- * time (in 1/2^20 second units) corresponding to
- * tb_orig_stamp. This new value of stamp_xsec compensates
- * for the change in frequency (implied by the new tb_to_xs)
- * which guarantees that the current time remains the same.
- */
- write_seqlock_irqsave( &xtime_lock, flags );
- tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
- div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
- new_tb_to_xs = divres.result_low;
- new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
-
- old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
- new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
-
- update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
-
- write_sequnlock_irqrestore( &xtime_lock, flags );
-#endif /* CONFIG_PPC64 */
-}
-
#define FEBRUARY 2
#define STARTOFTIME 1970