author     Michael Neuling <mikey@neuling.org>     2006-08-07 16:19:19 +1000
committer  Paul Mackerras <paulus@samba.org>       2006-08-08 17:08:56 +1000
commit     2f6093c84730b4bad65bcd0f2f904a5769b1dfc5 (patch)
tree       ab4e64a0520e944062f418e91706ff968e23a6ea /arch
parent     452b5e21216011f2f068e80443568f5f3f3f4d63 (diff)
[POWERPC] Implement SLB shadow buffer
This adds a shadow buffer for the SLBs and registers it with PHYP.
Only the bolted SLB entries (top 3) are shadowed.

The SLB shadow buffer tells the hypervisor what the kernel needs to
have in the SLB for the kernel to be able to function.  The hypervisor
can use this information to speed up partition context switches.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
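[Editor's note: the struct slb_shadow that the patch registers is defined in a
header outside arch/ and therefore does not appear in the diffstat below.  A
minimal sketch of the layout the patch relies on, inferred from the
initializers in paca.c and the 0x10-byte save_area stride in entry_64.S; the
reserved pad is an assumption to place save_area at offset 0x10:]

	/* Sketch only: the real definition lives in the companion header
	 * change, outside the arch/ subtree shown in this diff. */
	struct slb_shadow {
		u32	persistent;	/* set to SLB_NUM_BOLTED in paca.c */
		u32	buffer_length;	/* sizeof(struct slb_shadow) */
		u64	reserved;	/* assumed pad: save_area at 0x10 */
		struct {
			u64	esid;	/* valid bit (SLB_ESID_V) lives here */
			u64	vsid;
		} save_area[SLB_NUM_BOLTED];	/* 0x10 bytes per entry */
	};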
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c               |  2
-rw-r--r--  arch/powerpc/kernel/entry_64.S                  | 13
-rw-r--r--  arch/powerpc/kernel/paca.c                      | 15
-rw-r--r--  arch/powerpc/mm/slb.c                           | 37
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c           | 24
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 10
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c          | 12
7 files changed, 102 insertions(+), 11 deletions(-)
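[Editor's note: the assembly added to entry_64.S and the slb_shadow_update()
helper added to slb.c below follow the same publication order when rewriting
a shadowed entry.  Restated as a self-contained C fragment for clarity;
shadow_entry_update is a hypothetical name, and barrier() is the compiler
barrier the patch itself uses:]

	/* Illustrative restatement of the update ordering used by the
	 * patch: the entry is never seen valid while half-written,
	 * because the valid bit (SLB_ESID_V) lives in the esid word,
	 * which is cleared first and rewritten last. */
	static inline void shadow_entry_update(struct slb_shadow *s, int entry,
					       unsigned long esid,
					       unsigned long vsid)
	{
		s->save_area[entry].esid = 0;		/* invalidate entry */
		barrier();				/* order the stores */
		s->save_area[entry].vsid = vsid;	/* fill while invalid */
		barrier();
		s->save_area[entry].esid = esid;	/* publish valid entry */
	}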
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ac0631958b2..2ef7ea86037 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -135,11 +135,13 @@ int main(void)
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+ DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
#endif /* CONFIG_PPC64 */
/* RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 54d9f5cdaab..5baea498ea6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -323,6 +323,11 @@ _GLOBAL(ret_from_fork)
* The code which creates the new task context is in 'copy_thread'
* in arch/powerpc/kernel/process.c
*/
+#define SHADOW_SLB_BOLTED_STACK_ESID \
+ (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1))
+#define SHADOW_SLB_BOLTED_STACK_VSID \
+ (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1) + 8)
+
.align 7
_GLOBAL(_switch)
mflr r0
@@ -375,6 +380,14 @@ BEGIN_FTR_SECTION
ld r7,KSP_VSID(r4) /* Get new stack's VSID */
oris r0,r6,(SLB_ESID_V)@h
ori r0,r0,(SLB_NUM_BOLTED-1)@l
+
+ /* Update the last bolted SLB */
+ ld r9,PACA_SLBSHADOWPTR(r13)
+ li r12,0
+ std r12,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Clear ESID */
+ std r7,SHADOW_SLB_BOLTED_STACK_VSID(r9) /* Save VSID */
+ std r0,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Save ESID */
+
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c68741fed14..55f1a25085c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -17,6 +17,7 @@
#include <asm/lppaca.h>
#include <asm/iseries/it_lp_reg_save.h>
#include <asm/paca.h>
+#include <asm/mmu.h>
/* This symbol is provided by the linker - let it fill in the paca
@@ -45,6 +46,17 @@ struct lppaca lppaca[] = {
},
};
+/*
+ * 3 persistent SLBs are registered here. The buffer will be zero
+ * initially, hence will all be invalid until we actually write them.
+ */
+struct slb_shadow slb_shadow[] __cacheline_aligned = {
+ [0 ... (NR_CPUS-1)] = {
+ .persistent = SLB_NUM_BOLTED,
+ .buffer_length = sizeof(struct slb_shadow),
+ },
+};
+
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux.
@@ -59,7 +71,8 @@ struct lppaca lppaca[] = {
.lock_token = 0x8000, \
.paca_index = (number), /* Paca Index */ \
.kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
- .hw_cpu_id = 0xffff,
+ .hw_cpu_id = 0xffff, \
+ .slb_shadow_ptr = &slb_shadow[number],
#ifdef CONFIG_PPC_ISERIES
#define PACA_INIT_ISERIES(number) \
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index de0c8842415..d3733912adb 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -22,6 +22,8 @@
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <linux/compiler.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -50,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}
-static inline void create_slbe(unsigned long ea, unsigned long flags,
- unsigned long entry)
+static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+ unsigned long entry)
{
+ /*
+ * Clear the ESID first so the entry is not valid while we are
+ * updating it.
+ */
+ get_slb_shadow()->save_area[entry].esid = 0;
+ barrier();
+ get_slb_shadow()->save_area[entry].vsid = vsid;
+ barrier();
+ get_slb_shadow()->save_area[entry].esid = esid;
+
+}
+
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+ unsigned long entry)
+{
+ /*
+ * Updating the shadow buffer before writing the SLB ensures
+ * we don't get a stale entry here if we get preempted by PHYP
+ * between these two statements.
+ */
+ slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
+ entry);
+
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, flags)),
"r" (mk_esid_data(ea, entry))
@@ -77,6 +102,10 @@ void slb_flush_and_rebolt(void)
if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
ksp_esid_data &= ~SLB_ESID_V;
+ /* Only third entry (stack) may change here so only resave that */
+ slb_shadow_update(ksp_esid_data,
+ mk_vsid_data(ksp_esid_data, lflags), 2);
+
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
asm volatile("isync\n"
@@ -209,9 +238,9 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_slbe(PAGE_OFFSET, lflags, 0);
+ create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
- create_slbe(VMALLOC_START, vflags, 1);
+ create_shadowed_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 6cbf14266d5..1820a0b0a8c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -252,18 +252,34 @@ out:
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
- unsigned long vpa = __pa(&lppaca[cpu]);
+ unsigned long addr;
long ret;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
lppaca[cpu].vmxregs_in_use = 1;
- ret = register_vpa(hwcpu, vpa);
+ addr = __pa(&lppaca[cpu]);
+ ret = register_vpa(hwcpu, addr);
- if (ret)
+ if (ret) {
printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
"cpu %d (hw %d) of area %lx returns %ld\n",
- cpu, hwcpu, vpa, ret);
+ cpu, hwcpu, addr, ret);
+ return;
+ }
+ /*
+ * PAPR calls this feature "SLB-Buffer", but firmware never
+ * reports it. All SPLPAR systems support the SLB shadow buffer.
+ */
+ addr = __pa(&slb_shadow[cpu]);
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ ret = register_slb_shadow(hwcpu, addr);
+ if (ret)
+ printk(KERN_ERR
+ "WARNING: vpa_init: SLB shadow buffer "
+ "registration for cpu %d (hw %d) of area %lx "
+ "returns %ld\n", cpu, hwcpu, addr, ret);
+ }
}
long pSeries_lpar_hpte_insert(unsigned long hpte_group,
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index ebd15de7597..3eb7b294d92 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -37,6 +37,16 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa)
return vpa_call(0x1, cpu, vpa);
}
+static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(0x7, cpu, vpa);
+}
+
+static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(0x3, cpu, vpa);
+}
+
extern void vpa_init(int cpu);
static inline long plpar_pte_enter(unsigned long flags,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index de214d86ff4..6ebeecfd6bc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -234,9 +234,17 @@ static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
- unsigned long vpa = __pa(get_lppaca());
+ unsigned long addr;
- if (unregister_vpa(hard_smp_processor_id(), vpa)) {
+ addr = __pa(get_slb_shadow());
+ if (unregister_slb_shadow(hard_smp_processor_id(), addr))
+ printk("SLB shadow buffer deregistration of "
+ "cpu %u (hw_cpu_id %d) failed\n",
+ smp_processor_id(),
+ hard_smp_processor_id());
+
+ addr = __pa(get_lppaca());
+ if (unregister_vpa(hard_smp_processor_id(), addr)) {
printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
"failed\n", smp_processor_id(),
hard_smp_processor_id());