author     Paul Mackerras <paulus@samba.org>    2006-06-11 14:15:17 +1000
committer  Paul Mackerras <paulus@samba.org>    2006-06-11 14:15:17 +1000
commit     6218a761bbc27acc65248c80024875bcc06d52b1 (patch)
tree       59a278c4c189f838ede99de5fd46241d1923f52b /include/asm-ppc/mmu_context.h
parent     050613545b389825c1f5beb67fa2667b727f866d (diff)
powerpc: add context.vdso_base for 32-bit too
This adds a vdso_base element to the mm_context_t for 32-bit compiles (both for ARCH=powerpc and ARCH=ppc). This fixes the compile errors that have been reported in arch/powerpc/kernel/signal_32.c.

Signed-off-by: Paul Mackerras <paulus@samba.org>
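The mm_context_t type itself is defined outside this header (in the asm-ppc MMU header) and is not part of this diff. A minimal sketch of what the 32-bit type looks like once it carries the new vdso_base element, assuming the layout implied by the accessors changed below:

    /* Sketch only -- not taken from this commit.  The 32-bit mm_context_t
     * that the code below assumes: a context id plus the new vdso_base. */
    typedef struct {
        unsigned long id;        /* MMU context number; NO_CONTEXT when unassigned */
        unsigned long vdso_base; /* user address of the VDSO mapping, 0 if none */
    } mm_context_t;

With such a layout, init_new_context() can reset both fields and every former use of mm->context becomes mm->context.id, which is what the hunks below do.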
Diffstat (limited to 'include/asm-ppc/mmu_context.h')
-rw-r--r--  include/asm-ppc/mmu_context.h | 27
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 4f152cca13c..4454ecf1aed 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -71,7 +71,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#else
/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT ((mm_context_t) -1)
+#define NO_CONTEXT ((unsigned long) -1)
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
#endif
@@ -86,7 +86,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
/*
* Bitmap of contexts in use.
@@ -99,7 +99,7 @@ extern unsigned long context_map[];
* Its use is an optimization only, we can't rely on this context
* number to be free, but it usually will be.
*/
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
/*
* If we don't have sufficient contexts to give one to every task
@@ -118,9 +118,9 @@ extern void steal_context(void);
*/
static inline void get_mmu_context(struct mm_struct *mm)
{
- mm_context_t ctx;
+ unsigned long ctx;
- if (mm->context != NO_CONTEXT)
+ if (mm->context.id != NO_CONTEXT)
return;
#ifdef FEW_CONTEXTS
while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -133,7 +133,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context = ctx;
+ mm->context.id = ctx;
#ifdef FEW_CONTEXTS
context_mm[ctx] = mm;
#endif
@@ -142,7 +142,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
* Set up the context for a new address space.
*/
-#define init_new_context(tsk,mm) (((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = NO_CONTEXT;
+ mm->context.vdso_base = 0;
+ return 0;
+}
/*
* We're finished using the context for an address space.
@@ -150,9 +155,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
static inline void destroy_context(struct mm_struct *mm)
{
preempt_disable();
- if (mm->context != NO_CONTEXT) {
- clear_bit(mm->context, context_map);
- mm->context = NO_CONTEXT;
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
atomic_inc(&nr_free_contexts);
#endif
@@ -180,7 +185,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Setup new userspace context */
get_mmu_context(next);
- set_context(next->context, next->pgd);
+ set_context(next->context.id, next->pgd);
}
#define deactivate_mm(tsk,mm) do { } while (0)
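For context, the compile errors mentioned in the commit message come from 32-bit signal delivery code that reads context.vdso_base. A rough, hypothetical sketch of that kind of consumer follows; the helper name and the vdso32_sigtramp offset are illustrative assumptions, not code from this commit:

    /* Hypothetical sketch of the sort of use in arch/powerpc/kernel/signal_32.c
     * that requires context.vdso_base to exist on 32-bit.  The helper and the
     * vdso32_sigtramp symbol are illustrative names only. */
    static unsigned long pick_sigreturn_tramp(unsigned long stack_tramp)
    {
        struct mm_struct *mm = current->mm;

        /* Prefer returning through the trampoline in the VDSO when it is mapped... */
        if (vdso32_sigtramp && mm->context.vdso_base)
            return mm->context.vdso_base + vdso32_sigtramp;
        /* ...otherwise fall back to the trampoline placed on the user stack. */
        return stack_tramp;
    }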