author     Paul Mundt <lethal@linux-sh.org>    2012-05-14 16:44:45 +0900
committer  Paul Mundt <lethal@linux-sh.org>    2012-05-14 16:44:45 +0900
commit     4de5185629f44942f60e2fd536709ef31bd5a9c1 (patch)
tree       9e0d652a25c7c14d7d153e8ed322270bc554fa4f /arch
parent     c06fd28387a3da2cc4763f7f471f735ccdd61b88 (diff)
sh64: Invert page fault fast-path error path values.
This brings the sh64 version in line with the sh32 one with regard to
how errors are handled, as base work for further unification of the
implementations.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
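In concrete terms, the patch inverts the fast-path return convention: the handlers now return 0 when the fault was fixed and a nonzero value when it must be punted to the generic fault handler, matching sh32 and the usual kernel 0-on-success idiom. A minimal standalone sketch of the old and new conventions (the fast_path_old()/fast_path_new() names are illustrative, not from the kernel):

```c
#include <stdio.h>

/* Old sh64 convention: nonzero meant "fault fixed", 0 meant "not handled". */
static int fast_path_old(int fixed)
{
	return fixed ? 1 : 0;
}

/* New convention (as on sh32): 0 means "fault fixed", nonzero means "error". */
static int fast_path_new(int fixed)
{
	return fixed ? 0 : 1;
}

int main(void)
{
	/* A caller written against the old convention tests for nonzero... */
	if (fast_path_old(1))
		printf("old: handled on the fast path\n");

	/* ...while a caller of the new convention tests for zero. */
	if (fast_path_new(1) == 0)
		printf("new: handled on the fast path\n");

	return 0;
}
```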
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S |  2 +-
-rw-r--r--  arch/sh/mm/tlbex_64.c          | 36 ++++++++++++++++++------------------
2 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index de60dc8d737..ff1f0e6e9be 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -335,7 +335,7 @@ tlb_miss:
 	/* If the fast path handler fixed the fault, just drop through quickly
 	   to the restore code right away to return to the excepting context.
 	 */
-	beqi/u	r2, 0, tr1
+	bnei/u	r2, 0, tr1
 
 fast_tlb_miss_restore:
 	ld.q	SP, SAVED_TR0, r2
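Because the handlers now return 0 on success, the branch above flips from beqi/u (branch to the slow path in tr1 when r2 == 0, the old failure value) to bnei/u (branch when r2 != 0, the new failure value); r2 holds the value returned by do_fast_page_fault(). A C paraphrase of the flipped control flow, with slow_path() and fast_restore() as hypothetical stand-ins for the tr1 target and the fast_tlb_miss_restore code:

```c
#include <stdio.h>

/* Illustrative stand-ins for the tr1 target and fast_tlb_miss_restore;
 * these are not real kernel symbols. */
static void slow_path(void)    { printf("punt to the generic fault handler\n"); }
static void fast_restore(void) { printf("restore and return to the faulting context\n"); }

/* C paraphrase of the branch: r2 holds do_fast_page_fault()'s return value. */
static void tlb_miss_sketch(int r2)
{
	if (r2 != 0) {		/* bnei/u r2, 0, tr1 */
		slow_path();	/* fault not fixed */
		return;
	}
	fast_restore();		/* fault fixed: drop through */
}

int main(void)
{
	tlb_miss_sketch(0);	/* handled on the fast path */
	tlb_miss_sketch(1);	/* falls back to the slow path */
	return 0;
}
```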
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index 98b64278f8c..59cb058217a 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -53,23 +53,23 @@ static int handle_vmalloc_fault(struct mm_struct *mm,
 
 	pud = pud_offset(dir, address);
 	if (pud_none_or_clear_bad(pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 static int handle_tlbmiss(struct mm_struct *mm,
@@ -94,27 +94,27 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	   the next test is necessary.  - RPC */
 	if (address >= (unsigned long) TASK_SIZE)
 		/* upper half - never has page table entries. */
-		return 0;
+		return 1;
 
 	dir = pgd_offset(mm, address);
 	if (pgd_none(*dir) || !pgd_present(*dir))
-		return 0;
+		return 1;
 	if (!pgd_present(*dir))
-		return 0;
+		return 1;
 
 	pud = pud_offset(dir, address);
 	if (pud_none(*pud) || !pud_present(*pud))
-		return 0;
+		return 1;
 
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd) || !pmd_present(*pmd))
-		return 0;
+		return 1;
 
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 
 	if (pte_none(entry) || !pte_present(entry))
-		return 0;
+		return 1;
 
 	/*
 	 * If the page doesn't have sufficient protection bits set to
@@ -123,11 +123,11 @@ static int handle_tlbmiss(struct mm_struct *mm,
 	 * handler.
 	 */
 	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 0;
+		return 1;
 
 	update_mmu_cache(NULL, address, pte);
 
-	return 1;
+	return 0;
 }
 
 /*
@@ -214,12 +214,12 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
 		 * Process-contexts can never have this address
 		 * range mapped
 		 */
-		if (handle_vmalloc_fault(mm, protection_flags, address))
-			return 1;
+		if (handle_vmalloc_fault(mm, protection_flags, address) == 0)
+			return 0;
 	} else if (!in_interrupt() && mm) {
-		if (handle_tlbmiss(mm, protection_flags, address))
-			return 1;
+		if (handle_tlbmiss(mm, protection_flags, address) == 0)
+			return 0;
 	}
 
-	return 0;
+	return 1;
 }
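Taken together, the two fast-path helpers and their assembly caller now agree on the 0-on-success convention already used by the sh32 implementation. That shared contract is what makes the further unification mentioned in the commit message practical: once both fast paths report errors the same way, the C portions can be merged without per-architecture glue around the return values.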