Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/dyn.lds.S                |    6
-rw-r--r--  arch/um/kernel/physmem.c                |    8
-rw-r--r--  arch/um/kernel/process.c                |   28
-rw-r--r--  arch/um/kernel/skas/exec_kern.c         |    2
-rw-r--r--  arch/um/kernel/skas/include/mm_id.h     |   17
-rw-r--r--  arch/um/kernel/skas/include/mmu-skas.h  |    7
-rw-r--r--  arch/um/kernel/skas/include/skas.h      |   15
-rw-r--r--  arch/um/kernel/skas/mem.c               |    6
-rw-r--r--  arch/um/kernel/skas/mem_user.c          |  225
-rw-r--r--  arch/um/kernel/skas/mmu.c               |  136
-rw-r--r--  arch/um/kernel/skas/process.c           |  153
-rw-r--r--  arch/um/kernel/skas/process_kern.c      |   33
-rw-r--r--  arch/um/kernel/skas/tlb.c               |   28
-rw-r--r--  arch/um/kernel/tlb.c                    |  132
-rw-r--r--  arch/um/kernel/tt/tlb.c                 |    4
-rw-r--r--  arch/um/kernel/uml.lds.S                |    7
16 files changed, 577 insertions(+), 230 deletions(-)
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 715b0838a68..3942a5f245d 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -67,6 +67,12 @@ SECTIONS
*(.stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
+
+ . = ALIGN(4096);
+ __syscall_stub_start = .;
+ *(.__syscall_stub*)
+ __syscall_stub_end = .;
+ . = ALIGN(4096);
} =0x90909090
.fini : {
KEEP (*(.fini))
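
The hunk above carves the syscall stub out into its own page-aligned section and brackets it with the linker-defined symbols __syscall_stub_start and __syscall_stub_end. Such symbols have no storage of their own; C code only ever takes their address. A minimal sketch of that idiom (illustrative, not part of this patch):

    /* The declared type is arbitrary; only the addresses are meaningful. */
    extern char __syscall_stub_start[], __syscall_stub_end[];

    static unsigned long stub_section_size(void)
    {
            /* One page here, thanks to the ALIGN(4096) directives above. */
            return (unsigned long) __syscall_stub_end -
                   (unsigned long) __syscall_stub_start;
    }
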
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 420e6d51fa0..a24e3b7f4bf 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -353,6 +353,8 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+extern int __syscall_stub_start, __binary_start;
+
void setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long highmem)
{
@@ -371,6 +373,12 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
exit(1);
}
+ /* Special kludge - This page will be mapped in to userspace processes
+ * from physmem_fd, so it needs to be written out there.
+ */
+ os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
+ os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
+
bootmap_size = init_bootmem(pfn, pfn + delta);
free_bootmem(__pa(reserve_end) + bootmap_size,
len - bootmap_size - reserve);
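
setup_physmem() now seeks the physical-memory backing file to the stub page's physical address and writes the page out, so that later mmaps of physmem_fd at that offset hand child processes the same stub code the kernel linked in. os_seek_file()/os_write_file() are UML's host wrappers; in plain libc terms the operation is roughly the following (an assumed equivalent, not the patch's code):

    #include <unistd.h>

    /* Copy one page of stub code into the backing file at its physical
     * offset so a later mmap() of that offset sees the stub. */
    static int write_stub_page(int physmem_fd, const void *stub_virt,
                               off_t phys_offset, size_t page_size)
    {
            if (lseek(physmem_fd, phys_offset, SEEK_SET) == (off_t) -1)
                    return -1;
            return (write(physmem_fd, stub_virt, page_size) ==
                    (ssize_t) page_size) ? 0 : -1;
    }
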
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 1b5ef3e96c7..c45a60e9c92 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -32,6 +32,7 @@
#include "uml-config.h"
#include "choose-mode.h"
#include "mode.h"
+#include "tempfile.h"
#ifdef UML_CONFIG_MODE_SKAS
#include "skas.h"
#include "skas_ptrace.h"
@@ -358,11 +359,16 @@ void forward_pending_sigio(int target)
kill(target, SIGIO);
}
+int ptrace_faultinfo = 0;
+int proc_mm = 1;
+
+extern void *__syscall_stub_start, __syscall_stub_end;
+
#ifdef UML_CONFIG_MODE_SKAS
-static inline int check_skas3_ptrace_support(void)
+static inline void check_skas3_ptrace_support(void)
{
struct ptrace_faultinfo fi;
- int pid, n, ret = 1;
+ int pid, n;
printf("Checking for the skas3 patch in the host...");
pid = start_ptraced_child();
@@ -374,33 +380,31 @@ static inline int check_skas3_ptrace_support(void)
else {
perror("not found");
}
- ret = 0;
- } else {
+ }
+ else {
+ ptrace_faultinfo = 1;
printf("found\n");
}
init_registers(pid);
stop_ptraced_child(pid, 1, 1);
-
- return(ret);
}
int can_do_skas(void)
{
- int ret = 1;
-
printf("Checking for /proc/mm...");
if (os_access("/proc/mm", OS_ACC_W_OK) < 0) {
+ proc_mm = 0;
printf("not found\n");
- ret = 0;
goto out;
- } else {
+ }
+ else {
printf("found\n");
}
- ret = check_skas3_ptrace_support();
out:
- return ret;
+ check_skas3_ptrace_support();
+ return 1;
}
#else
int can_do_skas(void)
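
can_do_skas() and check_skas3_ptrace_support() no longer report pass/fail; they set two host-capability flags, proc_mm (a writable /proc/mm exists) and ptrace_faultinfo (the host ptrace supports PTRACE_FAULTINFO), and skas mode is always reported as usable. The flags then select between the skas3 fast paths and the new skas0 stub-based fallbacks throughout the rest of the patch. A sketch of the intended use (hypothetical helper, not in the patch):

    extern int proc_mm, ptrace_faultinfo;

    /* skas3 host: address spaces live in /proc/mm and faults are fetched
     * with PTRACE_FAULTINFO.  Stock host (skas0): every mm gets its own
     * ptraced child plus two stub pages, and faults are collected by the
     * stub SIGSEGV handler. */
    static int host_has_skas3(void)
    {
            return proc_mm && ptrace_faultinfo;
    }
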
diff --git a/arch/um/kernel/skas/exec_kern.c b/arch/um/kernel/skas/exec_kern.c
index c6b4d5dba78..77ed7bbab21 100644
--- a/arch/um/kernel/skas/exec_kern.c
+++ b/arch/um/kernel/skas/exec_kern.c
@@ -18,7 +18,7 @@
void flush_thread_skas(void)
{
force_flush_all();
- switch_mm_skas(current->mm->context.skas.mm_fd);
+ switch_mm_skas(&current->mm->context.skas.id);
}
void start_thread_skas(struct pt_regs *regs, unsigned long eip,
diff --git a/arch/um/kernel/skas/include/mm_id.h b/arch/um/kernel/skas/include/mm_id.h
new file mode 100644
index 00000000000..48dd0989dda
--- /dev/null
+++ b/arch/um/kernel/skas/include/mm_id.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2005 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __MM_ID_H
+#define __MM_ID_H
+
+struct mm_id {
+ union {
+ int mm_fd;
+ int pid;
+ } u;
+ unsigned long stack;
+};
+
+#endif
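
struct mm_id is the new per-address-space handle: on a skas3 host it carries the /proc/mm file descriptor, on a stock host the pid of the dedicated child plus the address of that child's stub stack page. A sketch of the access pattern (hypothetical helper; the real call sites branch on proc_mm directly):

    #include "mm_id.h"

    extern int proc_mm;

    /* Return whichever identifier is meaningful on this host. */
    static int mm_handle(struct mm_id *id)
    {
            return proc_mm ? id->u.mm_fd : id->u.pid;
    }
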
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 4cd60d7213f..278b72f1d9a 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -6,10 +6,15 @@
#ifndef __SKAS_MMU_H
#define __SKAS_MMU_H
+#include "mm_id.h"
+
struct mmu_context_skas {
- int mm_fd;
+ struct mm_id id;
+ unsigned long last_page_table;
};
+extern void switch_mm_skas(struct mm_id * mm_idp);
+
#endif
/*
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index 96b51dba347..d91a60f3830 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -6,9 +6,11 @@
#ifndef __SKAS_H
#define __SKAS_H
+#include "mm_id.h"
#include "sysdep/ptrace.h"
extern int userspace_pid[];
+extern int proc_mm, ptrace_faultinfo;
extern void switch_threads(void *me, void *next);
extern void thread_wait(void *sw, void *fb);
@@ -22,16 +24,17 @@ extern void new_thread_proc(void *stack, void (*handler)(int sig));
extern void remove_sigstack(void);
extern void new_thread_handler(int sig);
extern void handle_syscall(union uml_pt_regs *regs);
-extern void map(int fd, unsigned long virt, unsigned long len, int r, int w,
- int x, int phys_fd, unsigned long long offset);
-extern int unmap(int fd, void *addr, unsigned long len);
-extern int protect(int fd, unsigned long addr, unsigned long len,
- int r, int w, int x);
+extern int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
+ int r, int w, int x, int phys_fd, unsigned long long offset);
+extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len);
+extern int protect(struct mm_id * mm_idp, unsigned long addr,
+ unsigned long len, int r, int w, int x);
extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
extern int new_mm(int from);
-extern void start_userspace(int cpu);
+extern int start_userspace(unsigned long stub_stack);
extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
extern long execute_syscall_skas(void *r);
+extern unsigned long current_stub_stack(void);
#endif
diff --git a/arch/um/kernel/skas/mem.c b/arch/um/kernel/skas/mem.c
index 438db2f4345..147466d7ff4 100644
--- a/arch/um/kernel/skas/mem.c
+++ b/arch/um/kernel/skas/mem.c
@@ -5,7 +5,9 @@
#include "linux/config.h"
#include "linux/mm.h"
+#include "asm/pgtable.h"
#include "mem_user.h"
+#include "skas.h"
unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out,
unsigned long *task_size_out)
@@ -18,7 +20,9 @@ unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out,
*task_size_out = CONFIG_HOST_TASK_SIZE;
#else
*host_size_out = top;
- *task_size_out = top;
+ if (proc_mm && ptrace_faultinfo)
+ *task_size_out = top;
+ else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
#endif
return(((unsigned long) set_task_sizes_skas) & ~0xffffff);
}
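
On hosts without both /proc/mm and PTRACE_FAULTINFO the task size is clamped so the stub pages sit above TASK_SIZE, rounded down to a pgdir boundary so that exit_mmap()'s page-table accounting works out (see the long comment in mmu.c below). A worked example with assumed values, not taken from this patch:

    /* Assumed: i386 two-level paging (PGDIR_SHIFT = 22, 4 MB per pgd entry)
     * and CONFIG_STUB_START = 0xbfffe000. */
    #define EXAMPLE_STUB_START 0xbfffe000UL
    #define EXAMPLE_PGDIR_MASK (~((1UL << 22) - 1))        /* 0xffc00000 */

    /* 0xbfffe000 & 0xffc00000 = 0xbfc00000: the stub pages land in the
     * last pgd entry, entirely above the clamped task size. */
    unsigned long example_task_size = EXAMPLE_STUB_START & EXAMPLE_PGDIR_MASK;
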
diff --git a/arch/um/kernel/skas/mem_user.c b/arch/um/kernel/skas/mem_user.c
index 1310bf1e88d..b0980ff3bd9 100644
--- a/arch/um/kernel/skas/mem_user.c
+++ b/arch/um/kernel/skas/mem_user.c
@@ -3,100 +3,171 @@
* Licensed under the GPL
*/
+#include <signal.h>
#include <errno.h>
#include <sys/mman.h>
+#include <sys/wait.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
#include "mem_user.h"
#include "mem.h"
+#include "mm_id.h"
#include "user.h"
#include "os.h"
#include "proc_mm.h"
-
-void map(int fd, unsigned long virt, unsigned long len, int r, int w,
- int x, int phys_fd, unsigned long long offset)
+#include "ptrace_user.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "task.h"
+#include "registers.h"
+#include "uml-config.h"
+#include "sysdep/ptrace.h"
+#include "sysdep/stub.h"
+#include "skas.h"
+
+extern unsigned long syscall_stub, __syscall_stub_start;
+
+extern void wait_stub_done(int pid, int sig, char * fname);
+
+static long run_syscall_stub(struct mm_id * mm_idp, int syscall,
+ unsigned long *args)
{
- struct proc_mm_op map;
- int prot, n;
-
- prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
- (x ? PROT_EXEC : 0);
-
- map = ((struct proc_mm_op) { .op = MM_MMAP,
- .u =
- { .mmap =
- { .addr = virt,
- .len = len,
- .prot = prot,
- .flags = MAP_SHARED |
- MAP_FIXED,
- .fd = phys_fd,
- .offset = offset
- } } } );
- n = os_write_file(fd, &map, sizeof(map));
- if(n != sizeof(map))
- printk("map : /proc/mm map failed, err = %d\n", -n);
+ int n, pid = mm_idp->u.pid;
+ unsigned long regs[MAX_REG_NR];
+
+ get_safe_registers(regs);
+ regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+ ((unsigned long) &syscall_stub -
+ (unsigned long) &__syscall_stub_start);
+ /* XXX Don't have a define for starting a syscall */
+ regs[REGS_SYSCALL_NR] = syscall;
+ regs[REGS_SYSCALL_ARG1] = args[0];
+ regs[REGS_SYSCALL_ARG2] = args[1];
+ regs[REGS_SYSCALL_ARG3] = args[2];
+ regs[REGS_SYSCALL_ARG4] = args[3];
+ regs[REGS_SYSCALL_ARG5] = args[4];
+ regs[REGS_SYSCALL_ARG6] = args[5];
+ n = ptrace_setregs(pid, regs);
+ if(n < 0){
+ printk("run_syscall_stub : PTRACE_SETREGS failed, "
+ "errno = %d\n", n);
+ return(n);
+ }
+
+ wait_stub_done(pid, 0, "run_syscall_stub");
+
+ return(*((unsigned long *) mm_idp->stack));
}
-int unmap(int fd, void *addr, unsigned long len)
+int map(struct mm_id *mm_idp, unsigned long virt, unsigned long len,
+ int r, int w, int x, int phys_fd, unsigned long long offset)
{
- struct proc_mm_op unmap;
- int n;
-
- unmap = ((struct proc_mm_op) { .op = MM_MUNMAP,
- .u =
- { .munmap =
- { .addr = (unsigned long) addr,
- .len = len } } } );
- n = os_write_file(fd, &unmap, sizeof(unmap));
- if(n != sizeof(unmap)) {
- if(n < 0)
- return(n);
- else if(n > 0)
- return(-EIO);
- }
-
- return(0);
+ int prot, n;
+
+ prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
+ (x ? PROT_EXEC : 0);
+
+ if(proc_mm){
+ struct proc_mm_op map;
+ int fd = mm_idp->u.mm_fd;
+ map = ((struct proc_mm_op) { .op = MM_MMAP,
+ .u =
+ { .mmap =
+ { .addr = virt,
+ .len = len,
+ .prot = prot,
+ .flags = MAP_SHARED |
+ MAP_FIXED,
+ .fd = phys_fd,
+ .offset= offset
+ } } } );
+ n = os_write_file(fd, &map, sizeof(map));
+ if(n != sizeof(map))
+ printk("map : /proc/mm map failed, err = %d\n", -n);
+ }
+ else {
+ long res;
+ unsigned long args[] = { virt, len, prot,
+ MAP_SHARED | MAP_FIXED, phys_fd,
+ MMAP_OFFSET(offset) };
+
+ res = run_syscall_stub(mm_idp, STUB_MMAP_NR, args);
+ if((void *) res == MAP_FAILED)
+ printk("mmap stub failed, errno = %d\n", res);
+ }
+
+ return 0;
}
-int protect(int fd, unsigned long addr, unsigned long len, int r, int w,
- int x, int must_succeed)
+int unmap(struct mm_id *mm_idp, void *addr, unsigned long len)
{
- struct proc_mm_op protect;
- int prot, n;
-
- prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
- (x ? PROT_EXEC : 0);
-
- protect = ((struct proc_mm_op) { .op = MM_MPROTECT,
- .u =
- { .mprotect =
- { .addr = (unsigned long) addr,
- .len = len,
- .prot = prot } } } );
-
- n = os_write_file(fd, &protect, sizeof(protect));
- if(n != sizeof(protect)) {
- if(n == 0) return(0);
-
- if(must_succeed)
- panic("protect failed, err = %d", -n);
-
- return(-EIO);
- }
+ int n;
+
+ if(proc_mm){
+ struct proc_mm_op unmap;
+ int fd = mm_idp->u.mm_fd;
+ unmap = ((struct proc_mm_op) { .op = MM_MUNMAP,
+ .u =
+ { .munmap =
+ { .addr =
+ (unsigned long) addr,
+ .len = len } } } );
+ n = os_write_file(fd, &unmap, sizeof(unmap));
+ if(n != sizeof(unmap)) {
+ if(n < 0)
+ return(n);
+ else if(n > 0)
+ return(-EIO);
+ }
+ }
+ else {
+ int res;
+ unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
+ 0 };
+
+ res = run_syscall_stub(mm_idp, __NR_munmap, args);
+ if(res < 0)
+ printk("munmap stub failed, errno = %d\n", res);
+ }
+
+ return(0);
+}
- return(0);
+int protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
+ int r, int w, int x)
+{
+ struct proc_mm_op protect;
+ int prot, n;
+
+ prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
+ (x ? PROT_EXEC : 0);
+
+ if(proc_mm){
+ int fd = mm_idp->u.mm_fd;
+ protect = ((struct proc_mm_op) { .op = MM_MPROTECT,
+ .u =
+ { .mprotect =
+ { .addr =
+ (unsigned long) addr,
+ .len = len,
+ .prot = prot } } } );
+
+ n = os_write_file(fd, &protect, sizeof(protect));
+ if(n != sizeof(protect))
+ panic("protect failed, err = %d", -n);
+ }
+ else {
+ int res;
+ unsigned long args[] = { addr, len, prot, 0, 0, 0 };
+
+ res = run_syscall_stub(mm_idp, __NR_mprotect, args);
+ if(res < 0)
+ panic("mprotect stub failed, errno = %d\n", res);
+ }
+
+ return(0);
}
void before_mem_skas(unsigned long unused)
{
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
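
run_syscall_stub() is the core of skas0 address-space management: the tracer loads the child's registers with a syscall number and six arguments, points the IP into the stub page mapped at UML_CONFIG_STUB_CODE, resumes the child until the stub traps back, and reads the result from the start of the stub stack page. Stripped of the UML wrappers, the remote-syscall idea looks roughly like this on i386 (an illustration with assumed register usage, not the patch's code; the patch reads the result the stub stores on its stack page rather than from eax):

    #include <sys/ptrace.h>
    #include <sys/user.h>
    #include <sys/wait.h>

    static long remote_syscall_sketch(int pid, unsigned long stub_ip,
                                      long nr, unsigned long arg1)
    {
            struct user_regs_struct regs;

            ptrace(PTRACE_GETREGS, pid, 0, &regs);
            regs.eip = stub_ip;     /* entry point of the syscall stub */
            regs.eax = nr;          /* syscall number                  */
            regs.ebx = arg1;        /* first argument                  */
            ptrace(PTRACE_SETREGS, pid, 0, &regs);

            ptrace(PTRACE_CONT, pid, 0, 0);
            waitpid(pid, NULL, WUNTRACED);  /* stub traps when done    */

            ptrace(PTRACE_GETREGS, pid, 0, &regs);
            return regs.eax;        /* raw syscall return value        */
    }
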
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 6cb9a6d028a..511a855c9ec 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -3,46 +3,138 @@
* Licensed under the GPL
*/
+#include "linux/config.h"
#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
+#include "linux/errno.h"
+#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
+#include "asm/pgalloc.h"
+#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"
+extern int __syscall_stub_start;
+
+static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
+ unsigned long kernel)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ spin_lock(&mm->page_table_lock);
+ pgd = pgd_offset(mm, proc);
+ pud = pud_alloc(mm, pgd, proc);
+ if (!pud)
+ goto out;
+
+ pmd = pmd_alloc(mm, pud, proc);
+ if (!pmd)
+ goto out_pmd;
+
+ pte = pte_alloc_map(mm, pmd, proc);
+ if (!pte)
+ goto out_pte;
+
+ /* There's an interaction between the skas0 stub pages, stack
+ * randomization, and the BUG at the end of exit_mmap. exit_mmap
+ * checks that the number of page tables freed is the same as had
+ * been allocated. If the stack is on the last page table page,
+ * then the stack pte page will be freed, and if not, it won't. To
+ * avoid having to know where the stack is, or if the process mapped
+ * something at the top of its address space for some other reason,
+ * we set TASK_SIZE to end at the start of the last page table.
+ * This keeps exit_mmap off the last page, but introduces a leak
+ * of that page. So, we hang onto it here and free it in
+ * destroy_context_skas.
+ */
+
+ mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
+
+ *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
+ *pte = pte_mkexec(*pte);
+ *pte = pte_wrprotect(*pte);
+ spin_unlock(&mm->page_table_lock);
+ return(0);
+
+ out_pte:
+	pmd_free(pmd);
+ out_pmd:
+	pud_free(pud);
+ out:
+ spin_unlock(&mm->page_table_lock);
+ return(-ENOMEM);
+}
+
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
- int from;
+ struct mm_struct *cur_mm = current->mm;
+ struct mm_id *mm_id = &mm->context.skas.id;
+ unsigned long stack;
+ int from, ret;
- if((current->mm != NULL) && (current->mm != &init_mm))
- from = current->mm->context.skas.mm_fd;
- else from = -1;
+ if(proc_mm){
+ if((cur_mm != NULL) && (cur_mm != &init_mm))
+ from = cur_mm->context.skas.id.u.mm_fd;
+ else from = -1;
- mm->context.skas.mm_fd = new_mm(from);
- if(mm->context.skas.mm_fd < 0){
- printk("init_new_context_skas - new_mm failed, errno = %d\n",
- mm->context.skas.mm_fd);
- return(mm->context.skas.mm_fd);
+ ret = new_mm(from);
+ if(ret < 0){
+ printk("init_new_context_skas - new_mm failed, "
+ "errno = %d\n", ret);
+ return ret;
+ }
+ mm_id->u.mm_fd = ret;
}
+ else {
+ /* This zeros the entry that pgd_alloc didn't, needed since
+ * we are about to reinitialize it, and want mm.nr_ptes to
+ * be accurate.
+ */
+ mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
- return(0);
+ ret = init_stub_pte(mm, CONFIG_STUB_CODE,
+ (unsigned long) &__syscall_stub_start);
+ if(ret)
+ goto out;
+
+ ret = -ENOMEM;
+ stack = get_zeroed_page(GFP_KERNEL);
+ if(stack == 0)
+ goto out;
+ mm_id->stack = stack;
+
+ ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
+ if(ret)
+ goto out_free;
+
+ mm->nr_ptes--;
+ mm_id->u.pid = start_userspace(stack);
+ }
+
+ return 0;
+
+ out_free:
+ free_page(mm_id->stack);
+ out:
+ return ret;
}
void destroy_context_skas(struct mm_struct *mm)
{
- os_close_file(mm->context.skas.mm_fd);
-}
+ struct mmu_context_skas *mmu = &mm->context.skas;
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+ if(proc_mm)
+ os_close_file(mmu->id.u.mm_fd);
+ else {
+ os_kill_ptraced_process(mmu->id.u.pid, 1);
+ free_page(mmu->id.stack);
+ free_page(mmu->last_page_table);
+ }
+}
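
On a stock host init_new_context_skas() builds the two stub mappings by hand: a read-only, executable pte for the stub code page at CONFIG_STUB_CODE and a per-mm zeroed page at CONFIG_STUB_DATA that doubles as the stub's stack, then starts the dedicated child on that stack; destroy_context_skas() kills the child and frees both the stack page and the deliberately retained last page-table page. The pte construction idiom used above, restated on its own (a sketch using the same kernel helpers the patch calls):

    /* Present, executable, write-protected pte pointing at a kernel page. */
    static pte_t stub_code_pte(unsigned long kernel_virt)
    {
            pte_t pte = mk_pte(virt_to_page(kernel_virt),
                               __pgprot(_PAGE_PRESENT));

            pte = pte_mkexec(pte);
            return pte_wrprotect(pte);
    }
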
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 773cd2b525f..1647abb0d1a 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
@@ -14,6 +14,7 @@
#include <sys/mman.h>
#include <sys/user.h>
#include <asm/unistd.h>
+#include <asm/types.h>
#include "user.h"
#include "ptrace_user.h"
#include "time_user.h"
@@ -21,13 +22,17 @@
#include "user_util.h"
#include "kern_util.h"
#include "skas.h"
+#include "mm_id.h"
#include "sysdep/sigcontext.h"
+#include "sysdep/stub.h"
#include "os.h"
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
#include "signal_user.h"
#include "registers.h"
+#include "mem.h"
+#include "uml-config.h"
#include "process.h"
int is_skas_winch(int pid, int fd, void *data)
@@ -39,20 +44,55 @@ int is_skas_winch(int pid, int fd, void *data)
return(1);
}
-void get_skas_faultinfo(int pid, struct faultinfo * fi)
+void wait_stub_done(int pid, int sig, char * fname)
{
- int err;
-
- err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
- if(err)
- panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
- "errno = %d\n", errno);
+ int n, status, err;
+
+ do {
+ if ( sig != -1 ) {
+ err = ptrace(PTRACE_CONT, pid, 0, sig);
+ if(err)
+ panic("%s : continue failed, errno = %d\n",
+ fname, errno);
+ }
+ sig = 0;
+
+ CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ } while((n >= 0) && WIFSTOPPED(status) &&
+ (WSTOPSIG(status) == SIGVTALRM));
+
+ if((n < 0) || !WIFSTOPPED(status) ||
+	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
+ panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
+ "pid = %d, n = %d, errno = %d, status = 0x%x\n",
+ fname, pid, n, errno, status);
+ }
+}
- /* Special handling for i386, which has different structs */
- if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
- memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
- sizeof(struct faultinfo) -
- sizeof(struct ptrace_faultinfo));
+void get_skas_faultinfo(int pid, struct faultinfo * fi)
+{
+ int err;
+
+ if(ptrace_faultinfo){
+ err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
+ if(err)
+ panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
+ "errno = %d\n", errno);
+
+ /* Special handling for i386, which has different structs */
+ if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
+ memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
+ sizeof(struct faultinfo) -
+ sizeof(struct ptrace_faultinfo));
+ }
+ else {
+ wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");
+
+ /* faultinfo is prepared by the stub-segv-handler at start of
+ * the stub stack page. We just have to copy it.
+ */
+ memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
+ }
}
static void handle_segv(int pid, union uml_pt_regs * regs)
@@ -91,11 +131,56 @@ static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu
handle_syscall(regs);
}
-static int userspace_tramp(void *arg)
+extern int __syscall_stub_start;
+
+static int userspace_tramp(void *stack)
{
- init_new_thread_signals(0);
- enable_timer();
+ void *addr;
+
ptrace(PTRACE_TRACEME, 0, 0, 0);
+
+ init_new_thread_signals(1);
+ enable_timer();
+
+ if(!proc_mm){
+ /* This has a pte, but it can't be mapped in with the usual
+ * tlb_flush mechanism because this is part of that mechanism
+ */
+ int fd;
+ __u64 offset;
+
+ fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
+ addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
+ PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
+ if(addr == MAP_FAILED){
+ printk("mapping mmap stub failed, errno = %d\n",
+ errno);
+ exit(1);
+ }
+
+ if(stack != NULL){
+ fd = phys_mapping(to_phys(stack), &offset);
+ addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
+ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, fd, offset);
+ if(addr == MAP_FAILED){
+ printk("mapping segfault stack failed, "
+ "errno = %d\n", errno);
+ exit(1);
+ }
+ }
+ }
+ if(!ptrace_faultinfo && (stack != NULL)){
+ unsigned long v = UML_CONFIG_STUB_CODE +
+ (unsigned long) stub_segv_handler -
+ (unsigned long) &__syscall_stub_start;
+
+ set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
+ set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
+ SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
+ SIGUSR1, -1);
+ }
+
os_stop_process(os_getpid());
return(0);
}
@@ -105,11 +190,11 @@ static int userspace_tramp(void *arg)
#define NR_CPUS 1
int userspace_pid[NR_CPUS];
-void start_userspace(int cpu)
+int start_userspace(unsigned long stub_stack)
{
void *stack;
unsigned long sp;
- int pid, status, n;
+ int pid, status, n, flags;
stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -117,8 +202,9 @@ void start_userspace(int cpu)
panic("start_userspace : mmap failed, errno = %d", errno);
sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
- pid = clone(userspace_tramp, (void *) sp,
- CLONE_FILES | CLONE_VM | SIGCHLD, NULL);
+ flags = CLONE_FILES | SIGCHLD;
+ if(proc_mm) flags |= CLONE_VM;
+ pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
if(pid < 0)
panic("start_userspace : clone failed, errno = %d", errno);
@@ -140,7 +226,7 @@ void start_userspace(int cpu)
if(munmap(stack, PAGE_SIZE) < 0)
panic("start_userspace : munmap failed, errno = %d\n", errno);
- userspace_pid[cpu] = pid;
+ return(pid);
}
void userspace(union uml_pt_regs *regs)
@@ -174,7 +260,9 @@ void userspace(union uml_pt_regs *regs)
if(WIFSTOPPED(status)){
switch(WSTOPSIG(status)){
case SIGSEGV:
- handle_segv(pid, regs);
+ if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
+ user_signal(SIGSEGV, regs, pid);
+ else handle_segv(pid, regs);
break;
case SIGTRAP + 0x80:
handle_trap(pid, regs, local_using_sysemu);
@@ -194,6 +282,7 @@ void userspace(union uml_pt_regs *regs)
printk("userspace - child stopped with signal "
"%d\n", WSTOPSIG(status));
}
+ pid = userspace_pid[0];
interrupt_end();
/* Avoid -ERESTARTSYS handling in host */
@@ -334,21 +423,19 @@ void reboot_skas(void)
siglongjmp(initial_jmpbuf, INIT_JMP_REBOOT);
}
-void switch_mm_skas(int mm_fd)
+void switch_mm_skas(struct mm_id *mm_idp)
{
int err;
#warning need cpu pid in switch_mm_skas
- err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0, mm_fd);
- if(err)
- panic("switch_mm_skas - PTRACE_SWITCH_MM failed, errno = %d\n",
- errno);
-}
-
-void kill_off_processes_skas(void)
-{
-#warning need to loop over userspace_pids in kill_off_processes_skas
- os_kill_ptraced_process(userspace_pid[0], 1);
+ if(proc_mm){
+ err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
+ mm_idp->u.mm_fd);
+ if(err)
+ panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
+ "errno = %d\n", errno);
+ }
+ else userspace_pid[0] = mm_idp->u.pid;
}
/*
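
Because the skas0 child no longer shares the kernel's address space (CLONE_VM is dropped in start_userspace() above), userspace_tramp() maps the stub code and stub stack pages itself at the fixed UML_CONFIG_STUB_* addresses and installs the stub SIGSEGV handler on an alternate stack inside the stub data page before stopping. set_sigstack()/set_handler() are UML wrappers; the underlying libc arrangement is roughly (an assumed equivalent, reduced to the SA_ONSTACK part):

    #include <stddef.h>
    #include <signal.h>

    /* Run the stub's SIGSEGV handler on an alternate stack placed in the
     * stub data page, so faults can be reported even with no valid stack. */
    static void install_stub_segv_sketch(void *stub_data, size_t page_size,
                                         void (*handler)(int))
    {
            stack_t ss = { .ss_sp = stub_data, .ss_size = page_size,
                           .ss_flags = 0 };
            struct sigaction sa = { .sa_handler = handler,
                                    .sa_flags = SA_ONSTACK };

            sigaltstack(&ss, NULL);
            sigemptyset(&sa.sa_mask);
            sigaction(SIGSEGV, &sa, NULL);
    }
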
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 0a7b8aa55db..cbabab104ac 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -175,9 +175,12 @@ static int start_kernel_proc(void *unused)
return(0);
}
+extern int userspace_pid[];
+
int start_uml_skas(void)
{
- start_userspace(0);
+ if(proc_mm)
+ userspace_pid[0] = start_userspace(0);
init_new_thread_signals(1);
@@ -199,3 +202,31 @@ int thread_pid_skas(struct task_struct *task)
#warning Need to look up userspace_pid by cpu
return(userspace_pid[0]);
}
+
+void kill_off_processes_skas(void)
+{
+ if(proc_mm)
+#warning need to loop over userspace_pids in kill_off_processes_skas
+ os_kill_ptraced_process(userspace_pid[0], 1);
+ else {
+ struct task_struct *p;
+ int pid, me;
+
+ me = os_getpid();
+ for_each_process(p){
+ if(p->mm == NULL)
+ continue;
+
+ pid = p->mm->context.skas.id.u.pid;
+ os_kill_ptraced_process(pid, 1);
+ }
+ }
+}
+
+unsigned long current_stub_stack(void)
+{
+ if(current->mm == NULL)
+ return(0);
+
+ return(current->mm->context.skas.id.stack);
+}
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index 18f9a7711de..6230999c672 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -6,6 +6,7 @@
#include "linux/stddef.h"
#include "linux/sched.h"
+#include "linux/config.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
@@ -17,7 +18,7 @@
#include "os.h"
#include "tlb.h"
-static void do_ops(int fd, struct host_vm_op *ops, int last)
+static void do_ops(union mm_context *mmu, struct host_vm_op *ops, int last)
{
struct host_vm_op *op;
int i;
@@ -26,18 +27,18 @@ static void do_ops(int fd, struct host_vm_op *ops, int last)
op = &ops[i];
switch(op->type){
case MMAP:
- map(fd, op->u.mmap.addr, op->u.mmap.len,
+ map(&mmu->skas.id, op->u.mmap.addr, op->u.mmap.len,
op->u.mmap.r, op->u.mmap.w, op->u.mmap.x,
op->u.mmap.fd, op->u.mmap.offset);
break;
case MUNMAP:
- unmap(fd, (void *) op->u.munmap.addr,
+ unmap(&mmu->skas.id, (void *) op->u.munmap.addr,
op->u.munmap.len);
break;
case MPROTECT:
- protect(fd, op->u.mprotect.addr, op->u.mprotect.len,
- op->u.mprotect.r, op->u.mprotect.w,
- op->u.mprotect.x);
+ protect(&mmu->skas.id, op->u.mprotect.addr,
+ op->u.mprotect.len, op->u.mprotect.r,
+ op->u.mprotect.w, op->u.mprotect.x);
break;
default:
printk("Unknown op type %d in do_ops\n", op->type);
@@ -46,12 +47,15 @@ static void do_ops(int fd, struct host_vm_op *ops, int last)
}
}
+extern int proc_mm;
+
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
- int fd = mm->context.skas.mm_fd;
+ if(!proc_mm && (end_addr > CONFIG_STUB_START))
+ end_addr = CONFIG_STUB_START;
- fix_range_common(mm, start_addr, end_addr, force, fd, do_ops);
+ fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
void __flush_tlb_one_skas(unsigned long addr)
@@ -69,16 +73,20 @@ void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
void flush_tlb_mm_skas(struct mm_struct *mm)
{
+ unsigned long end;
+
/* Don't bother flushing if this address space is about to be
* destroyed.
*/
if(atomic_read(&mm->mm_users) == 0)
return;
- fix_range(mm, 0, host_task_size, 0);
+ end = proc_mm ? task_size : CONFIG_STUB_START;
+ fix_range(mm, 0, end, 0);
}
void force_flush_all_skas(void)
{
- fix_range(current->mm, 0, host_task_size, 1);
+ unsigned long end = proc_mm ? task_size : CONFIG_STUB_START;
+ fix_range(current->mm, 0, end, 1);
}
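
With skas0 the stub pages are what the TLB machinery itself relies on, so they must never be flushed or remapped through it: fix_range() clamps every range below CONFIG_STUB_START, and the flush-everything paths choose their upper bound by host type. The clamping rule, restated as a helper (a sketch; the patch inlines it):

    extern int proc_mm;

    /* On a stock host nothing at or above the stub area ever goes
     * through do_ops(). */
    static unsigned long clamp_flush_end(unsigned long end_addr)
    {
            if (!proc_mm && end_addr > CONFIG_STUB_START)
                    end_addr = CONFIG_STUB_START;
            return end_addr;
    }
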
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index eda477edfdf..83ec8d4747f 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -18,13 +18,15 @@
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr, int force, int data,
- void (*do_ops)(int, struct host_vm_op *, int))
+ unsigned long end_addr, int force,
+ void (*do_ops)(union mm_context *, struct host_vm_op *,
+ int))
{
pgd_t *npgd;
pud_t *npud;
pmd_t *npmd;
pte_t *npte;
+ union mm_context *mmu = &mm->context;
unsigned long addr, end;
int r, w, x;
struct host_vm_op ops[16];
@@ -40,7 +42,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
end = end_addr;
if(force || pgd_newpage(*npgd)){
op_index = add_munmap(addr, end - addr, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
pgd_mkuptodate(*npgd);
}
@@ -55,7 +57,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
end = end_addr;
if(force || pud_newpage(*npud)){
op_index = add_munmap(addr, end - addr, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
pud_mkuptodate(*npud);
}
@@ -70,7 +72,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
end = end_addr;
if(force || pmd_newpage(*npmd)){
op_index = add_munmap(addr, end - addr, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
pmd_mkuptodate(*npmd);
}
@@ -93,21 +95,21 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
op_index = add_mmap(addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
else op_index = add_munmap(addr, PAGE_SIZE, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
}
else if(pte_newprot(*npte))
op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
- op_index, last_op, data,
+ op_index, last_op, mmu,
do_ops);
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
- (*do_ops)(data, ops, op_index);
+ (*do_ops)(mmu, ops, op_index);
}
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
@@ -195,51 +197,6 @@ int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
return(updated);
}
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- flush_tlb_range(vma, address, address + PAGE_SIZE);
-}
-
-void flush_tlb_all(void)
-{
- flush_tlb_mm(current->mm);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
- flush_tlb_kernel_range_common, start, end);
-}
-
-void flush_tlb_kernel_vm(void)
-{
- CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
- flush_tlb_kernel_range_common(start_vm, end_vm));
-}
-
-void __flush_tlb_one(unsigned long addr)
-{
- CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
- end);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
-}
-
-void force_flush_all(void)
-{
- CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
-}
-
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
return(pgd_offset(mm, address));
@@ -270,9 +227,9 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
}
int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
- int r, int w, int x, struct host_vm_op *ops, int index,
- int last_filled, int data,
- void (*do_ops)(int, struct host_vm_op *, int))
+ int r, int w, int x, struct host_vm_op *ops, int index,
+ int last_filled, union mm_context *mmu,
+ void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
__u64 offset;
struct host_vm_op *last;
@@ -292,7 +249,7 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
}
if(index == last_filled){
- (*do_ops)(data, ops, last_filled);
+ (*do_ops)(mmu, ops, last_filled);
index = -1;
}
@@ -310,8 +267,8 @@ int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
}
int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
- int index, int last_filled, int data,
- void (*do_ops)(int, struct host_vm_op *, int))
+ int index, int last_filled, union mm_context *mmu,
+ void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
struct host_vm_op *last;
@@ -325,7 +282,7 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
}
if(index == last_filled){
- (*do_ops)(data, ops, last_filled);
+ (*do_ops)(mmu, ops, last_filled);
index = -1;
}
@@ -337,8 +294,9 @@ int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
}
int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
- struct host_vm_op *ops, int index, int last_filled, int data,
- void (*do_ops)(int, struct host_vm_op *, int))
+ struct host_vm_op *ops, int index, int last_filled,
+ union mm_context *mmu,
+ void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
struct host_vm_op *last;
@@ -354,7 +312,7 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
}
if(index == last_filled){
- (*do_ops)(data, ops, last_filled);
+ (*do_ops)(mmu, ops, last_filled);
index = -1;
}
@@ -367,3 +325,49 @@ int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
.x = x } } });
return(index);
}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+{
+ address &= PAGE_MASK;
+ flush_tlb_range(vma, address, address + PAGE_SIZE);
+}
+
+void flush_tlb_all(void)
+{
+ flush_tlb_mm(current->mm);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
+ flush_tlb_kernel_range_common, start, end);
+}
+
+void flush_tlb_kernel_vm(void)
+{
+ CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
+ flush_tlb_kernel_range_common(start_vm, end_vm));
+}
+
+void __flush_tlb_one(unsigned long addr)
+{
+ CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
+ end);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
+}
+
+void force_flush_all(void)
+{
+ CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
+}
+
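
fix_range_common() and the add_mmap()/add_munmap()/add_mprotect() batchers now hand the whole union mm_context to the do_ops callback instead of a bare /proc/mm descriptor, since skas0 needs the mm_id (child pid plus stub stack) to issue its stub syscalls, while tt mode ignores the argument entirely. The callback shape both modes implement after this patch (a declaration sketch, assuming the types from tlb.h and mmu.h):

    typedef void (*host_vm_ops_fn)(union mm_context *mmu,
                                   struct host_vm_op *ops, int last);
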
diff --git a/arch/um/kernel/tt/tlb.c b/arch/um/kernel/tt/tlb.c
index 203216ad86f..2eefb43bc9c 100644
--- a/arch/um/kernel/tt/tlb.c
+++ b/arch/um/kernel/tt/tlb.c
@@ -17,7 +17,7 @@
#include "os.h"
#include "tlb.h"
-static void do_ops(int unused, struct host_vm_op *ops, int last)
+static void do_ops(union mm_context *mmu, struct host_vm_op *ops, int last)
{
struct host_vm_op *op;
int i;
@@ -55,7 +55,7 @@ static void fix_range(struct mm_struct *mm, unsigned long start_addr,
panic("fix_range fixing wrong address space, current = 0x%p",
current);
- fix_range_common(mm, start_addr, end_addr, force, 0, do_ops);
+ fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
atomic_t vmchange_seq = ATOMIC_INIT(1);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 61dfd4fef75..163476a8cb1 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -30,6 +30,7 @@ SECTIONS
_einittext = .;
}
. = ALIGN(4096);
+
.text :
{
*(.text)
@@ -39,6 +40,12 @@ SECTIONS
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
*(.gnu.linkonce.t*)
+
+ . = ALIGN(4096);
+ __syscall_stub_start = .;
+ *(.__syscall_stub*)
+ __syscall_stub_end = .;
+ . = ALIGN(4096);
}
#include "asm/common.lds.S"