Diffstat (limited to 'arch/um/kernel')
-rw-r--r--	arch/um/kernel/signal.c		26
-rw-r--r--	arch/um/kernel/skas/mmu.c	25
-rw-r--r--	arch/um/kernel/skas/uaccess.c	 4
3 files changed, 15 insertions(+), 40 deletions(-)
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index e8b889d3bce..fb12f4c5e64 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -65,21 +65,10 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
#endif
err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset);
- if (err) {
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = *oldset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ if (err)
force_sigsegv(signr, current);
- } else {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked,
- &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ else
+ block_sigmask(ka, signr);
return err;
}
@@ -162,12 +151,11 @@ int do_signal(void)
*/
long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
+ sigset_t blocked;
+
mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
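
Note: the two helpers this hunk switches to subsume the locking that the deleted lines did by hand. A rough sketch of the equivalent logic, reconstructed from the removed code (the real block_sigmask()/set_current_blocked() live in kernel/signal.c; set_current_blocked() takes ->sighand->siglock and calls recalc_sigpending() itself, so the arch code no longer has to):

/* Sketch of what block_sigmask(ka, signr) boils down to: block every
 * signal in the handler's sa_mask, plus the delivered signal itself
 * unless SA_NODEFER was requested. */
static void block_sigmask_sketch(struct k_sigaction *ka, int signr)
{
	sigset_t blocked;

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, signr);
	set_current_blocked(&blocked);
}
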
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 1aee587e9c5..4947b319f53 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -92,8 +92,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
goto out_free;
}
- to_mm->stub_pages = NULL;
-
return 0;
out_free:
@@ -103,7 +101,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
return ret;
}
-void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+void uml_setup_stubs(struct mm_struct *mm)
{
struct page **pages;
int err, ret;
@@ -120,29 +118,20 @@ void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
if (ret)
goto out;
- pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
- printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
- "pointers\n");
- goto out;
- }
-
- pages[0] = virt_to_page(&__syscall_stub_start);
- pages[1] = virt_to_page(mm->context.id.stack);
- mm->context.stub_pages = pages;
+ mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start);
+ mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
/* dup_mmap already holds mmap_sem */
err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
VM_READ | VM_MAYREAD | VM_EXEC |
- VM_MAYEXEC | VM_DONTCOPY, pages);
+ VM_MAYEXEC | VM_DONTCOPY,
+ mm->context.stub_pages);
if (err) {
printk(KERN_ERR "install_special_mapping returned %d\n", err);
- goto out_free;
+ goto out;
}
return;
-out_free:
- kfree(pages);
out:
force_sigsegv(SIGSEGV, current);
}
@@ -151,8 +140,6 @@ void arch_exit_mmap(struct mm_struct *mm)
{
pte_t *pte;
- if (mm->context.stub_pages != NULL)
- kfree(mm->context.stub_pages);
pte = virt_to_pte(mm, STUB_CODE);
if (pte != NULL)
pte_clear(mm, STUB_CODE, pte);
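
Note: the mmu.c hunks above assume mm->context.stub_pages is now a fixed two-entry array rather than a kmalloc()ed pointer, which is why both the allocation-failure path and the kfree() in arch_exit_mmap() go away. The companion header change is not part of this diff; the context struct presumably ends up looking roughly like this (other fields omitted):

/* Assumed shape of arch/um/include/asm/mmu.h after this change; not
 * shown in this diff.  stub_pages[0] holds the syscall stub code page,
 * stub_pages[1] the stub stack page. */
typedef struct mm_context {
	struct mm_id id;
	struct page *stub_pages[2];
} mm_context_t;
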
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 9fefd924fb4..cd7df79c6a5 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
return -1;
page = pte_page(*pte);
- addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+ addr = (unsigned long) kmap_atomic(page) +
(addr & ~PAGE_MASK);
current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
current->thread.fault_catcher = NULL;
- kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+ kunmap_atomic((void *)addr);
return n;
}
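
Note: the uaccess.c hunks only drop the KM_UML_USERCOPY slot, following the kernel-wide move to stack-based atomic kmaps in which kmap_atomic()/kunmap_atomic() take no km_type argument. A minimal usage sketch of the new calling convention (copy_from_page() is a hypothetical helper, not part of this patch):

/* Hypothetical helper, for illustration only: copy 'len' bytes out of
 * a page using the one-argument kmap_atomic() API. */
static int copy_from_page(struct page *page, unsigned long offset,
			  void *buf, int len)
{
	char *kaddr = kmap_atomic(page);	/* map page, disable preemption */

	memcpy(buf, kaddr + offset, len);
	kunmap_atomic(kaddr);			/* unmap in LIFO order */
	return 0;
}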