Diffstat (limited to 'arch/um')
-rw-r--r--  arch/um/drivers/net_kern.c          11
-rw-r--r--  arch/um/include/asm/mmu.h            2
-rw-r--r--  arch/um/include/asm/mmu_context.h   11
-rw-r--r--  arch/um/kernel/signal.c             26
-rw-r--r--  arch/um/kernel/skas/mmu.c           25
-rw-r--r--  arch/um/kernel/skas/uaccess.c        4
6 files changed, 33 insertions(+), 46 deletions(-)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 46ffd659be8..95f4416e6d9 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -293,7 +293,7 @@ static void uml_net_user_timer_expire(unsigned long _conn)
#endif
}
-static void setup_etheraddr(char *str, unsigned char *addr, char *name)
+static int setup_etheraddr(char *str, unsigned char *addr, char *name)
{
char *end;
int i;
@@ -334,12 +334,13 @@ static void setup_etheraddr(char *str, unsigned char *addr, char *name)
addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
addr[5]);
}
- return;
+ return 0;
random:
printk(KERN_INFO
"Choosing a random ethernet address for device %s\n", name);
random_ether_addr(addr);
+ return 1;
}
static DEFINE_SPINLOCK(devices_lock);
@@ -391,6 +392,7 @@ static void eth_configure(int n, void *init, char *mac,
struct net_device *dev;
struct uml_net_private *lp;
int err, size;
+ int random_mac;
size = transport->private_size + sizeof(struct uml_net_private);
@@ -417,7 +419,7 @@ static void eth_configure(int n, void *init, char *mac,
*/
snprintf(dev->name, sizeof(dev->name), "eth%d", n);
- setup_etheraddr(mac, device->mac, dev->name);
+ random_mac = setup_etheraddr(mac, device->mac, dev->name);
printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac);
@@ -474,6 +476,9 @@ static void eth_configure(int n, void *init, char *mac,
/* don't use eth_mac_addr, it will not work here */
memcpy(dev->dev_addr, device->mac, ETH_ALEN);
+ if (random_mac)
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+
dev->mtu = transport->user->mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
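The net_kern.c change has setup_etheraddr() report whether it had to fall back to a random address, so that eth_configure() can mark the device accordingly. A minimal sketch of the same pattern in isolation, assuming only the generic helpers the patch relies on (random_ether_addr() and NET_ADDR_RANDOM); example_set_random_mac() is a hypothetical name, not part of the patch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Illustrative only: generate a locally administered random MAC and flag
 * it as randomly assigned, mirroring what eth_configure() now does when
 * setup_etheraddr() returns 1. */
static void example_set_random_mac(struct net_device *dev)
{
	random_ether_addr(dev->dev_addr);
	dev->addr_assign_type |= NET_ADDR_RANDOM;
}

Flagging the address lets userspace (udev, for instance) inspect addr_assign_type and avoid basing persistent interface names on a MAC that changes every boot.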
diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
index 30509b9f37f..53e8b498ebb 100644
--- a/arch/um/include/asm/mmu.h
+++ b/arch/um/include/asm/mmu.h
@@ -12,7 +12,7 @@
typedef struct mm_context {
struct mm_id id;
struct uml_arch_mm_context arch;
- struct page **stub_pages;
+ struct page *stub_pages[2];
} mm_context_t;
extern void __switch_mm(struct mm_id * mm_idp);
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 591b3d8d761..aa4a743dc4a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -9,7 +9,7 @@
#include <linux/sched.h>
#include <asm/mmu.h>
-extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+extern void uml_setup_stubs(struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -23,7 +23,9 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- arch_dup_mmap(old, new);
+ down_write(&new->mmap_sem);
+ uml_setup_stubs(new);
+ up_write(&new->mmap_sem);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -39,6 +41,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
}
+static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ uml_setup_stubs(mm);
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index e8b889d3bce..fb12f4c5e64 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -65,21 +65,10 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
#endif
err = setup_signal_stack_si(sp, signr, ka, regs, info, oldset);
- if (err) {
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = *oldset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ if (err)
force_sigsegv(signr, current);
- } else {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked,
- &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ else
+ block_sigmask(ka, signr);
return err;
}
@@ -162,12 +151,11 @@ int do_signal(void)
*/
long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
+ sigset_t blocked;
+
mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
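The signal.c hunks replace the open-coded siglock manipulation with the common helpers block_sigmask() and set_current_blocked(). As a rough sketch of what block_sigmask() amounts to, reconstructed from the code being removed here rather than copied from the helper itself:

#include <linux/sched.h>
#include <linux/signal.h>

/* Sketch: block the signals in the handler's sa_mask, plus the delivered
 * signal unless SA_NODEFER is set, and apply the new mask through
 * set_current_blocked() instead of touching ->blocked directly. */
static void block_sigmask_sketch(struct k_sigaction *ka, int signr)
{
	sigset_t blocked;

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, signr);
	set_current_blocked(&blocked);
}

Going through set_current_blocked() keeps recalc_sigpending() and the siglock handling in one place instead of repeating them in each architecture's signal code.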
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 1aee587e9c5..4947b319f53 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -92,8 +92,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
goto out_free;
}
- to_mm->stub_pages = NULL;
-
return 0;
out_free:
@@ -103,7 +101,7 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
return ret;
}
-void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+void uml_setup_stubs(struct mm_struct *mm)
{
struct page **pages;
int err, ret;
@@ -120,29 +118,20 @@ void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
if (ret)
goto out;
- pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
- printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
- "pointers\n");
- goto out;
- }
-
- pages[0] = virt_to_page(&__syscall_stub_start);
- pages[1] = virt_to_page(mm->context.id.stack);
- mm->context.stub_pages = pages;
+ mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start);
+ mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
/* dup_mmap already holds mmap_sem */
err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
VM_READ | VM_MAYREAD | VM_EXEC |
- VM_MAYEXEC | VM_DONTCOPY, pages);
+ VM_MAYEXEC | VM_DONTCOPY,
+ mm->context.stub_pages);
if (err) {
printk(KERN_ERR "install_special_mapping returned %d\n", err);
- goto out_free;
+ goto out;
}
return;
-out_free:
- kfree(pages);
out:
force_sigsegv(SIGSEGV, current);
}
@@ -151,8 +140,6 @@ void arch_exit_mmap(struct mm_struct *mm)
{
pte_t *pte;
- if (mm->context.stub_pages != NULL)
- kfree(mm->context.stub_pages);
pte = virt_to_pte(mm, STUB_CODE);
if (pte != NULL)
pte_clear(mm, STUB_CODE, pte);
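Together with the mmu.h hunk, this replaces a kmalloc()ed two-entry page-pointer array with a fixed array embedded in mm_context_t, so there is nothing to allocate on this path and nothing to free in arch_exit_mmap(). A condensed sketch of the resulting mapping step, using only names that appear in the patch (map_stubs_sketch itself is a hypothetical wrapper):

/* Sketch: install the syscall stub code page and the stub stack page into
 * the address space via the array embedded in the context.  The caller
 * must hold mmap_sem for writing (dup_mmap already does). */
static int map_stubs_sketch(struct mm_struct *mm)
{
	mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	return install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				       VM_READ | VM_MAYREAD | VM_EXEC |
				       VM_MAYEXEC | VM_DONTCOPY,
				       mm->context.stub_pages);
}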
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 9fefd924fb4..cd7df79c6a5 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
return -1;
page = pte_page(*pte);
- addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+ addr = (unsigned long) kmap_atomic(page) +
(addr & ~PAGE_MASK);
current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
current->thread.fault_catcher = NULL;
- kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+ kunmap_atomic((void *)addr);
return n;
}
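The uaccess.c change follows the removal of the per-use KM_* slot types: kmap_atomic() and kunmap_atomic() now stack implicitly and take no slot argument. A small usage sketch, with copy_from_page_sketch() as a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: temporarily map a page, copy out of it, and unmap.  Atomic
 * mappings stack, so nested users must unmap in LIFO order. */
static void copy_from_page_sketch(struct page *page, void *dst,
				  unsigned long offset, size_t len)
{
	char *vaddr = kmap_atomic(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_atomic(vaddr);
}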