34 files changed, 261 insertions, 269 deletions
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 255adb49826..fb00ab7b761 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
 cpumask_t cpu_possible_map;
-#endif
 EXPORT_SYMBOL(cpu_possible_map);

 static cpumask_t smp_commenced_mask;
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 72a1b9cae2e..6e4c3baef6c 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -240,7 +240,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);

 /* The per processor IRQ masks (these are usually kept in sync) */
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 15fc3e98ac5..5500ab55d04 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -168,6 +168,8 @@ endef

 archclean:
     $(Q)$(MAKE) $(clean)=$(boot)
+
+archmrproper:
     $(Q)rm -rf arch/$(ARCH)/include

 archprepare: checkbin
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index ef706694a0c..5291b5f8788 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -195,9 +195,6 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
     return ret;
 }

-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
-
 asmlinkage long
 sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
        struct sigaction32 __user *oact,  size_t sigsetsize)
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 076e07c1da3..a23816d3e9a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -268,6 +268,8 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
     p->size = size;
     p->next = NULL;
     p->active = 0;
+    p->commit = 0;
+    p->read = 0;
     p->char_buf_ptr = (char *)(p->data);
     p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
     /* printk("Flip create %p\n", p); */
@@ -298,6 +300,8 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
             *tbh = t->next;
             t->next = NULL;
             t->used = 0;
+            t->commit = 0;
+            t->read = 0;
             /* DEBUG ONLY */
             memset(t->data, '*', size);
             /* printk("Flip recycle %p\n", t); */
@@ -335,6 +339,7 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
         if (b != NULL) {
             b->next = n;
             b->active = 0;
+            b->commit = b->used;
         } else
             tty->buf.head = n;
         tty->buf.tail = n;
@@ -2752,6 +2757,9 @@ static void flush_to_ldisc(void *private_)
     unsigned long flags;
     struct tty_ldisc *disc;
     struct tty_buffer *tbuf;
+    int count;
+    char *char_buf;
+    unsigned char *flag_buf;

     disc = tty_ldisc_ref(tty);
     if (disc == NULL)    /*  !TTY_LDISC */
@@ -2765,16 +2773,20 @@ static void flush_to_ldisc(void *private_)
         goto out;
     }
     spin_lock_irqsave(&tty->buf.lock, flags);
-    while((tbuf = tty->buf.head) != NULL && !tbuf->active) {
+    while((tbuf = tty->buf.head) != NULL) {
+        while ((count = tbuf->commit - tbuf->read) != 0) {
+            char_buf = tbuf->char_buf_ptr + tbuf->read;
+            flag_buf = tbuf->flag_buf_ptr + tbuf->read;
+            tbuf->read += count;
+            spin_unlock_irqrestore(&tty->buf.lock, flags);
+            disc->receive_buf(tty, char_buf, flag_buf, count);
+            spin_lock_irqsave(&tty->buf.lock, flags);
+        }
+        if (tbuf->active)
+            break;
         tty->buf.head = tbuf->next;
         if (tty->buf.head == NULL)
             tty->buf.tail = NULL;
-        spin_unlock_irqrestore(&tty->buf.lock, flags);
-        /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
-        disc->receive_buf(tty, tbuf->char_buf_ptr,
-                tbuf->flag_buf_ptr,
-                tbuf->used);
-        spin_lock_irqsave(&tty->buf.lock, flags);
         tty_buffer_free(tty, tbuf);
     }
     spin_unlock_irqrestore(&tty->buf.lock, flags);
@@ -2871,8 +2883,10 @@ void tty_flip_buffer_push(struct tty_struct *tty)
 {
     unsigned long flags;
     spin_lock_irqsave(&tty->buf.lock, flags);
-    if (tty->buf.tail != NULL)
+    if (tty->buf.tail != NULL) {
         tty->buf.tail->active = 0;
+        tty->buf.tail->commit = tty->buf.tail->used;
+    }
     spin_unlock_irqrestore(&tty->buf.lock, flags);

     if (tty->low_latency)
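The tty_io.c rework above replaces the all-or-nothing active test with two cursors per buffer: the driver side advances used, tty_flip_buffer_push() (and the con_schedule_flip()/tty_schedule_flip() helpers further down) publishes data by setting commit = used, and flush_to_ldisc() hands the line discipline only the committed-but-unread span. A minimal user-space model of that indexing, with a hypothetical flipbuf type standing in for struct tty_buffer:

    #include <stdio.h>
    #include <string.h>

    /* Simplified model of struct tty_buffer's cursors (illustrative only). */
    struct flipbuf {
        char data[256];
        int used;    /* bytes written by the driver side */
        int commit;  /* bytes published to the reader side */
        int read;    /* bytes already consumed by the line discipline */
    };

    /* Driver side: append bytes; they stay invisible until pushed. */
    static void flip_write(struct flipbuf *b, const char *s, int n)
    {
        memcpy(b->data + b->used, s, n);
        b->used += n;
    }

    /* tty_flip_buffer_push() analogue: publish everything written so far. */
    static void flip_push(struct flipbuf *b)
    {
        b->commit = b->used;
    }

    /* flush_to_ldisc() analogue: consume only the committed region. */
    static void flip_flush(struct flipbuf *b)
    {
        int count = b->commit - b->read;
        if (count) {
            printf("receive_buf gets %d bytes: %.*s\n",
                   count, count, b->data + b->read);
            b->read += count;
        }
    }

    int main(void)
    {
        struct flipbuf b = { .used = 0, .commit = 0, .read = 0 };
        flip_write(&b, "hel", 3);
        flip_flush(&b);          /* nothing committed yet: no output */
        flip_push(&b);
        flip_write(&b, "lo", 2); /* written but not yet committed */
        flip_flush(&b);          /* delivers "hel" only */
        flip_push(&b);
        flip_flush(&b);          /* delivers "lo" */
        return 0;
    }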
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 233a4f60808..ef85d76575a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -148,7 +148,7 @@ config IPW2100
       In order to use this driver, you will need a firmware image for it.
       You can obtain the firmware from
       <http://ipw2100.sf.net/>.  Once you have the firmware image, you
-      will need to place it in /etc/firmware.
+      will need to place it in /lib/firmware.

       You will also very likely need the Wireless Tools in order to
       configure your card:
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 20b446f26ec..60e56c6e03d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -328,7 +328,7 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - all NR_CPUS bits set
+ *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 3aed37314ab..e87c32a5c86 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -153,8 +153,10 @@ static inline void con_schedule_flip(struct tty_struct *t)
 {
     unsigned long flags;
     spin_lock_irqsave(&t->buf.lock, flags);
-    if (t->buf.tail != NULL)
+    if (t->buf.tail != NULL) {
         t->buf.tail->active = 0;
+        t->buf.tail->commit = t->buf.tail->used;
+    }
     spin_unlock_irqrestore(&t->buf.lock, flags);
     schedule_work(&t->buf.work);
 }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index a311f58c8a7..cfb3410e32b 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -6,6 +6,7 @@
 #include <linux/list.h>
 #include <linux/linkage.h>
 #include <linux/compat.h>
+#include <linux/ioport.h>
 #include <asm/kexec.h>

 /* Verify architecture specific macros are defined */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6a2ccf78a35..c256ebe2a7b 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -160,7 +160,8 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);

 /* finegrained unicast helpers: */
 struct sock *netlink_getsockbyfilp(struct file *filp);
-int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo);
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+        long timeo, struct sock *ssk);
 void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
 int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
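The new struct sock *ssk argument in the netlink.h prototype above lets netlink_attachskb() attribute a full receive queue to its sender: only kernel-originated skbs (no sender socket, or a sender with pid 0) mark the destination as overrun, as the af_netlink.c hunk at the end of this series shows. A small standalone model of just that test; the type and function names here are made up for illustration, not the kernel's:

    #include <stdio.h>

    /* Illustrative model of the new overrun test in netlink_attachskb():
     * only kernel-originated senders (no ssk, or an ssk with pid 0) flag
     * the destination socket as overrun. */
    struct nl_sock { unsigned pid; };

    static int should_flag_overrun(const struct nl_sock *ssk)
    {
        return !ssk || ssk->pid == 0;
    }

    int main(void)
    {
        struct nl_sock kernel_sender = { .pid = 0 };
        struct nl_sock user_sender   = { .pid = 4242 };

        printf("NULL sender   -> overrun=%d\n", should_flag_overrun(NULL));
        printf("kernel sender -> overrun=%d\n", should_flag_overrun(&kernel_sender));
        printf("user sender   -> overrun=%d\n", should_flag_overrun(&user_sender));
        return 0;
    }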
diff --git a/include/linux/tty.h b/include/linux/tty.h
index a7bd3b4558d..f45cd74e6f2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -58,6 +58,8 @@ struct tty_buffer {
     int used;
     int size;
     int active;
+    int commit;
+    int read;
     /* Data points here */
     unsigned long data[0];
 };
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 82961eb1988..222faf97d5f 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -29,8 +29,10 @@ _INLINE_ void tty_schedule_flip(struct tty_struct *tty)
 {
     unsigned long flags;
     spin_lock_irqsave(&tty->buf.lock, flags);
-    if (tty->buf.tail != NULL)
+    if (tty->buf.tail != NULL) {
         tty->buf.tail->active = 0;
+        tty->buf.tail->commit = tty->buf.tail->used;
+    }
     spin_unlock_irqrestore(&tty->buf.lock, flags);
     schedule_delayed_work(&tty->buf.work, 1);
 }
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index f55e86e7503..2127cae1e0a 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -50,6 +50,9 @@
 /* May be different when we get VFIR */
 #define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)

+/* Each IrDA device gets a random 32 bits IRLAP device address */
+#define LAP_ALEN 4
+
 #define BROADCAST  0xffffffff /* Broadcast device address */
 #define CBROADCAST 0xfe       /* Connection broadcast address */
 #define XID_FORMAT 0x01       /* Discovery XID format */
diff --git a/init/initramfs.c b/init/initramfs.c
index 0c5d9a3f951..637344b0598 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -466,10 +466,32 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
 extern char __initramfs_start[], __initramfs_end[];
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/initrd.h>
+#include <linux/kexec.h>

 static void __init free_initrd(void)
 {
-    free_initrd_mem(initrd_start, initrd_end);
+#ifdef CONFIG_KEXEC
+    unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
+    unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
+
+    /*
+     * If the initrd region is overlapped with crashkernel reserved region,
+     * free only memory that is not part of crashkernel region.
+     */
+    if (initrd_start < crashk_end && initrd_end > crashk_start) {
+        /*
+         * Initialize initrd memory region since the kexec boot does
+         * not do.
+         */
+        memset((void *)initrd_start, 0, initrd_end - initrd_start);
+        if (initrd_start < crashk_start)
+            free_initrd_mem(initrd_start, crashk_start);
+        if (initrd_end > crashk_end)
+            free_initrd_mem(crashk_end, initrd_end);
+    } else
+#endif
+        free_initrd_mem(initrd_start, initrd_end);
+
     initrd_start = 0;
     initrd_end = 0;
 }
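The free_initrd() change frees only the pieces of the initrd that lie outside the reserved crash kernel region. The interval arithmetic is worth seeing on its own; the following standalone sketch models it with printf in place of free_initrd_mem(), using made-up addresses:

    #include <stdio.h>

    /* Model of the overlap handling in free_initrd(): free only the parts
     * of [initrd_start, initrd_end) outside [crashk_start, crashk_end). */
    static void free_range(unsigned long start, unsigned long end)
    {
        printf("free [%#lx, %#lx)\n", start, end);
    }

    static void free_initrd_model(unsigned long initrd_start, unsigned long initrd_end,
                                  unsigned long crashk_start, unsigned long crashk_end)
    {
        if (initrd_start < crashk_end && initrd_end > crashk_start) {
            /* overlap: free the head and/or tail that stick out */
            if (initrd_start < crashk_start)
                free_range(initrd_start, crashk_start);
            if (initrd_end > crashk_end)
                free_range(crashk_end, initrd_end);
        } else {
            free_range(initrd_start, initrd_end);
        }
    }

    int main(void)
    {
        free_initrd_model(0x1000000, 0x3000000, 0x2000000, 0x2800000); /* overlaps */
        free_initrd_model(0x1000000, 0x1800000, 0x2000000, 0x2800000); /* disjoint */
        return 0;
    }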
diff --git a/init/main.c b/init/main.c
index 7c79da57d3a..4c194c47395 100644
--- a/init/main.c
+++ b/init/main.c
@@ -668,7 +668,6 @@ static int init(void * unused)
      */
     child_reaper = current;

-    /* Sets up cpus_possible() */
     smp_prepare_cpus(max_cpus);

     do_pre_smp_initcalls();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 59302fc3643..fd2e26b6f96 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1018,7 +1018,8 @@ retry:
             goto out;
         }

-        ret = netlink_attachskb(sock, nc, 0, MAX_SCHEDULE_TIMEOUT);
+        ret = netlink_attachskb(sock, nc, 0,
+                MAX_SCHEDULE_TIMEOUT, NULL);
         if (ret == 1)
             goto retry;
         if (ret) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 4c28d2d8e30..9162123a7b2 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,6 +870,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr)
      * could possibly have landed at. Also cast things to loff_t to
      * prevent overflows and make comparisions vs. equal-width types.
      */
+    size = PAGE_ALIGN(size);
     while (vma && (loff_t)(vma->vm_end - addr) <= size) {
         next = vma->vm_next;
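sys_shmdt() walks VMAs comparing offsets against the recorded segment size, but attached segments always occupy whole pages, so a size that is not page aligned would stop the loop one partial page short. A tiny demonstration of the PAGE_ALIGN() rounding, assuming 4 KiB pages for illustration:

    #include <stdio.h>

    /* PAGE_ALIGN as used by the sys_shmdt() fix: round a byte count up
     * to the next page boundary (4 KiB here for illustration). */
    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long sizes[] = { 1, 4096, 4097, 10000 };
        for (int i = 0; i < 4; i++)
            printf("size %5lu -> spans %5lu bytes of VMA\n",
                   sizes[i], PAGE_ALIGN(sizes[i]));
        return 0;
    }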
diff --git a/kernel/panic.c b/kernel/panic.c
index c5c4ab25583..126dc43f1c7 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -130,6 +130,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 #endif
     local_irq_enable();
     for (i = 0;;) {
+        touch_softlockup_watchdog();
         i += panic_blink(i);
         mdelay(1);
         i++;
diff --git a/kernel/sched.c b/kernel/sched.c
index bc38804e40d..87d93be336a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -215,7 +215,6 @@ struct runqueue {
      */
     unsigned long nr_running;
 #ifdef CONFIG_SMP
-    unsigned long prio_bias;
     unsigned long cpu_load[3];
 #endif
     unsigned long long nr_switches;
@@ -669,68 +668,13 @@ static int effective_prio(task_t *p)
     return prio;
 }

-#ifdef CONFIG_SMP
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
-    rq->prio_bias += MAX_PRIO - prio;
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
-    rq->prio_bias -= MAX_PRIO - prio;
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
-    rq->nr_running++;
-    if (rt_task(p)) {
-        if (p != rq->migration_thread)
-            /*
-             * The migration thread does the actual balancing. Do
-             * not bias by its priority as the ultra high priority
-             * will skew balancing adversely.
-             */
-            inc_prio_bias(rq, p->prio);
-    } else
-        inc_prio_bias(rq, p->static_prio);
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
-    rq->nr_running--;
-    if (rt_task(p)) {
-        if (p != rq->migration_thread)
-            dec_prio_bias(rq, p->prio);
-    } else
-        dec_prio_bias(rq, p->static_prio);
-}
-#else
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
-    rq->nr_running++;
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
-    rq->nr_running--;
-}
-#endif
-
 /*
  * __activate_task - move a task to the runqueue.
  */
 static inline void __activate_task(task_t *p, runqueue_t *rq)
 {
     enqueue_task(p, rq->active);
-    inc_nr_running(p, rq);
+    rq->nr_running++;
 }

 /*
@@ -739,7 +683,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
 static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 {
     enqueue_task_head(p, rq->active);
-    inc_nr_running(p, rq);
+    rq->nr_running++;
 }

 static int recalc_task_prio(task_t *p, unsigned long long now)
@@ -863,7 +807,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
  */
 static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 {
-    dec_nr_running(p, rq);
+    rq->nr_running--;
     dequeue_task(p, p->array);
     p->array = NULL;
 }
@@ -1007,61 +951,27 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long source_load(int cpu, int type)
 {
     runqueue_t *rq = cpu_rq(cpu);
-    unsigned long running = rq->nr_running;
-    unsigned long source_load, cpu_load = rq->cpu_load[type-1],
-        load_now = running * SCHED_LOAD_SCALE;
-
+    unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;

     if (type == 0)
-        source_load = load_now;
-    else
-        source_load = min(cpu_load, load_now);
-
-    if (running > 1 || (idle == NOT_IDLE && running))
-        /*
-         * If we are busy rebalancing the load is biased by
-         * priority to create 'nice' support across cpus. When
-         * idle rebalancing we should only bias the source_load if
-         * there is more than one task running on that queue to
-         * prevent idle rebalance from trying to pull tasks from a
-         * queue with only one running task.
-         */
-        source_load = source_load * rq->prio_bias / running;
+        return load_now;

-    return source_load;
-}
-
-static inline unsigned long source_load(int cpu, int type)
-{
-    return __source_load(cpu, type, NOT_IDLE);
+    return min(rq->cpu_load[type-1], load_now);
 }

 /*
  * Return a high guess at the load of a migration-target cpu
  */
-static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long target_load(int cpu, int type)
 {
     runqueue_t *rq = cpu_rq(cpu);
-    unsigned long running = rq->nr_running;
-    unsigned long target_load, cpu_load = rq->cpu_load[type-1],
-        load_now = running * SCHED_LOAD_SCALE;
-
+    unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;

     if (type == 0)
-        target_load = load_now;
-    else
-        target_load = max(cpu_load, load_now);
+        return load_now;

-    if (running > 1 || (idle == NOT_IDLE && running))
-        target_load = target_load * rq->prio_bias / running;
-
-    return target_load;
-}
-
-static inline unsigned long target_load(int cpu, int type)
-{
-    return __target_load(cpu, type, NOT_IDLE);
+    return max(rq->cpu_load[type-1], load_now);
 }

 /*
@@ -1530,7 +1440,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
                 list_add_tail(&p->run_list, &current->run_list);
                 p->array = current->array;
                 p->array->nr_active++;
-                inc_nr_running(p, rq);
+                rq->nr_running++;
             }
             set_need_resched();
         } else
@@ -1875,9 +1785,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
            runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
     dequeue_task(p, src_array);
-    dec_nr_running(p, src_rq);
+    src_rq->nr_running--;
     set_task_cpu(p, this_cpu);
-    inc_nr_running(p, this_rq);
+    this_rq->nr_running++;
     enqueue_task(p, this_array);
     p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
                 + this_rq->timestamp_last_tick;
@@ -2056,9 +1966,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,

             /* Bias balancing toward cpus of our domain */
             if (local_group)
-                load = __target_load(i, load_idx, idle);
+                load = target_load(i, load_idx);
             else
-                load = __source_load(i, load_idx, idle);
+                load = source_load(i, load_idx);

             avg_load += load;
         }
@@ -2171,7 +2081,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
     int i;

     for_each_cpu_mask(i, group->cpumask) {
-        load = __source_load(i, 0, idle);
+        load = source_load(i, 0);

         if (load > max_load) {
             max_load = load;
@@ -3571,10 +3481,8 @@ void set_user_nice(task_t *p, long nice)
         goto out_unlock;
     }
     array = p->array;
-    if (array) {
+    if (array)
         dequeue_task(p, array);
-        dec_prio_bias(rq, p->static_prio);
-    }

     old_prio = p->prio;
     new_prio = NICE_TO_PRIO(nice);
@@ -3584,7 +3492,6 @@ void set_user_nice(task_t *p, long nice)

     if (array) {
         enqueue_task(p, array);
-        inc_prio_bias(rq, p->static_prio);
         /*
          * If the task increased its priority or is running and
          * lowered its priority, then reschedule its CPU:
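With prio_bias gone, source_load() and target_load() reduce to comparing the instantaneous load (nr_running * SCHED_LOAD_SCALE) against the decayed cpu_load history: min() to under-estimate a migration source and max() to over-estimate a target, which keeps the balancer conservative. A standalone model of the two estimators, where hist stands in for rq->cpu_load[type-1]:

    #include <stdio.h>

    #define SCHED_LOAD_SCALE 128UL

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    /* type 0 means "use the raw instantaneous value" in both estimators. */
    static unsigned long source_load(unsigned long nr_running, unsigned long hist, int type)
    {
        unsigned long now = nr_running * SCHED_LOAD_SCALE;
        return type == 0 ? now : min_ul(hist, now); /* under-estimate sources */
    }

    static unsigned long target_load(unsigned long nr_running, unsigned long hist, int type)
    {
        unsigned long now = nr_running * SCHED_LOAD_SCALE;
        return type == 0 ? now : max_ul(hist, now); /* over-estimate targets */
    }

    int main(void)
    {
        /* 2 runnable tasks now, decayed history worth 3 tasks of load */
        printf("source: %lu\n", source_load(2, 3 * SCHED_LOAD_SCALE, 1)); /* 256 */
        printf("target: %lu\n", target_load(2, 3 * SCHED_LOAD_SCALE, 1)); /* 384 */
        return 0;
    }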
diff --git a/mm/slab.c b/mm/slab.c
index d66c2b0d971..add05d808a4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1717,6 +1717,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         BUG();
     }

+    /*
+     * Prevent CPUs from coming and going.
+     * lock_cpu_hotplug() nests outside cache_chain_mutex
+     */
+    lock_cpu_hotplug();
+
     mutex_lock(&cache_chain_mutex);

     list_for_each(p, &cache_chain) {
@@ -1918,8 +1924,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
     cachep->dtor = dtor;
     cachep->name = name;

-    /* Don't let CPUs to come and go */
-    lock_cpu_hotplug();

     if (g_cpucache_up == FULL) {
         enable_cpucache(cachep);
@@ -1978,12 +1982,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,

     /* cache setup completed, link it into the list */
     list_add(&cachep->next, &cache_chain);
-    unlock_cpu_hotplug();
 oops:
     if (!cachep && (flags & SLAB_PANIC))
         panic("kmem_cache_create(): failed to create slab `%s'\n",
               name);
     mutex_unlock(&cache_chain_mutex);
+    unlock_cpu_hotplug();
     return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
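The slab.c change is a lock-ordering fix: lock_cpu_hotplug() now covers the whole of kmem_cache_create() and always nests outside cache_chain_mutex, with the unlocks in reverse order on every exit path. Taking two locks in one consistent order is what rules out AB-BA deadlock; the discipline is easy to model with pthread mutexes. An illustrative sketch, not the kernel code:

    #include <pthread.h>
    #include <stdio.h>

    /* The outer lock (cpu hotplug in the patch) is always taken before
     * the inner one (cache_chain_mutex), on every path. */
    static pthread_mutex_t hotplug = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t chain   = PTHREAD_MUTEX_INITIALIZER; /* inner */

    static void create_cache(const char *name)
    {
        pthread_mutex_lock(&hotplug);   /* outer lock first */
        pthread_mutex_lock(&chain);     /* then inner */
        printf("linking cache %s into the chain\n", name);
        pthread_mutex_unlock(&chain);   /* release in reverse order */
        pthread_mutex_unlock(&hotplug);
    }

    int main(void)
    {
        create_cache("demo");
        return 0;
    }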
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a610804cd0..5db32fdfaf3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -632,7 +632,7 @@ static int swap_page(struct page *page)
     struct address_space *mapping = page_mapping(page);

     if (page_mapped(page) && mapping)
-        if (try_to_unmap(page, 0) != SWAP_SUCCESS)
+        if (try_to_unmap(page, 1) != SWAP_SUCCESS)
             goto unlock_retry;

     if (PageDirty(page)) {
@@ -839,7 +839,7 @@ EXPORT_SYMBOL(migrate_page);
  * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because to has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
@@ -928,12 +928,21 @@ redo:
             goto unlock_both;

         if (mapping->a_ops->migratepage) {
+            /*
+             * Most pages have a mapping and most filesystems
+             * should provide a migration function. Anonymous
+             * pages are part of swap space which also has its
+             * own migration function. This is the most common
+             * path for page migration.
+             */
             rc = mapping->a_ops->migratepage(newpage, page);
             goto unlock_both;
         }

         /*
-         * Trigger writeout if page is dirty
+         * Default handling if a filesystem does not provide
+         * a migration function. We can only migrate clean
+         * pages so try to write out any dirty pages first.
          */
         if (PageDirty(page)) {
             switch (pageout(page, mapping)) {
@@ -949,9 +958,10 @@ redo:
                 ;    /* try to migrate the page below */
             }
         }
+
         /*
-         * If we have no buffer or can release the buffer
-         * then do a simple migration.
+         * Buffers are managed in a filesystem specific way.
+         * We must have no buffers or drop them.
          */
         if (!page_has_buffers(page) ||
             try_to_release_page(page, GFP_KERNEL)) {
@@ -966,6 +976,11 @@ redo:
          * swap them out.
          */
         if (pass > 4) {
+            /*
+             * Persistently unable to drop buffers..... As a
+             * measure of last resort we fall back to
+             * swap_page().
+             */
             unlock_page(newpage);
             newpage = NULL;
             rc = swap_page(page);
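The rewritten comments in vmscan.c spell out a fallback ladder for migrating one page: use the filesystem's migratepage operation when present, otherwise write out dirty pages, drop buffers, and only after repeated failed passes fall back to swap_page(). A compact model of that decision ladder, with plain flags in place of the kernel's page state:

    #include <stdio.h>

    /* Decision ladder from the rewritten comments in migrate_pages(),
     * modelled as plain flags (illustrative, not the kernel types). */
    struct fake_page {
        int has_migratepage; /* filesystem provides a_ops->migratepage */
        int dirty;
        int has_buffers;
        int pass;            /* how many retry passes so far */
    };

    static const char *migrate_one(const struct fake_page *p)
    {
        if (p->has_migratepage)
            return "a_ops->migratepage (common path)";
        if (p->dirty)
            return "pageout() first: only clean pages can move";
        if (!p->has_buffers)
            return "simple migration";
        if (p->pass > 4)
            return "buffers won't drop: last resort swap_page()";
        return "try_to_release_page(), retry";
    }

    int main(void)
    {
        struct fake_page p = { 0, 0, 1, 5 };
        printf("%s\n", migrate_one(&p));
        return 0;
    }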
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index da687c8dc6f..7fa3a5a9971 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -79,9 +79,14 @@ static int port_cost(struct net_device *dev)
  */
 static void port_carrier_check(void *arg)
 {
-    struct net_bridge_port *p = arg;
+    struct net_device *dev = arg;
+    struct net_bridge_port *p;

     rtnl_lock();
+    p = dev->br_port;
+    if (!p)
+        goto done;
+
     if (netif_carrier_ok(p->dev)) {
         u32 cost = port_cost(p->dev);

@@ -97,19 +102,33 @@ static void port_carrier_check(void *arg)
             br_stp_disable_port(p);
             spin_unlock_bh(&p->br->lock);
         }
     }
+done:
     rtnl_unlock();
 }

+static void release_nbp(struct kobject *kobj)
+{
+    struct net_bridge_port *p
+        = container_of(kobj, struct net_bridge_port, kobj);
+    kfree(p);
+}
+
+static struct kobj_type brport_ktype = {
+#ifdef CONFIG_SYSFS
+    .sysfs_ops = &brport_sysfs_ops,
+#endif
+    .release = release_nbp,
+};
+
 static void destroy_nbp(struct net_bridge_port *p)
 {
     struct net_device *dev = p->dev;

-    dev->br_port = NULL;
     p->br = NULL;
     p->dev = NULL;
     dev_put(dev);
-    br_sysfs_freeif(p);
+
+    kobject_put(&p->kobj);
 }

 static void destroy_nbp_rcu(struct rcu_head *head)
@@ -133,24 +152,24 @@ static void del_nbp(struct net_bridge_port *p)
     struct net_bridge *br = p->br;
     struct net_device *dev = p->dev;

-    /* Race between RTNL notify and RCU callback */
-    if (p->deleted)
-        return;
+    sysfs_remove_link(&br->ifobj, dev->name);

     dev_set_promiscuity(dev, -1);

     cancel_delayed_work(&p->carrier_check);
-    flush_scheduled_work();

     spin_lock_bh(&br->lock);
     br_stp_disable_port(p);
-    p->deleted = 1;
     spin_unlock_bh(&br->lock);

     br_fdb_delete_by_port(br, p);

     list_del_rcu(&p->list);
+    rcu_assign_pointer(dev->br_port, NULL);
+
+    kobject_del(&p->kobj);
+
     call_rcu(&p->rcu, destroy_nbp_rcu);
 }

@@ -160,7 +179,6 @@ static void del_br(struct net_bridge *br)
     struct net_bridge_port *p, *n;

     list_for_each_entry_safe(p, n, &br->port_list, list) {
-        br_sysfs_removeif(p);
         del_nbp(p);
     }

@@ -254,13 +272,17 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
     p->dev = dev;
     p->path_cost = port_cost(dev);
     p->priority = 0x8000 >> BR_PORT_BITS;
-    dev->br_port = p;
     p->port_no = index;
     br_init_port(p);
     p->state = BR_STATE_DISABLED;
-    INIT_WORK(&p->carrier_check, port_carrier_check, p);
+    INIT_WORK(&p->carrier_check, port_carrier_check, dev);
     kobject_init(&p->kobj);
+    kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
+    p->kobj.ktype = &brport_ktype;
+    p->kobj.parent = &(dev->class_dev.kobj);
+    p->kobj.kset = NULL;
+
     return p;
 }

@@ -388,30 +410,43 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
     if (dev->br_port != NULL)
         return -EBUSY;

-    if (IS_ERR(p = new_nbp(br, dev)))
+    p = new_nbp(br, dev);
+    if (IS_ERR(p))
         return PTR_ERR(p);

-    if ((err = br_fdb_insert(br, p, dev->dev_addr)))
-        destroy_nbp(p);
-
-    else if ((err = br_sysfs_addif(p)))
-        del_nbp(p);
-    else {
-        dev_set_promiscuity(dev, 1);
+    err = kobject_add(&p->kobj);
+    if (err)
+        goto err0;

-        list_add_rcu(&p->list, &br->port_list);
+    err = br_fdb_insert(br, p, dev->dev_addr);
+    if (err)
+        goto err1;

-        spin_lock_bh(&br->lock);
-        br_stp_recalculate_bridge_id(br);
-        br_features_recompute(br);
-        if ((br->dev->flags & IFF_UP)
-            && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
-            br_stp_enable_port(p);
-        spin_unlock_bh(&br->lock);
+    err = br_sysfs_addif(p);
+    if (err)
+        goto err2;

-        dev_set_mtu(br->dev, br_min_mtu(br));
-    }
+    rcu_assign_pointer(dev->br_port, p);
+    dev_set_promiscuity(dev, 1);
+
+    list_add_rcu(&p->list, &br->port_list);
+
+    spin_lock_bh(&br->lock);
+    br_stp_recalculate_bridge_id(br);
+    br_features_recompute(br);
+    schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE);
+    spin_unlock_bh(&br->lock);
+
+    dev_set_mtu(br->dev, br_min_mtu(br));
+    kobject_uevent(&p->kobj, KOBJ_ADD);
+
+    return 0;
+err2:
+    br_fdb_delete_by_port(br, p);
+err1:
+    kobject_del(&p->kobj);
+err0:
+    kobject_put(&p->kobj);
     return err;
 }

@@ -423,7 +458,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
     if (!p || p->br != br)
         return -EINVAL;

-    br_sysfs_removeif(p);
     del_nbp(p);

     spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e3a73cead6b..4eef8375531 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -45,18 +45,20 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
 int br_handle_frame_finish(struct sk_buff *skb)
 {
     const unsigned char *dest = eth_hdr(skb)->h_dest;
-    struct net_bridge_port *p = skb->dev->br_port;
-    struct net_bridge *br = p->br;
+    struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+    struct net_bridge *br;
     struct net_bridge_fdb_entry *dst;
     int passedup = 0;

+    if (!p || p->state == BR_STATE_DISABLED)
+        goto drop;
+
     /* insert into forwarding database after filtering to avoid spoofing */
-    br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+    br = p->br;
+    br_fdb_update(br, p, eth_hdr(skb)->h_source);

-    if (p->state == BR_STATE_LEARNING) {
-        kfree_skb(skb);
-        goto out;
-    }
+    if (p->state == BR_STATE_LEARNING)
+        goto drop;

     if (br->dev->flags & IFF_PROMISC) {
         struct sk_buff *skb2;
@@ -93,6 +95,9 @@ int br_handle_frame_finish(struct sk_buff *skb)

 out:
     return 0;
+drop:
+    kfree_skb(skb);
+    goto out;
 }

 /*
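The bridge changes above convert dev->br_port to RCU: readers in the packet path fetch it with rcu_dereference() and must tolerate NULL, while del_nbp() clears it with rcu_assign_pointer() and defers the free through call_rcu() and the kobject refcount. The reader-side pattern, modeled here with C11 atomics standing in for the RCU primitives (a sketch, not the kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    /* The port pointer is published/cleared atomically, and every reader
     * re-checks for NULL instead of assuming an attached port. */
    struct port { const char *name; };

    static _Atomic(struct port *) br_port;

    static void handle_frame(void)
    {
        struct port *p = atomic_load_explicit(&br_port, memory_order_acquire);
        if (!p) {                      /* port was detached: drop the frame */
            printf("no port: drop\n");
            return;
        }
        printf("deliver via %s\n", p->name);
    }

    int main(void)
    {
        static struct port eth0 = { "eth0" };
        atomic_store_explicit(&br_port, &eth0, memory_order_release); /* add_if */
        handle_frame();
        atomic_store_explicit(&br_port, NULL, memory_order_release);  /* del_nbp */
        handle_frame();
        return 0;
    }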
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 7cac3fb9f80..b5018166b0e 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -51,9 +51,6 @@
 #define store_orig_dstaddr(skb)  (skb_origaddr(skb) = (skb)->nh.iph->daddr)
 #define dnat_took_place(skb)     (skb_origaddr(skb) != (skb)->nh.iph->daddr)

-#define has_bridge_parent(device)   ((device)->br_port != NULL)
-#define bridge_parent(device)       ((device)->br_port->br->dev)
-
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables = 1;
@@ -98,6 +95,12 @@ static struct rtable __fake_rtable = {
     .rt_flags   = 0,
 };

+static inline struct net_device *bridge_parent(const struct net_device *dev)
+{
+    struct net_bridge_port *port = rcu_dereference(dev->br_port);
+
+    return port ? port->br->dev : NULL;
+}

 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
@@ -189,11 +192,15 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
     skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

     skb->dev = bridge_parent(skb->dev);
-    if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
-        skb_pull(skb, VLAN_HLEN);
-        skb->nh.raw += VLAN_HLEN;
+    if (!skb->dev)
+        kfree_skb(skb);
+    else {
+        if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+            skb_pull(skb, VLAN_HLEN);
+            skb->nh.raw += VLAN_HLEN;
+        }
+        skb->dst->output(skb);
     }
-    skb->dst->output(skb);
     return 0;
 }

@@ -270,7 +277,7 @@ bridged_dnat:
 }

 /* Some common code for IPv4/IPv6 */
-static void setup_pre_routing(struct sk_buff *skb)
+static struct net_device *setup_pre_routing(struct sk_buff *skb)
 {
     struct nf_bridge_info *nf_bridge = skb->nf_bridge;

@@ -282,6 +289,8 @@ static void setup_pre_routing(struct sk_buff *skb)
     nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
     nf_bridge->physindev = skb->dev;
     skb->dev = bridge_parent(skb->dev);
+
+    return skb->dev;
 }

 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
@@ -376,7 +385,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
         nf_bridge_put(skb->nf_bridge);
     if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
         return NF_DROP;
-    setup_pre_routing(skb);
+    if (!setup_pre_routing(skb))
+        return NF_DROP;

     NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
         br_nf_pre_routing_finish_ipv6);
@@ -465,7 +475,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
         nf_bridge_put(skb->nf_bridge);
     if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
         return NF_DROP;
-    setup_pre_routing(skb);
+    if (!setup_pre_routing(skb))
+        return NF_DROP;
     store_orig_dstaddr(skb);

     NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
@@ -539,11 +550,16 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
     struct sk_buff *skb = *pskb;
     struct nf_bridge_info *nf_bridge;
     struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+    struct net_device *parent;
     int pf;

     if (!skb->nf_bridge)
         return NF_ACCEPT;

+    parent = bridge_parent(out);
+    if (!parent)
+        return NF_DROP;
+
     if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
         pf = PF_INET;
     else
@@ -564,8 +580,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
     nf_bridge->mask |= BRNF_BRIDGED;
     nf_bridge->physoutdev = skb->dev;

-    NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in),
-        bridge_parent(out), br_nf_forward_finish);
+    NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), parent,
+        br_nf_forward_finish);

     return NF_STOLEN;
 }
@@ -688,6 +704,8 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
         goto out;
     }
     realoutdev = bridge_parent(skb->dev);
+    if (!realoutdev)
+        return NF_DROP;

 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
     /* iptables should match -o br0.x */
@@ -701,9 +719,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
     /* IP forwarded traffic has a physindev, locally
      * generated traffic hasn't.
      */
     if (realindev != NULL) {
-        if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) &&
-            has_bridge_parent(realindev))
-            realindev = bridge_parent(realindev);
+        if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) ) {
+            struct net_device *parent = bridge_parent(realindev);
+            if (parent)
+                realindev = parent;
+        }

         NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev,
                    realoutdev, br_nf_local_out_finish,
@@ -743,6 +763,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
     if (!nf_bridge)
         return NF_ACCEPT;

+    if (!realoutdev)
+        return NF_DROP;
+
     if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
         pf = PF_INET;
     else
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e330b17b6d8..8f10e09f251 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -68,7 +68,6 @@ struct net_bridge_port
     /* STP */
     u8                  priority;
     u8                  state;
-    u8                  deleted;
     u16                 port_no;
     unsigned char       topology_change_ack;
     unsigned char       config_pending;
@@ -233,9 +232,8 @@ extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);

 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
+extern struct sysfs_ops brport_sysfs_ops;
 extern int br_sysfs_addif(struct net_bridge_port *p);
-extern void br_sysfs_removeif(struct net_bridge_port *p);
-extern void br_sysfs_freeif(struct net_bridge_port *p);

 /* br_sysfs_br.c */
 extern int br_sysfs_addbr(struct net_device *dev);
@@ -244,8 +242,6 @@ extern void br_sysfs_delbr(struct net_device *dev);
 #else

 #define br_sysfs_addif(p) (0)
-#define br_sysfs_removeif(p) do { } while(0)
-#define br_sysfs_freeif(p)  kfree(p)
 #define br_sysfs_addbr(dev) (0)
 #define br_sysfs_delbr(dev) do { } while(0)
 #endif /* CONFIG_SYSFS */
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index d071f1c9ad0..296f6a487c5 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -133,29 +133,35 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};

-/* NO locks */
+/* NO locks, but rcu_read_lock (preempt_disabled) */
 int br_stp_handle_bpdu(struct sk_buff *skb)
 {
-    struct net_bridge_port *p = skb->dev->br_port;
-    struct net_bridge *br = p->br;
+    struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+    struct net_bridge *br;
     unsigned char *buf;

+    if (!p)
+        goto err;
+
+    br = p->br;
+    spin_lock(&br->lock);
+
+    if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP))
+        goto out;
+
     /* insert into forwarding database after filtering to avoid spoofing */
-    br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+    br_fdb_update(br, p, eth_hdr(skb)->h_source);
+
+    if (!br->stp_enabled)
+        goto out;

     /* need at least the 802 and STP headers */
     if (!pskb_may_pull(skb, sizeof(header)+1) ||
         memcmp(skb->data, header, sizeof(header)))
-        goto err;
+        goto out;

     buf = skb_pull(skb, sizeof(header));

-    spin_lock_bh(&br->lock);
-    if (p->state == BR_STATE_DISABLED
-        || !(br->dev->flags & IFF_UP)
-        || !br->stp_enabled)
-        goto out;
-
     if (buf[0] == BPDU_TYPE_CONFIG) {
         struct br_config_bpdu bpdu;
@@ -201,7 +207,7 @@ int br_stp_handle_bpdu(struct sk_buff *skb)
         br_received_tcn_bpdu(p);
     }
 out:
-    spin_unlock_bh(&br->lock);
+    spin_unlock(&br->lock);
 err:
     kfree_skb(skb);
     return 0;
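br_stp_handle_bpdu() now runs under rcu_read_lock() from the packet path, so it takes br->lock with plain spin_lock() instead of spin_lock_bh(), validates the RCU-fetched port before touching the bridge at all, and re-checks the port state under the lock. A sketch of that check ordering with simplified flags (not the kernel structures):

    #include <stdio.h>

    /* Ordering the BPDU handler now follows: validate the RCU-fetched
     * port, take the bridge lock, re-check state under the lock, and
     * only then parse the packet. */
    struct bpdu_ctx {
        int have_port;   /* rcu_dereference(dev->br_port) != NULL */
        int disabled;    /* port disabled or bridge device down */
        int stp_enabled;
    };

    static const char *handle_bpdu(const struct bpdu_ctx *c)
    {
        if (!c->have_port)
            return "err: free skb";          /* no lock was taken */
        /* spin_lock(&br->lock) would happen here */
        if (c->disabled || !c->stp_enabled)
            return "out: unlock, free skb";  /* checked under the lock */
        return "parse config/tcn BPDU";
    }

    int main(void)
    {
        struct bpdu_ctx c = { 1, 0, 1 };
        printf("%s\n", handle_bpdu(&c));
        return 0;
    }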
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0ac0355d16d..c51c9e42aeb 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -195,23 +195,11 @@ static ssize_t brport_store(struct kobject * kobj,
     return ret;
 }

-/* called from kobject_put when port ref count goes to zero. */
-static void brport_release(struct kobject *kobj)
-{
-    kfree(container_of(kobj, struct net_bridge_port, kobj));
-}
-
-static struct sysfs_ops brport_sysfs_ops = {
+struct sysfs_ops brport_sysfs_ops = {
     .show = brport_show,
     .store = brport_store,
 };

-static struct kobj_type brport_ktype = {
-    .sysfs_ops = &brport_sysfs_ops,
-    .release = brport_release,
-};
-
-
 /*
  * Add sysfs entries to ethernet device added to a bridge.
  * Creates a brport subdirectory with bridge attributes.
@@ -223,17 +211,6 @@ int br_sysfs_addif(struct net_bridge_port *p)
     struct brport_attribute **a;
     int err;

-    ASSERT_RTNL();
-
-    kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
-    p->kobj.ktype = &brport_ktype;
-    p->kobj.parent = &(p->dev->class_dev.kobj);
-    p->kobj.kset = NULL;
-
-    err = kobject_add(&p->kobj);
-    if(err)
-        goto out1;
-
     err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj,
                 SYSFS_BRIDGE_PORT_LINK);
     if (err)
@@ -245,28 +222,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
             goto out2;
     }

-    err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
-    if (err)
-        goto out2;
-
-    kobject_uevent(&p->kobj, KOBJ_ADD);
-    return 0;
- out2:
-    kobject_del(&p->kobj);
- out1:
+    err= sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
+out2:
     return err;
 }
-
-void br_sysfs_removeif(struct net_bridge_port *p)
-{
-    pr_debug("br_sysfs_removeif\n");
-    sysfs_remove_link(&p->br->ifobj, p->dev->name);
-    kobject_uevent(&p->kobj, KOBJ_REMOVE);
-    kobject_del(&p->kobj);
-}
-
-void br_sysfs_freeif(struct net_bridge_port *p)
-{
-    pr_debug("br_sysfs_freeif\n");
-    kobject_put(&p->kobj);
-}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8700379685e..eca2976abb2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -455,7 +455,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
     if (!skb)
         return;

-    if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
+    if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) {
         kfree_skb(skb);
         return;
     }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 95b9d81ac48..3ffa60dadc0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1135,7 +1135,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)

     if (!skb)
         netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS);
-    else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
+    else if (inet_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
         kfree_skb(skb);
         netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL);
     } else {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ef4724de735..0f4145babb1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1045,7 +1045,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
     }

     nl->nlmsg_flags = NLM_F_REQUEST;
-    nl->nlmsg_pid = current->pid;
+    nl->nlmsg_pid = 0;
     nl->nlmsg_seq = 0;
     nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
     if (cmd == SIOCDELRT) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a97ed5416c2..e9a54ae7d69 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -456,7 +456,8 @@ void tcp_rcv_space_adjust(struct sock *sk)

     tp->rcvq_space.space = space;

-    if (sysctl_tcp_moderate_rcvbuf) {
+    if (sysctl_tcp_moderate_rcvbuf &&
+        !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
         int new_clamp = space;

         /* Receive space grows, normalize in order to
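The tcp_input.c hunk makes receive-buffer autotuning respect SOCK_RCVBUF_LOCK, which the kernel sets when an application pins the buffer with SO_RCVBUF. From user space that looks like the following; after this call, tcp_rcv_space_adjust() leaves the receive buffer alone:

    #include <stdio.h>
    #include <sys/socket.h>

    /* Setting SO_RCVBUF from user space is what sets SOCK_RCVBUF_LOCK
     * on the socket, so autotuning no longer overrides the value. */
    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int size = 1 << 20; /* pin the receive buffer at ~1 MiB */

        if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size)) < 0)
            perror("setsockopt");
        else
            printf("receive buffer pinned; autotuning will not override it\n");
        return 0;
    }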
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 890bac0d4a5..e3debbdb67f 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -343,12 +343,12 @@ static void irda_task_timer_expired(void *data)
 static void irda_device_setup(struct net_device *dev)
 {
     dev->hard_header_len = 0;
-    dev->addr_len = 0;
+    dev->addr_len = LAP_ALEN;

     dev->type = ARPHRD_IRDA;
     dev->tx_queue_len = 8; /* Window size + 1 s-frame */

-    memset(dev->broadcast, 0xff, 4);
+    memset(dev->broadcast, 0xff, LAP_ALEN);

     dev->mtu = 2048;
     dev->flags = IFF_NOARP;
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index 07ec326c71f..f65c7a83bc5 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -696,7 +696,7 @@ irnet_daddr_to_dname(irnet_socket * self)
     {
       /* Yes !!! Get it.. */
       strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
-      self->rname[NICKNAME_MAX_LEN + 1] = '\0';
+      self->rname[sizeof(self->rname) - 1] = '\0';
       DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
         self->daddr, self->rname);
       kfree(discoveries);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2101b45d2ec..6b9772d9587 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -702,7 +702,8 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
  * 0: continue
  * 1: repeat lookup - reference dropped while waiting for socket memory.
  */
-int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+        long timeo, struct sock *ssk)
 {
     struct netlink_sock *nlk;

@@ -712,7 +713,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
         test_bit(0, &nlk->state)) {
         DECLARE_WAITQUEUE(wait, current);
         if (!timeo) {
-            if (!nlk->pid)
+            if (!ssk || nlk_sk(ssk)->pid == 0)
                 netlink_overrun(sk);
             sock_put(sk);
             kfree_skb(skb);
@@ -797,7 +798,7 @@ retry:
         kfree_skb(skb);
         return PTR_ERR(sk);
     }
-    err = netlink_attachskb(sk, skb, nonblock, timeo);
+    err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
     if (err == 1)
         goto retry;
     if (err)