Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c              |   5
-rw-r--r--  kernel/fork.c              |   5
-rw-r--r--  kernel/irq/chip.c          |   5
-rw-r--r--  kernel/irq/handle.c        |  57
-rw-r--r--  kernel/irq/internals.h     |   7
-rw-r--r--  kernel/irq/manage.c        |  12
-rw-r--r--  kernel/irq/migration.c     |  12
-rw-r--r--  kernel/irq/numa_migrate.c  |  19
-rw-r--r--  kernel/irq/proc.c          |   4
-rw-r--r--  kernel/panic.c             |  12
-rw-r--r--  kernel/sched.c             |   7
-rw-r--r--  kernel/sched_rt.c          |  36
-rw-r--r--  kernel/softirq.c           |   5
13 files changed, 121 insertions(+), 65 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index f80dec3f187..70612c19ac9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -977,12 +977,9 @@ static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
- unsigned long *n = end_of_stack(current);
unsigned long free;
- while (*n == 0)
- n++;
- free = (unsigned long)n - (unsigned long)end_of_stack(current);
+ free = stack_not_used(current);
if (free >= lowest_to_date)
return;
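The open-coded scan is replaced by a stack_not_used() helper. A minimal sketch of what that helper presumably looks like (its definition is not part of this diff; assumed to live alongside end_of_stack() in <linux/sched.h>). Note the do/while: it steps past the end-of-stack word, which the fork.c hunk below now seeds with the nonzero STACK_END_MAGIC sentinel, whereas the removed while (*n == 0) loop would have stopped on it immediately:

	/*
	 * Sketch, not the verbatim kernel definition: the open-coded
	 * loop removed above, adjusted to skip the sentinel word.
	 */
	static inline unsigned long stack_not_used(struct task_struct *p)
	{
		unsigned long *n = end_of_stack(p);

		do {		/* skip the STACK_END_MAGIC word */
			n++;
		} while (!*n);

		return (unsigned long)n - (unsigned long)end_of_stack(p);
	}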
diff --git a/kernel/fork.c b/kernel/fork.c
index 242a706e772..99309df985b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <trace/sched.h>
+#include <linux/magic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ unsigned long *stackend;
+
int err;
prepare_to_copy(orig);
@@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
goto out;
setup_thread_stack(tsk, orig);
+ stackend = end_of_stack(tsk);
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
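Writing STACK_END_MAGIC at the end of each new task's stack only pays off if something later tests the sentinel. A hedged sketch of such a consumer; the exact call site is an assumption (an architecture's fault handler is the natural place), and check_stack_sentinel is a hypothetical name, not a function added by this diff:

	static void check_stack_sentinel(struct task_struct *tsk)
	{
		unsigned long *stackend = end_of_stack(tsk);

		if (*stackend != STACK_END_MAGIC)
			printk(KERN_ALERT
			       "Thread overran stack, or stack corrupted\n");
	}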
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e..c248eba98b4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
desc->irq_count = 0;
desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
- cpumask_setall(&desc->affinity);
+ cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
#endif
spin_unlock_irqrestore(&desc->lock, flags);
}
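Here and throughout the hunks below, desc->affinity and desc->pending_mask stop taking an '&', which implies the struct irq_desc fields changed from embedded cpumask_t values to cpumask_var_t pointers. A sketch of the implied field change (the surrounding layout is assumed, not shown by this diff):

	struct irq_desc {
		/* ... other fields unchanged ... */
	#ifdef CONFIG_SMP
		cpumask_var_t		affinity;	/* was: cpumask_t */
	#ifdef CONFIG_GENERIC_PENDING_IRQ
		cpumask_var_t		pending_mask;	/* was: cpumask_t */
	#endif
	#endif
	};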
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f32..f51eaee921b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
+#include <linux/bootmem.h>
#include "internals.h"
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
#ifdef CONFIG_SPARSE_IRQ
+
static struct irq_desc irq_desc_init = {
.irq = -1,
.status = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -113,6 +112,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
printk(KERN_ERR "can not alloc kstat_irqs\n");
BUG_ON(1);
}
+ if (!init_alloc_desc_masks(desc, cpu, false)) {
+ printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+ BUG_ON(1);
+ }
arch_init_chip_data(desc, cpu);
}
@@ -121,7 +124,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
*/
DEFINE_SPINLOCK(sparse_irq_lock);
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +134,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
}
};
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
int __init early_irq_init(void)
{
@@ -148,18 +147,30 @@ int __init early_irq_init(void)
init_irq_default_affinity();
+ /* initialize nr_irqs based on nr_cpu_ids */
+ arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
+ /* allocate irq_desc_ptrs array based on nr_irqs */
+ irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+ /* allocate based on nr_cpu_ids */
+ /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+ kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+ sizeof(int));
+
for (i = 0; i < legacy_count; i++) {
desc[i].irq = i;
- desc[i].kstat_irqs = kstat_irqs_legacy[i];
+ desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+ init_alloc_desc_masks(&desc[i], 0, true);
irq_desc_ptrs[i] = desc + i;
}
- for (i = legacy_count; i < NR_IRQS; i++)
+ for (i = legacy_count; i < nr_irqs; i++)
irq_desc_ptrs[i] = NULL;
return arch_early_irq_init();
@@ -167,7 +178,10 @@ int __init early_irq_init(void)
struct irq_desc *irq_to_desc(unsigned int irq)
{
- return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+ if (irq_desc_ptrs && irq < nr_irqs)
+ return irq_desc_ptrs[irq];
+
+ return NULL;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +190,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
unsigned long flags;
int node;
- if (irq >= NR_IRQS) {
- printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
- irq, NR_IRQS);
- WARN_ON(1);
+ if (irq >= nr_irqs) {
+ WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+ irq, nr_irqs);
return NULL;
}
@@ -221,9 +234,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.handle_irq = handle_bad_irq,
.depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
}
};
@@ -235,12 +245,15 @@ int __init early_irq_init(void)
init_irq_default_affinity();
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
- for (i = 0; i < count; i++)
+ for (i = 0; i < count; i++) {
desc[i].irq = i;
-
+ init_alloc_desc_masks(&desc[i], 0, true);
+ }
return arch_early_irq_init();
}
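init_alloc_desc_masks() is new with this series and its definition is not part of this hunk. A sketch consistent with how it is called here, assuming bootmem allocation for the boot=true case (hence the new <linux/bootmem.h> include above) and node-local slab allocation otherwise:

	/* Sketch, assuming cpumask_var_t fields and CONFIG_SMP */
	static inline bool init_alloc_desc_masks(struct irq_desc *desc,
						 int cpu, bool boot)
	{
		int node = cpu_to_node(cpu);

		if (boot) {
			alloc_bootmem_cpumask_var(&desc->affinity);
			cpumask_setall(desc->affinity);
	#ifdef CONFIG_GENERIC_PENDING_IRQ
			alloc_bootmem_cpumask_var(&desc->pending_mask);
			cpumask_clear(desc->pending_mask);
	#endif
			return true;
		}

		if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
			return false;
		cpumask_setall(desc->affinity);
	#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC,
					    node)) {
			free_cpumask_var(desc->affinity);
			return false;
		}
		cpumask_clear(desc->pending_mask);
	#endif
		return true;
	}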
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc12..40416a81a0f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f0366455..a3a5dc9ef34 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
} else {
desc->status |= IRQ_MOVE_PENDING;
- cpumask_copy(&desc->pending_mask, cpumask);
+ cpumask_copy(desc->pending_mask, cpumask);
}
#else
- cpumask_copy(&desc->affinity, cpumask);
+ cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
#endif
desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
* one of the targets is online.
*/
if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
- if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+ if (cpumask_any_and(desc->affinity, cpu_online_mask)
< nr_cpu_ids)
goto set_affinity;
else
desc->status &= ~IRQ_AFFINITY_SET;
}
- cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+ cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, &desc->affinity);
+ desc->chip->set_affinity(irq, desc->affinity);
return 0;
}
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630..e05ad9be43b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
desc->status &= ~IRQ_MOVE_PENDING;
- if (unlikely(cpumask_empty(&desc->pending_mask)))
+ if (unlikely(cpumask_empty(desc->pending_mask)))
return;
if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
* For correct operation this depends on the caller
* masking the irqs.
*/
- if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
< nr_cpu_ids)) {
- cpumask_and(&desc->affinity,
- &desc->pending_mask, cpu_online_mask);
- desc->chip->set_affinity(irq, &desc->affinity);
+ cpumask_and(desc->affinity,
+ desc->pending_mask, cpu_online_mask);
+ desc->chip->set_affinity(irq, desc->affinity);
}
- cpumask_clear(&desc->pending_mask);
+ cpumask_clear(desc->pending_mask);
}
void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77..666260e4c06 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
old_desc->kstat_irqs = NULL;
}
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
struct irq_desc *desc, int cpu)
{
memcpy(desc, old_desc, sizeof(struct irq_desc));
+ if (!init_alloc_desc_masks(desc, cpu, false)) {
+ printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+ "for migration.\n", irq);
+ return false;
+ }
spin_lock_init(&desc->lock);
desc->cpu = cpu;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+ init_copy_desc_masks(old_desc, desc);
arch_init_copy_chip_data(old_desc, desc, cpu);
+ return true;
}
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
if (!desc) {
- printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+ printk(KERN_ERR "irq %d: can not get new irq_desc "
+ "for migration.\n", irq);
+ /* still use old one */
+ desc = old_desc;
+ goto out_unlock;
+ }
+ if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
/* still use old one */
+ kfree(desc);
desc = old_desc;
goto out_unlock;
}
- init_copy_one_irq_desc(irq, old_desc, desc, cpu);
irq_desc_ptrs[irq] = desc;
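Note the ordering in init_copy_one_irq_desc(): the memcpy() duplicates the old descriptor including its mask pointers, init_alloc_desc_masks() then replaces those pointers with fresh node-local allocations, and init_copy_desc_masks() copies the actual mask bits across. A sketch of that copy helper (definition assumed, not part of this hunk):

	static inline void init_copy_desc_masks(struct irq_desc *old_desc,
						struct irq_desc *new_desc)
	{
	#ifdef CONFIG_SMP
		cpumask_copy(new_desc->affinity, old_desc->affinity);
	#ifdef CONFIG_GENERIC_PENDING_IRQ
		cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
	#endif
	#endif
	}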
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bce..692363dd591 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
- const struct cpumask *mask = &desc->affinity;
+ const struct cpumask *mask = desc->affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PENDING)
- mask = &desc->pending_mask;
+ mask = desc->pending_mask;
#endif
seq_cpumask(m, mask);
seq_putc(m, '\n');
diff --git a/kernel/panic.c b/kernel/panic.c
index 2a2ff36ff44..33cab3de176 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...)
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ dump_stack();
+#endif
bust_spinlocks(0);
/*
@@ -355,15 +358,22 @@ EXPORT_SYMBOL(warn_slowpath);
#endif
#ifdef CONFIG_CC_STACKPROTECTOR
+
+#ifndef GCC_HAS_SP
+#warning You have selected the CONFIG_CC_STACKPROTECTOR option, but the gcc used does not support this.
+#endif
+
/*
* Called when gcc's -fstack-protector feature is used, and
* gcc detects corruption of the on-stack canary value
*/
void __stack_chk_fail(void)
{
- panic("stack-protector: Kernel stack is corrupted");
+ panic("stack-protector: Kernel stack is corrupted in: %p\n",
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);
+
#endif
core_param(panic, panic_timeout, int, 0644);
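For context on what calls __stack_chk_fail(): gcc's -fstack-protector plants a canary between a function's locals and its return address and verifies it in the epilogue. A conceptual, hand-written equivalent of that instrumentation (a sketch only; the real guard value lives in __stack_chk_guard or a per-task slot, depending on the architecture):

	unsigned long __stack_chk_guard;		/* set at boot */

	void instrumented_function(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue */
		char buf[64];
		/* ... body that could overrun buf ... */
		if (canary != __stack_chk_guard)		/* epilogue */
			__stack_chk_fail();			/* no return */
	}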
diff --git a/kernel/sched.c b/kernel/sched.c
index 52bbf1c842a..c71d7d501ed 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5939,12 +5939,7 @@ void sched_show_task(struct task_struct *p)
printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
- {
- unsigned long *n = end_of_stack(p);
- while (!*n)
- n++;
- free = (unsigned long)n - (unsigned long)end_of_stack(p);
- }
+ free = stack_not_used(p);
#endif
printk(KERN_CONT "%5lu %5d %6d\n", free,
task_pid_nr(p), task_pid_nr(p->real_parent));
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b79..da932f4c852 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+ const struct cpumask *mask)
{
int first;
/* "this_cpu" is cheaper to preempt than a remote processor */
- if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+ if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
return this_cpu;
- first = first_cpu(*mask);
- if (first != NR_CPUS)
+ first = cpumask_first(mask);
+ if (first < nr_cpu_ids)
return first;
return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
+ cpumask_var_t domain_mask;
if (task->rt.nr_cpus_allowed == 1)
return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
if (this_cpu == cpu)
this_cpu = -1; /* Skip this_cpu opt if the same */
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
- cpumask_t domain_mask;
- int best_cpu;
+ if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+ for_each_domain(cpu, sd) {
+ if (sd->flags & SD_WAKE_AFFINE) {
+ int best_cpu;
- cpumask_and(&domain_mask, sched_domain_span(sd),
- lowest_mask);
+ cpumask_and(domain_mask,
+ sched_domain_span(sd),
+ lowest_mask);
- best_cpu = pick_optimal_cpu(this_cpu,
- &domain_mask);
- if (best_cpu != -1)
- return best_cpu;
+ best_cpu = pick_optimal_cpu(this_cpu,
+ domain_mask);
+
+ if (best_cpu != -1) {
+ free_cpumask_var(domain_mask);
+ return best_cpu;
+ }
+ }
}
+ free_cpumask_var(domain_mask);
}
/*
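The sched_rt.c hunk trades an on-stack cpumask_t, whose size scales with NR_CPUS, for a cpumask_var_t, which under CONFIG_CPUMASK_OFFSTACK is heap-allocated and can fail; that is why the rewritten loop gains an allocation check and a free_cpumask_var() on every exit path. The general pattern, as a self-contained sketch (example_pick_cpu is hypothetical):

	static int example_pick_cpu(void)
	{
		cpumask_var_t mask;
		int cpu;

		if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
			return -1;	/* degrade gracefully on failure */

		cpumask_and(mask, cpu_online_mask, cpu_active_mask);
		cpu = cpumask_first(mask);
		free_cpumask_var(mask);

		return cpu < nr_cpu_ids ? cpu : -1;
	}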
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8..0365b4899a3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
return 0;
}
+int __init __weak arch_probe_nr_irqs(void)
+{
+ return 0;
+}
+
int __init __weak arch_early_irq_init(void)
{
return 0;
}
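These __weak stubs return 0 so that architectures without an override keep the default NR_IRQS-sized behavior; an architecture that wants nr_irqs scaled to the booted system supplies a strong definition instead. A hypothetical override (the sizing formula is illustrative only, not any architecture's actual one):

	/* Hypothetical arch override of the __weak stub above */
	int __init arch_probe_nr_irqs(void)
	{
		nr_irqs = min_t(int, NR_IRQS, 64 + 32 * nr_cpu_ids);
		return 0;
	}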