Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig              1
-rw-r--r--  arch/sh/drivers/pci/pci.c   32
-rw-r--r--  arch/sh/kernel/smp.c        62
-rw-r--r--  arch/sh/kernel/stacktrace.c  1
4 files changed, 17 insertions, 79 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 9a854c8e527..3e7384f4619 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -688,6 +688,7 @@ config CRASH_DUMP
config SMP
bool "Symmetric multi-processing support"
depends on SYS_SUPPORTS_SMP
+ select USE_GENERIC_SMP_HELPERS
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
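Selecting USE_GENERIC_SMP_HELPERS opts arch/sh into the generic cross-call code in kernel/smp.c, which takes over the queueing, locking and completion tracking that the open-coded smp_call_function() below used to do. In exchange, the architecture only has to provide two IPI hooks; a minimal sketch of that contract, with prototypes matching the definitions added to arch/sh/kernel/smp.c later in this diff:

/* Hooks the architecture must provide once USE_GENERIC_SMP_HELPERS
 * is selected; kernel/smp.c drives them. */
void arch_send_call_function_ipi(cpumask_t mask);
void arch_send_call_function_single_ipi(int cpu);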
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 08d2e732525..f57095a2617 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -76,38 +76,6 @@ void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
pci_read_bridge_bases(bus);
}
-void
-pcibios_update_resource(struct pci_dev *dev, struct resource *root,
- struct resource *res, int resource)
-{
- u32 new, check;
- int reg;
-
- new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
- if (resource < 6) {
- reg = PCI_BASE_ADDRESS_0 + 4*resource;
- } else if (resource == PCI_ROM_RESOURCE) {
- res->flags |= IORESOURCE_ROM_ENABLE;
- new |= PCI_ROM_ADDRESS_ENABLE;
- reg = dev->rom_base_reg;
- } else {
- /*
- * Somebody might have asked allocation of a non-standard
- * resource
- */
- return;
- }
-
- pci_write_config_dword(dev, reg, new);
- pci_read_config_dword(dev, reg, &check);
- if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ?
- PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
- printk(KERN_ERR "PCI: Error while updating region "
- "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
- new, check);
- }
-}
-
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
__attribute__ ((weak));
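The deleted pcibios_update_resource() duplicated logic that the generic PCI core already provides: pci_update_resource() in drivers/pci/setup-res.c performs the same write-then-readback verification of a BAR. A minimal sketch of that pattern, using a hypothetical helper name (check_bar_write is not part of any tree):

/* Hypothetical helper mirroring the removed code: write a BAR value,
 * read it back, and complain if the writable bits did not stick. */
static void check_bar_write(struct pci_dev *dev, int reg, u32 new, u32 mask)
{
	u32 check;

	pci_write_config_dword(dev, reg, new);
	pci_read_config_dword(dev, reg, &check);
	if ((new ^ check) & mask)
		printk(KERN_ERR "PCI: error updating %s reg %#x (%08x != %08x)\n",
		       pci_name(dev), reg, new, check);
}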
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d168f5..60c50841143 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-static atomic_t cpus_booted = ATOMIC_INIT(0);
-
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
-
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
@@ -175,45 +168,20 @@ static void stop_this_cpu(void *unused)
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, 0, 1, 0);
+ smp_call_function(stop_this_cpu, 0, 0);
}
-struct smp_fn_call_struct smp_fn_call = {
- .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
- .finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
{
- unsigned int nr_cpus = atomic_read(&cpus_booted);
- int i;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- spin_lock(&smp_fn_call.lock);
-
- atomic_set(&smp_fn_call.finished, 0);
- smp_fn_call.fn = func;
- smp_fn_call.data = info;
-
- for (i = 0; i < nr_cpus; i++)
- if (i != smp_processor_id())
- plat_send_ipi(i, SMP_MSG_FUNCTION);
-
- if (wait)
- while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+ int cpu;
- spin_unlock(&smp_fn_call.lock);
+ for_each_cpu_mask(cpu, mask)
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+}
- return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
/* Not really SMP stuff ... */
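On the receive side, the generic helpers expect the IPI handler to call back into kernel/smp.c. A sketch of how the two new messages would be demultiplexed, assuming a platform handler along these lines (the exact arch/sh handler is not part of this hunk):

/* Hypothetical IPI demux: hand the two function-call messages to the
 * generic helpers, which run the queued callbacks. */
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	}
}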
@@ -229,7 +197,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -255,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -292,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
fd.vma = vma;
fd.addr1 = start;
fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -316,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
fd.addr1 = start;
fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
static void flush_tlb_page_ipi(void *info)
@@ -335,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
fd.vma = vma;
fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -359,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
fd.addr1 = asid;
fd.addr2 = vaddr;
- smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
local_flush_tlb_one(asid, vaddr);
}
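The remaining hunks are the mechanical fallout of the conversion: with the generic helpers, smp_call_function() and on_each_cpu() lose their retry argument, shrinking from (func, info, retry, wait) to (func, info, wait). The post-conversion prototypes, as used by every caller updated above:

/* Generic-helper era signatures; retry is gone, wait remains. */
int smp_call_function(void (*func)(void *info), void *info, int wait);
int on_each_cpu(void (*func)(void *info), void *info, int wait);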
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index d41e561be20..1b2ae35c4a7 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -34,3 +34,4 @@ void save_stack_trace(struct stack_trace *trace)
}
}
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
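Exporting save_stack_trace() lets GPL modules capture a trace of the current stack. A minimal usage sketch, assuming the stack_trace API of this era (caller-supplied entries buffer, print_stack_trace() for output); dump_current_stack is a hypothetical caller:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static unsigned long entries[16];

static void dump_current_stack(void)
{
	/* Trace is re-initialized per call, so nr_entries starts at 0. */
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}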