Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/bL_switcher.c  16
-rw-r--r--  arch/arm/common/edma.c         31
-rw-r--r--  arch/arm/common/mcpm_entry.c   52
-rw-r--r--  arch/arm/common/timer-sp.c      4
4 files changed, 87 insertions(+), 16 deletions(-)
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 490f3dced74..6eaddc47c43 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -58,16 +58,6 @@ static int read_mpidr(void)
}
/*
- * Get a global nanosecond time stamp for tracing.
- */
-static s64 get_ns(void)
-{
-	struct timespec ts;
-	getnstimeofday(&ts);
-	return timespec_to_ns(&ts);
-}
-
-/*
 * bL switcher core code.
 */
@@ -224,7 +214,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
	 */
	local_irq_disable();
	local_fiq_disable();
-	trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
	/* redirect GIC's SGIs to our counterpart */
	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
@@ -267,7 +257,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
					  tdev->evtdev->next_event, 1);
	}
-	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
	local_fiq_enable();
	local_irq_enable();
@@ -558,7 +548,7 @@ int bL_switcher_get_logical_index(u32 mpidr)
static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
-	trace_cpu_migrate_current(get_ns(), read_mpidr());
+	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}
int bL_switcher_trace_trigger(void)
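
The change above drops the file-local get_ns() helper in favor of the timekeeping core's ktime_get_real_ns(), which produces the same wall-clock nanosecond count without going through an intermediate timespec. A minimal sketch of the equivalence (standalone illustration, not part of the patch):

	/* before: open-coded wall-clock timestamp */
	struct timespec ts;
	getnstimeofday(&ts);
	s64 stamp = timespec_to_ns(&ts);

	/* after: one call into the timekeeping core, same nanosecond value */
	u64 stamp2 = ktime_get_real_ns();

Both read CLOCK_REALTIME, so migration timestamps recorded before and after this patch remain directly comparable in a trace.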
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 485be42519b..88099175fc5 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1414,6 +1414,34 @@ void edma_clear_event(unsigned channel)
}
EXPORT_SYMBOL(edma_clear_event);
+/*
+ * edma_assign_channel_eventq - move given channel to desired eventq
+ * Arguments:
+ *	channel - channel number
+ *	eventq_no - queue to move the channel
+ *
+ * Can be used to move a channel to a selected event queue.
+ */
+void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* default to low priority queue */
+	if (eventq_no == EVENTQ_DEFAULT)
+		eventq_no = edma_cc[ctlr]->default_queue;
+	if (eventq_no >= edma_cc[ctlr]->num_tc)
+		return;
+
+	map_dmach_queue(ctlr, channel, eventq_no);
+}
+EXPORT_SYMBOL(edma_assign_channel_eventq);
+
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma *edma_cc)
{
@@ -1470,7 +1498,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
	queue_priority_map[i][1] = -1;
	pdata->queue_priority_mapping = queue_priority_map;
-	pdata->default_queue = 0;
+	/* Default queue has the lowest priority */
+	pdata->default_queue = i - 1;
	return 0;
}
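
edma_assign_channel_eventq() gives clients control over which transfer-controller event queue services a channel. A hedged usage sketch (EDMA_CTLR_CHAN() and the EVENTQ_* enumerators already exist in the edma platform headers; channel 12 is purely illustrative):

	/* pin an allocated channel to event queue 1, e.g. to keep audio
	 * transfers off the queue used for bulk memcpy traffic */
	edma_assign_channel_eventq(EDMA_CTLR_CHAN(0, 12), EVENTQ_1);

	/* EVENTQ_DEFAULT falls back to the controller's default queue,
	 * which the second hunk above now makes the lowest-priority one */
	edma_assign_channel_eventq(EDMA_CTLR_CHAN(0, 12), EVENTQ_DEFAULT);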
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index f91136ab447..3c165fc2dce 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -12,11 +12,13 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
+#include <linux/cpu_pm.h>
#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
+#include <asm/suspend.h>
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
	return 0;
}
+#ifdef CONFIG_ARM_CPU_SUSPEND
+
+static int __init nocache_trampoline(unsigned long _arg)
+{
+	void (*cache_disable)(void) = (void *)_arg;
+	unsigned int mpidr = read_cpuid_mpidr();
+	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	phys_reset_t phys_reset;
+
+	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+	setup_mm_for_reboot();
+
+	__mcpm_cpu_going_down(cpu, cluster);
+	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
+	cache_disable();
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+	__mcpm_cpu_down(cpu, cluster);
+
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+	BUG();
+}
+
+int __init mcpm_loopback(void (*cache_disable)(void))
+{
+	int ret;
+
+	/*
+	 * We're going to soft-restart the current CPU through the
+	 * low-level MCPM code by leveraging the suspend/resume
+	 * infrastructure. Let's play it safe by using cpu_pm_enter()
+	 * in case the CPU init code path resets the VFP or similar.
+	 */
+	local_irq_disable();
+	local_fiq_disable();
+	ret = cpu_pm_enter();
+	if (!ret) {
+		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
+		cpu_pm_exit();
+	}
+	local_fiq_enable();
+	local_irq_enable();
+	if (ret)
+		pr_err("%s returned %d\n", __func__, ret);
+	return ret;
+}
+
+#endif
+
struct sync_struct mcpm_sync;
/*
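
mcpm_loopback() lets a platform's MCPM backend send the boot CPU once through the complete power-down/up sequence at init time, so that its cluster state matches CPUs brought up later via mcpm_entry_point. A hedged sketch of a caller (my_mcpm_init and my_cache_off are hypothetical names; v7_exit_coherency_flush() is the stock helper from asm/cacheflush.h used for this on v7 CPUs):

	static void my_cache_off(void)	/* hypothetical platform hook */
	{
		/* flush and disable the local data cache, exit coherency */
		v7_exit_coherency_flush(all);
	}

	static int __init my_mcpm_init(void)
	{
		/* assumed to run after mcpm_platform_register() succeeds */
		return mcpm_loopback(my_cache_off);
	}

Note the callback is passed through cpu_suspend() as an unsigned long and cast back in nocache_trampoline(), because cpu_suspend() forwards only a single scalar argument to its trampoline.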
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index fd6bff0c5b9..19211324772 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -233,13 +233,13 @@ static void __init sp804_of_init(struct device_node *np)
	if (IS_ERR(clk1))
		clk1 = NULL;
-	/* Get the 2nd clock if the timer has 2 timer clocks */
+	/* Get the 2nd clock if the timer has 3 timer clocks */
	if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
		clk2 = of_clk_get(np, 1);
		if (IS_ERR(clk2)) {
			pr_err("sp804: %s clock not found: %d\n", np->name,
				(int)PTR_ERR(clk2));
-			goto err;
+			clk2 = NULL;
		}
	} else
		clk2 = clk1;
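
With this change, a missing second timer clock no longer aborts the whole sp804 setup: leaving clk2 NULL lets the init helpers later in this file fall back to the default "sp804" system clock. Roughly, as the fallback pattern appears earlier in the same file (paraphrased sketch, not part of this patch):

	if (!clk) {
		clk = clk_get_sys("sp804", name);
		if (IS_ERR(clk)) {
			pr_err("sp804: clock not found: %d\n",
			       (int)PTR_ERR(clk));
			return;
		}
	}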