Diffstat (limited to 'arch/mips/netlogic/xlp/wakeup.c')
-rw-r--r--  arch/mips/netlogic/xlp/wakeup.c | 92
1 file changed, 70 insertions(+), 22 deletions(-)
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 682d5638dc0..9a92617a2af 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -32,7 +32,6 @@
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/threads.h>
@@ -47,14 +46,14 @@
#include <asm/netlogic/mips-extns.h>
#include <asm/netlogic/xlp-hal/iomap.h>
-#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/sys.h>
static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
{
uint32_t coremask, value;
- int count;
+ int count, resetreg;
coremask = (1 << core);
@@ -65,12 +64,24 @@ static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
}
+ /* On 9XX, mark coherent first */
+ if (cpu_is_xlp9xx()) {
+ value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
+ }
+
/* Remove CPU Reset */
- value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
+ resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
+ value = nlm_read_sys_reg(sysbase, resetreg);
value &= ~coremask;
- nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
+ nlm_write_sys_reg(sysbase, resetreg, value);
+
+ /* We are done on 9XX */
+ if (cpu_is_xlp9xx())
+ return 1;
- /* Poll for CPU to mark itself coherent */
+ /* Poll for CPU to mark itself coherent on other type of XLP */
count = 100000;
do {
value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
@@ -84,7 +95,7 @@ static int wait_for_cpus(int cpu, int bootcpu)
volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
int i, count, notready;
- count = 0x20000000;
+ count = 0x800000;
do {
notready = nlm_threads_per_core;
for (i = 0; i < nlm_threads_per_core; i++)
@@ -98,27 +109,62 @@ static int wait_for_cpus(int cpu, int bootcpu)
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
struct nlm_soc_info *nodep;
- uint64_t syspcibase;
- uint32_t syscoremask;
+ uint64_t syspcibase, fusebase;
+ uint32_t syscoremask, mask, fusemask;
int core, n, cpu;
for (n = 0; n < NLM_NR_NODES; n++) {
- syspcibase = nlm_get_sys_pcibase(n);
- if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
- break;
+ if (n != 0) {
+ /* check if node exists and is online */
+ if (cpu_is_xlp9xx()) {
+ int b = xlp9xx_get_socbus(n);
+ pr_info("Node %d SoC PCI bus %d.\n", n, b);
+ if (b == 0)
+ break;
+ } else {
+ syspcibase = nlm_get_sys_pcibase(n);
+ if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+ break;
+ }
+ nlm_node_init(n);
+ }
/* read cores in reset from SYS */
- if (n != 0)
- nlm_node_init(n);
nodep = nlm_get_node(n);
- syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
+
+ if (cpu_is_xlp9xx()) {
+ fusebase = nlm_get_fuse_regbase(n);
+ fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
+ mask = 0xfffff;
+ } else {
+ fusemask = nlm_read_sys_reg(nodep->sysbase,
+ SYS_EFUSE_DEVICE_CFG_STATUS0);
+ switch (read_c0_prid() & 0xff00) {
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ mask = 0xf;
+ break;
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ mask = 0x3;
+ break;
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ default:
+ mask = 0xff;
+ break;
+ }
+ }
+
+ /*
+ * Fused out cores are set in the fusemask, and the remaining
+ * cores are renumbered to range 0 .. nactive-1
+ */
+ syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
+
/* The boot cpu */
- if (n == 0) {
- syscoremask |= 1;
+ if (n == 0)
nodep->coremask = 1;
- }
- for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+ pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
+ for (core = 0; core < nlm_cores_per_node(); core++) {
/* we will be on node 0 core 0 */
if (n == 0 && core == 0)
continue;
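A quick worked example of the coremask derivation in the hunk above, as a stand-alone user-space sketch (the fuse value is hypothetical and __builtin_popcount stands in for the kernel's hweight32()): on an XLP8XX-style part with mask 0xff, fusing out cores 2 and 5 leaves six usable cores, which are renumbered 0..5, so the resulting wakeup mask is 0x3f.

    #include <stdio.h>

    /* Same computation as the kernel code above: count the cores that are
     * not fused out, then build a contiguous mask of that many low bits. */
    static unsigned int active_coremask(unsigned int fusemask, unsigned int mask)
    {
        return (1u << __builtin_popcount(~fusemask & mask)) - 1;
    }

    int main(void)
    {
        unsigned int fusemask = (1u << 2) | (1u << 5);  /* hypothetical: cores 2 and 5 fused out */

        printf("syscoremask = 0x%x\n", active_coremask(fusemask, 0xff));  /* prints 0x3f */
        return 0;
    }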
@@ -128,7 +174,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
continue;
/* see if at least the first hw thread is enabled */
- cpu = (n * NLM_CORES_PER_NODE + core)
+ cpu = (n * nlm_cores_per_node() + core)
* NLM_THREADS_PER_CORE;
if (!cpumask_test_cpu(cpu, wakeup_mask))
continue;
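For reference, the cpu index computed in this hunk is the first hardware thread of a given (node, core) pair. A minimal sketch of that arithmetic, assuming 8 cores per node and 4 threads per core (the real values come from nlm_cores_per_node() and NLM_THREADS_PER_CORE):

    #include <stdio.h>

    #define CORES_PER_NODE   8   /* assumed; the kernel asks nlm_cores_per_node() */
    #define THREADS_PER_CORE 4   /* assumed value of NLM_THREADS_PER_CORE */

    /* Linearize (node, core) to the CPU number of that core's first hw thread. */
    static int first_thread_cpu(int node, int core)
    {
        return (node * CORES_PER_NODE + core) * THREADS_PER_CORE;
    }

    int main(void)
    {
        /* node 1, core 2 -> cpu 40; the cpumask test above checks that thread */
        printf("cpu = %d\n", first_thread_cpu(1, 2));
        return 0;
    }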
@@ -141,7 +187,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
nodep->coremask |= 1u << core;
/* spin until the hw threads sets their ready */
- wait_for_cpus(cpu, 0);
+ if (!wait_for_cpus(cpu, 0))
+ pr_err("Node %d : timeout core %d\n", n, core);
}
}
}
@@ -153,7 +200,8 @@ void xlp_wakeup_secondary_cpus()
* first wakeup core 0 threads
*/
xlp_boot_core0_siblings();
- wait_for_cpus(0, 0);
+ if (!wait_for_cpus(0, 0))
+ pr_err("Node 0 : timeout core 0\n");
/* now get other cores out of reset */
xlp_enable_secondary_cores(&nlm_cpumask);
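The two new pr_err() paths assume that wait_for_cpus() returns nonzero on success and 0 when its countdown expires. A minimal user-space model of that bounded-wait convention (the names and the ready() predicate are illustrative, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Bounded wait: true if ready() held before the budget ran out, false on
     * timeout -- the case the callers above report with pr_err(). */
    static bool wait_until(bool (*ready)(void), long count)
    {
        do {
            if (ready())
                return true;
        } while (--count > 0);
        return false;
    }

    static bool always_ready(void) { return true; }

    int main(void)
    {
        printf("ready in time: %d\n", wait_until(always_ready, 0x800000));
        return 0;
    }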