 arch/powerpc/platforms/cell/spu_base.c    | 44
 arch/powerpc/platforms/cell/spufs/run.c   | 21
 arch/powerpc/platforms/cell/spufs/sched.c | 19
 arch/powerpc/xmon/xmon.c                  |  1
 include/asm-powerpc/spu.h                 |  1
 include/asm-powerpc/spu_csa.h             |  2
 6 files changed, 55 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 70c660121ec..78f905bc6a4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+	int ret;
+
 	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
-	/* Handle kernel space hash faults immediately.
-	   User hash faults need to be deferred to process context. */
-	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-	    && REGION_ID(ea) != USER_REGION_ID
-	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-		spu_restart_dma(spu);
-		return 0;
+	/*
+	 * Handle kernel space hash faults immediately. User hash
+	 * faults need to be deferred to process context.
+	 */
+	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+	    (REGION_ID(ea) != USER_REGION_ID)) {
+
+		spin_unlock(&spu->register_lock);
+		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+		spin_lock(&spu->register_lock);
+
+		if (!ret) {
+			spu_restart_dma(spu);
+			return 0;
+		}
 	}
 
 	spu->class_1_dar = ea;
@@ -324,17 +334,13 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
 	spu->class_0_dar = spu_mfc_dar_get(spu);
-	spin_unlock(&spu->register_lock);
-
 	spu->stop_callback(spu, 0);
-
 	spu->class_0_pending = 0;
-	spu->class_0_dsisr = 0;
 	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
+	spin_unlock(&spu->register_lock);
 
 	return IRQ_HANDLED;
 }
@@ -357,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
 	spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
 
-	if (stat & CLASS1_SEGMENT_FAULT_INTR)
-		__spu_trap_data_seg(spu, dar);
-
-	spin_unlock(&spu->register_lock);
 	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 			dar, dsisr);
 
+	if (stat & CLASS1_SEGMENT_FAULT_INTR)
+		__spu_trap_data_seg(spu, dar);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
 
@@ -376,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
 	spu->class_1_dsisr = 0;
 	spu->class_1_dar = 0;
 
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -394,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
 	mask = spu_int_mask_get(spu, 2);
 	/* ignore interrupts we're not waiting for */
 	stat &= mask;
-
 	/* mailbox interrupts are level triggered. mask them now before
 	 * acknowledging */
 	if (stat & mailbox_intrs)
 		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 	/* acknowledge all interrupts before the callbacks */
 	spu_int_stat_clear(spu, 2, stat);
-	spin_unlock(&spu->register_lock);
 
 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -421,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
 		spu->wbox_callback(spu);
 
 	spu->stats.class2_intr++;
+
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
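The locking change above is the core of the spu_base.c part of the patch: each class 0/1/2 interrupt handler now keeps spu->register_lock held across its callbacks, so __spu_trap_data_map() must drop the lock around hash_page(), which can block on other locks, and retake it before restarting DMA. Below is a minimal standalone sketch of that drop-and-retake pattern; it is not from the patch, it uses a pthreads mutex in place of the kernel spinlock, and slow_lookup() is an invented stand-in for hash_page().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static int dma_restarted;       /* stands in for spu_restart_dma() */

/* invented stand-in for hash_page(); must not run under register_lock */
static int slow_lookup(void)
{
        return 0;               /* 0 == success, as with hash_page() */
}

/* called with register_lock held; returns with it held */
static int handle_fault(void)
{
        int ret;

        /* drop the lock around the call that may block or re-enter */
        pthread_mutex_unlock(&register_lock);
        ret = slow_lookup();
        pthread_mutex_lock(&register_lock);

        if (!ret)
                dma_restarted = 1;      /* spu_restart_dma(spu) in the real code */
        return ret;
}

int main(void)
{
        int ret;

        pthread_mutex_lock(&register_lock);
        ret = handle_fault();
        pthread_mutex_unlock(&register_lock);

        printf("handle_fault() -> %d, dma_restarted=%d\n", ret, dma_restarted);
        return 0;
}

Holding the lock across the callbacks is what forces the unlock/relock here; before the patch the handlers released register_lock before calling out, which is exactly the window the patch closes.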
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b7493b86581..f7edba6cb79 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -27,7 +27,6 @@ void spufs_stop_callback(struct spu *spu, int irq)
 	switch(irq) {
 	case 0 :
 		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.class_0_dsisr = spu->class_0_dsisr;
 		ctx->csa.class_0_dar = spu->class_0_dar;
 		break;
 	case 1 :
@@ -51,18 +50,22 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	u64 dsisr;
 	u32 stopped;
 
-	*stat = ctx->ops->status_read(ctx);
-
-	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
-		return 1;
-
 	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
 		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
-	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
+
+top:
+	*stat = ctx->ops->status_read(ctx);
+	if (*stat & stopped) {
+		/*
+		 * If the spu hasn't finished stopping, we need to
+		 * re-read the register to get the stopped value.
+		 */
+		if (*stat & SPU_STATUS_RUNNING)
+			goto top;
 		return 1;
+	}
 
-	dsisr = ctx->csa.class_0_dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;
 
 	dsisr = ctx->csa.class_1_dsisr;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37..e929e70a84e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
+	spu_associate_mm(spu, ctx->owner);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
 	spu->tgid = current->tgid;
-	spu_associate_mm(spu, ctx->owner);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
-	mb();
+	spin_unlock_irq(&spu->register_lock);
+
 	spu_unmap_mappings(ctx);
+
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
@@ -403,6 +407,8 @@ static int has_affinity(struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
+	u32 status;
+
 	spu_context_trace(spu_unbind_context__enter, ctx, spu);
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -423,18 +429,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->timestamp = jiffies;
 	ctx->state = SPU_STATE_SAVED;
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
-	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	spu->tgid = 0;
 	ctx->ops = &spu_backing_ops;
 	spu->flags = 0;
 	spu->ctx = NULL;
+	spin_unlock_irq(&spu->register_lock);
+
+	spu_associate_mm(spu, NULL);
 
 	ctx->stats.slb_flt += (spu->stats.slb_flt -
 		ctx->stats.slb_flt_base);
@@ -444,6 +454,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	/* This maps the underlying spu state to idle */
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	ctx->spu = NULL;
+
+	if (spu_stopped(ctx, &status))
+		wake_up_all(&ctx->stop_wq);
 }
 
 /**
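The spu_stopped() rework above handles a transient state: a stop cause can already be latched in the status register while SPU_STATUS_RUNNING is still set, so the register is re-read until the running bit drops and the value is stable. Here is a standalone sketch of that retry loop, with invented status bits and a read_status() that simulates a register settling after a couple of reads; the kernel code expresses the same loop with a goto.

#include <stdint.h>
#include <stdio.h>

#define STATUS_RUNNING          0x1u
#define STATUS_STOPPED_BY_STOP  0x2u    /* one of several stop-cause bits */

/* invented stand-in for ctx->ops->status_read(); settles after two reads */
static uint32_t read_status(void)
{
        static int reads;
        return (reads++ < 2) ? (STATUS_RUNNING | STATUS_STOPPED_BY_STOP)
                             : STATUS_STOPPED_BY_STOP;
}

static int is_stopped(uint32_t *stat)
{
        do {
                *stat = read_status();
                if (!(*stat & STATUS_STOPPED_BY_STOP))
                        return 0;       /* no stop cause pending */
                /* stop cause latched but the SPU is still winding down */
        } while (*stat & STATUS_RUNNING);
        return 1;                       /* stable: stopped and not running */
}

int main(void)
{
        uint32_t stat;
        int stopped = is_stopped(&stat);

        printf("stopped=%d stat=0x%x\n", stopped, stat);
        return 0;
}

In the patch itself this check also gains a new caller: spu_unbind_context() now calls spu_stopped() after tearing down the context and wakes anyone sleeping on ctx->stop_wq.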
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1702de9395e..bfcf70ee895 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2844,7 +2844,6 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", flags);
 	DUMP_FIELD(spu, "%d", class_0_pending);
 	DUMP_FIELD(spu, "0x%lx", class_0_dar);
-	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
 	DUMP_FIELD(spu, "0x%lx", class_1_dar);
 	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 6abead6e681..99348c1f4ca 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -131,7 +131,6 @@ struct spu {
 	u64 flags;
 	u64 class_0_pending;
 	u64 class_0_dar;
-	u64 class_0_dsisr;
 	u64 class_1_dar;
 	u64 class_1_dsisr;
 	size_t ls_size;
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 129ec148d45..a40fd491250 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -254,7 +254,7 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_0_dar, class_0_pending;
 	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;