From d84050f48ebba73994b93ccf61cea2364dac8d75 Mon Sep 17 00:00:00 2001
From: Luke Browning
Date: Thu, 29 May 2008 17:46:10 -0300
Subject: [PATCH 1/4] powerpc/spufs: wait for stable spu status in spu_stopped()

If the spu is stopping (ie, the SPU_STATUS_RUNNING bit is still set),
re-read the register to get the final stopped value.

Signed-off-by: Luke Browning
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/run.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b7493b865812..0046bcfe495a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -51,14 +51,22 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	u64 dsisr;
 	u32 stopped;
 
-	*stat = ctx->ops->status_read(ctx);
-
-	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
-		return 1;
-
 	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
 		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
-	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
+
+top:
+	*stat = ctx->ops->status_read(ctx);
+	if (*stat & stopped) {
+		/*
+		 * If the spu hasn't finished stopping, we need to
+		 * re-read the register to get the stopped value.
+		 */
+		if (*stat & SPU_STATUS_RUNNING)
+			goto top;
+		return 1;
+	}
+
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;
 
 	dsisr = ctx->csa.class_0_dsisr;
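Note: the control flow above reduces to a read-until-settled loop. As a
standalone sketch (the helper and its function-pointer parameter are
invented for illustration; only the SPU_STATUS_* bits come from the
patch):

	/*
	 * Re-read a status register until a reported stop has settled:
	 * a stop bit together with a still-set RUNNING bit means the
	 * stop is in flight and the value is not yet final.
	 */
	static u32 read_stable_status(u32 (*status_read)(void))
	{
		u32 stopped = SPU_STATUS_INVALID_INSTR |
			SPU_STATUS_SINGLE_STEP |
			SPU_STATUS_STOPPED_BY_HALT |
			SPU_STATUS_STOPPED_BY_STOP;
		u32 stat;

		do {
			stat = status_read();
		} while ((stat & stopped) && (stat & SPU_STATUS_RUNNING));

		return stat;
	}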
From 1f64643aa5f5a17f1723f7ea0f17b7a3a8f632b3 Mon Sep 17 00:00:00 2001
From: Luke Browning
Date: Thu, 5 Jun 2008 17:30:25 +0800
Subject: [PATCH 2/4] powerpc/spufs: remove class_0_dsisr from spu exception handling

According to the CBEA, the SPU dsisr is not updated for class 0
exceptions. spu_stopped() is testing the dsisr that was passed to it
from the class 0 exception handler, so we return a false positive here.

This patch cleans up the interrupt handler and the erroneous test in
spu_stopped(). It also removes the field from the csa, since it is not
needed to process class 0 events.

Signed-off-by: Luke Browning
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c  | 2 --
 arch/powerpc/platforms/cell/spufs/run.c | 5 -----
 arch/powerpc/xmon/xmon.c                | 1 -
 include/asm-powerpc/spu.h               | 1 -
 include/asm-powerpc/spu_csa.h           | 2 +-
 5 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 70c660121ec4..96b5f0f1c11e 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -324,14 +324,12 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
 	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
 	spu->stop_callback(spu, 0);
 
 	spu->class_0_pending = 0;
-	spu->class_0_dsisr = 0;
 	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 0046bcfe495a..f7edba6cb795 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -27,7 +27,6 @@ void spufs_stop_callback(struct spu *spu, int irq)
 	switch(irq) {
 	case 0 :
 		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.class_0_dsisr = spu->class_0_dsisr;
 		ctx->csa.class_0_dar = spu->class_0_dar;
 		break;
 	case 1 :
@@ -69,10 +68,6 @@ top:
 	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;
 
-	dsisr = ctx->csa.class_0_dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
-		return 1;
-
 	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1702de9395ee..bfcf70ee8959 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2844,7 +2844,6 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", flags);
 	DUMP_FIELD(spu, "%d", class_0_pending);
 	DUMP_FIELD(spu, "0x%lx", class_0_dar);
-	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
 	DUMP_FIELD(spu, "0x%lx", class_1_dar);
 	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 6abead6e681a..99348c1f4cab 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -131,7 +131,6 @@ struct spu {
 	u64 flags;
 	u64 class_0_pending;
 	u64 class_0_dar;
-	u64 class_0_dsisr;
 	u64 class_1_dar;
 	u64 class_1_dsisr;
 	size_t ls_size;
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 129ec148d451..a40fd491250c 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -254,7 +254,7 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_0_dar, class_0_pending;
 	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
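Note: with the class 0 test gone, the fault check in spu_stopped()
consults the class 1 dsisr alone, which is the only copy the hardware
updates. A sketch of the remaining test (the helper name is invented
for illustration):

	/*
	 * Only the class 1 (MFC storage) exception updates the dsisr,
	 * so a pending translation fault is detected from that register
	 * alone; the class 0 copy never held valid data for this test.
	 */
	static int spu_fault_pending(struct spu_state *csa)
	{
		return (csa->class_1_dsisr &
			(MFC_DSISR_PTE_NOT_FOUND |
			 MFC_DSISR_ACCESS_DENIED)) != 0;
	}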
From 2c911a14b74fa9cf815a936f310e4fa85bee77ce Mon Sep 17 00:00:00 2001
From: Luke Browning
Date: Fri, 13 Jun 2008 14:17:35 +1000
Subject: [PATCH 3/4] powerpc/spufs: synchronize interaction between spu exception handling and time slicing

Time slicing can occur at the same time as spu exception handling,
resulting in the wakeup of the wrong thread.

This change uses the spu's register_lock to enforce synchronization
between bind/unbind and spu exception handling so that they are
mutually exclusive.

Signed-off-by: Luke Browning
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c    | 42 ++++++++++++++---------
 arch/powerpc/platforms/cell/spufs/sched.c | 14 ++++++--
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96b5f0f1c11e..78f905bc6a42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+	int ret;
+
 	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
-	/* Handle kernel space hash faults immediately.
-	   User hash faults need to be deferred to process context. */
-	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-	    && REGION_ID(ea) != USER_REGION_ID
-	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-		spu_restart_dma(spu);
-		return 0;
+	/*
+	 * Handle kernel space hash faults immediately. User hash
+	 * faults need to be deferred to process context.
+	 */
+	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+	    (REGION_ID(ea) != USER_REGION_ID)) {
+
+		spin_unlock(&spu->register_lock);
+		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+		spin_lock(&spu->register_lock);
+
+		if (!ret) {
+			spu_restart_dma(spu);
+			return 0;
+		}
 	}
 
 	spu->class_1_dar = ea;
@@ -325,14 +335,12 @@ spu_irq_class_0(int irq, void *data)
 
 	spu->class_0_pending |= stat;
 	spu->class_0_dar = spu_mfc_dar_get(spu);
-	spin_unlock(&spu->register_lock);
-
 	spu->stop_callback(spu, 0);
-
 	spu->class_0_pending = 0;
 	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
+	spin_unlock(&spu->register_lock);
 
 	return IRQ_HANDLED;
 }
@@ -355,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
 
-	if (stat & CLASS1_SEGMENT_FAULT_INTR)
-		__spu_trap_data_seg(spu, dar);
-
-	spin_unlock(&spu->register_lock);
 	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 			dar, dsisr);
 
+	if (stat & CLASS1_SEGMENT_FAULT_INTR)
+		__spu_trap_data_seg(spu, dar);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
 
@@ -374,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
 
 	spu->class_1_dsisr = 0;
 	spu->class_1_dar = 0;
 
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -392,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
 	mask = spu_int_mask_get(spu, 2);
 	/* ignore interrupts we're not waiting for */
 	stat &= mask;
-
 	/* mailbox interrupts are level triggered. mask them now before
 	 * acknowledging */
 	if (stat & mailbox_intrs)
 		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 	/* acknowledge all interrupts before the callbacks */
 	spu_int_stat_clear(spu, 2, stat);
-	spin_unlock(&spu->register_lock);
 
 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
@@ -419,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
 		spu->wbox_callback(spu);
 
 	spu->stats.class2_intr++;
+
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37f..cd725670b1b5 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 
+	spu_associate_mm(spu, ctx->owner);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
 	spu->tgid = current->tgid;
-	spu_associate_mm(spu, ctx->owner);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
-	mb();
+	spin_unlock_irq(&spu->register_lock);
+
 	spu_unmap_mappings(ctx);
+
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
@@ -423,18 +427,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->timestamp = jiffies;
 	ctx->state = SPU_STATE_SAVED;
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
-	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	spu->tgid = 0;
 	ctx->ops = &spu_backing_ops;
 	spu->flags = 0;
 	spu->ctx = NULL;
+	spin_unlock_irq(&spu->register_lock);
+
+	spu_associate_mm(spu, NULL);
 
 	ctx->stats.slb_flt +=
 		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
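Note: the invariant introduced here is that the class 0/1/2 handlers
run their callbacks entirely under spu->register_lock, while bind and
unbind switch spu->ctx and the callback pointers under the same lock
with interrupts disabled, so a handler sees either the old context or
the new one, never a half-switched state. hash_page() is called with
the lock dropped and retaken around it. A condensed sketch of the two
sides (helper names invented for illustration):

	/* interrupt side: runs in hard irq context */
	static void irq_side(struct spu *spu)
	{
		spin_lock(&spu->register_lock);
		if (spu->stop_callback)		/* stable under the lock */
			spu->stop_callback(spu, 0);
		spin_unlock(&spu->register_lock);
	}

	/* scheduler side: excludes the handlers while switching */
	static void scheduler_side(struct spu *spu, struct spu_context *ctx)
	{
		spin_lock_irq(&spu->register_lock);
		spu->ctx = ctx;
		spu->stop_callback = ctx ? spufs_stop_callback : NULL;
		spin_unlock_irq(&spu->register_lock);
	}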
From 028fda0a6c80c26f1d9f403b4490b9ddc74ffa3b Mon Sep 17 00:00:00 2001
From: Luke Browning
Date: Mon, 16 Jun 2008 10:42:38 +1000
Subject: [PATCH 4/4] powerpc/spufs: fix missed stop-and-signal event

There is a delay in the transition to the stopped state for class 2
interrupts. In some cases, the controlling thread detects the state of
the spu as running and goes back to sleep, resulting in a hung
application, as the event is missed.

This change detects the stop condition and re-generates the wakeup
event after a context save.

Signed-off-by: Luke Browning
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/sched.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index cd725670b1b5..e929e70a84e3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -407,6 +407,8 @@ static int has_affinity(struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
+	u32 status;
+
 	spu_context_trace(spu_unbind_context__enter, ctx, spu);
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -452,6 +454,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	/* This maps the underlying spu state to idle */
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	ctx->spu = NULL;
+
+	if (spu_stopped(ctx, &status))
+		wake_up_all(&ctx->stop_wq);
 }
 
 /**
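Note: the race closed here is a classic lost wakeup. The controlling
thread sleeps on ctx->stop_wq with spu_stopped() as its wait condition;
if the stop raced with time slicing, the pre-save status read could
still show RUNNING and the thread would sleep again with no further
event coming. Re-testing after the context save, when the status has
settled into the saved state, re-generates the event. A condensed
sketch of the two sides (simplified: spufs uses its own spufs_wait()
helper rather than bare wait_event_interruptible(), and the helper
names below are invented for illustration):

	/* controlling-thread side, as in spufs_run_spu(): */
	static int wait_for_spu_stop(struct spu_context *ctx, u32 *status)
	{
		return wait_event_interruptible(ctx->stop_wq,
						spu_stopped(ctx, status));
	}

	/* unbind side, after spu_save(): catch a stop that raced the
	 * waiter's earlier test and notify the waiter again. */
	static void renotify_stop(struct spu_context *ctx)
	{
		u32 status;

		if (spu_stopped(ctx, &status))
			wake_up_all(&ctx->stop_wq);
	}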