mirror of https://gitee.com/openkylin/linux.git
[POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions
SPU class 0 & 1 exceptions may occur in parallel, so we may end up
overwriting csa.dsisr. This change adds dedicated fields for each class
to the spu and the spu context so that fault data is not overwritten.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
commit f3d69e0507
parent 7a2142002f
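Before reading the patch itself, the race is easiest to see in miniature. The standalone C sketch below is hypothetical (fake_spu and the handler names are invented for illustration; only the class_*_dsisr field names come from the patch): with a single shared dsisr, a class 0 exception delivered while class 1 fault data is still pending clobbers that data, while dedicated per-class fields keep the two paths independent.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical reduction of struct spu: the old shared field and the
 * new per-class fields, side by side, for comparison. */
struct fake_spu {
        uint64_t dsisr;           /* old: shared by all exception classes */
        uint64_t class_0_dsisr;   /* new: dedicated to class 0 (errors) */
        uint64_t class_1_dsisr;   /* new: dedicated to class 1 (translation) */
};

/* A class 1 (translation fault) exception records its fault status. */
static void class_1_exception(struct fake_spu *spu, uint64_t fault)
{
        spu->dsisr = fault;             /* old scheme: shared field */
        spu->class_1_dsisr = fault;     /* new scheme: per-class field */
}

/* A class 0 (error) exception delivered in parallel also writes the
 * shared field, destroying the class 1 data before it is consumed. */
static void class_0_exception(struct fake_spu *spu, uint64_t err)
{
        spu->dsisr = err;               /* old scheme: overwrites class 1 data */
        spu->class_0_dsisr = err;       /* new scheme: independent */
}

int main(void)
{
        struct fake_spu spu = {0};

        class_1_exception(&spu, 0x40000000);  /* a PTE-not-found style status */
        class_0_exception(&spu, 0x1);         /* concurrent class 0 error */

        /* The shared field now holds class 0 data; a page-fault path that
         * reads it would miss the translation fault entirely. */
        printf("shared dsisr:  0x%llx (class 1 status lost)\n",
               (unsigned long long)spu.dsisr);
        printf("class_1_dsisr: 0x%llx (preserved by the fix)\n",
               (unsigned long long)spu.class_1_dsisr);
        return 0;
}

Compiled and run, the sketch prints 0x1 for the shared field but 0x40000000 for the dedicated one, which is exactly the overwrite the patch closes off.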
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -226,11 +226,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
                 return 0;
         }
 
-        spu->class_0_pending = 0;
-        spu->dar = ea;
-        spu->dsisr = dsisr;
+        spu->class_1_dar = ea;
+        spu->class_1_dsisr = dsisr;
 
-        spu->stop_callback(spu);
+        spu->stop_callback(spu, 1);
+
+        spu->class_1_dar = 0;
+        spu->class_1_dsisr = 0;
 
         return 0;
 }
@@ -318,11 +320,15 @@ spu_irq_class_0(int irq, void *data)
         stat = spu_int_stat_get(spu, 0) & mask;
 
         spu->class_0_pending |= stat;
-        spu->dsisr = spu_mfc_dsisr_get(spu);
-        spu->dar = spu_mfc_dar_get(spu);
+        spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+        spu->class_0_dar = spu_mfc_dar_get(spu);
         spin_unlock(&spu->register_lock);
 
-        spu->stop_callback(spu);
+        spu->stop_callback(spu, 0);
+
+        spu->class_0_pending = 0;
+        spu->class_0_dsisr = 0;
+        spu->class_0_dar = 0;
 
         spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +369,9 @@ spu_irq_class_1(int irq, void *data)
         if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                 ;
 
+        spu->class_1_dsisr = 0;
+        spu->class_1_dar = 0;
+
         return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +405,10 @@ spu_irq_class_2(int irq, void *data)
                 spu->ibox_callback(spu);
 
         if (stat & CLASS2_SPU_STOP_INTR)
-                spu->stop_callback(spu);
+                spu->stop_callback(spu, 2);
 
         if (stat & CLASS2_SPU_HALT_INTR)
-                spu->stop_callback(spu);
+                spu->stop_callback(spu, 2);
 
         if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                 spu->mfc_callback(spu);
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
                 return 0;
 
         if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+                spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                        SPE_EVENT_DMA_ALIGNMENT);
 
         if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+                spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                        SPE_EVENT_INVALID_DMA);
 
         if (stat & CLASS0_SPU_ERROR_INTR)
-                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+                spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                        SPE_EVENT_SPE_ERROR);
 
+        ctx->csa.class_0_pending = 0;
+
         return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
          * in time, we can still expect to get the same fault
          * the immediately after the context restore.
          */
-        ea = ctx->csa.dar;
-        dsisr = ctx->csa.dsisr;
+        ea = ctx->csa.class_1_dar;
+        dsisr = ctx->csa.class_1_dsisr;
 
         if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                 return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
          * time slicing will not preempt the context while the page fault
          * handler is running. Context switch code removes mappings.
          */
-        ctx->csa.dar = ctx->csa.dsisr = 0;
+        ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
         /*
          * If we handled the fault successfully and are in runnable
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
         struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
          */
         if (ctx) {
                 /* Copy exception arguments into module specific structure */
-                ctx->csa.class_0_pending = spu->class_0_pending;
-                ctx->csa.dsisr = spu->dsisr;
-                ctx->csa.dar = spu->dar;
+                switch(irq) {
+                case 0 :
+                        ctx->csa.class_0_pending = spu->class_0_pending;
+                        ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+                        ctx->csa.class_0_dar = spu->class_0_dar;
+                        break;
+                case 1 :
+                        ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+                        ctx->csa.class_1_dar = spu->class_1_dar;
+                        break;
+                case 2 :
+                        break;
+                }
 
                 /* ensure that the exception status has hit memory before a
                  * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 
                 wake_up_all(&ctx->stop_wq);
         }
-
-        /* Clear callback arguments from spu structure */
-        spu->class_0_pending = 0;
-        spu->dsisr = 0;
-        spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
         if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
                 return 1;
 
-        dsisr = ctx->csa.dsisr;
+        dsisr = ctx->csa.class_0_dsisr;
+        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+                return 1;
+
+        dsisr = ctx->csa.class_1_dsisr;
         if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                 return 1;
 
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -332,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu)
         DUMP_FIELD(spu, "0x%lx", ls_size);
         DUMP_FIELD(spu, "0x%x", node);
         DUMP_FIELD(spu, "0x%lx", flags);
-        DUMP_FIELD(spu, "0x%lx", dar);
-        DUMP_FIELD(spu, "0x%lx", dsisr);
         DUMP_FIELD(spu, "%d", class_0_pending);
+        DUMP_FIELD(spu, "0x%lx", class_0_dar);
+        DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+        DUMP_FIELD(spu, "0x%lx", class_1_dar);
+        DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
         DUMP_FIELD(spu, "0x%lx", irqs[0]);
         DUMP_FIELD(spu, "0x%lx", irqs[1]);
         DUMP_FIELD(spu, "0x%lx", irqs[2]);
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -128,9 +128,11 @@ struct spu {
         unsigned int irqs[3];
         u32 node;
         u64 flags;
-        u64 dar;
-        u64 dsisr;
         u64 class_0_pending;
+        u64 class_0_dar;
+        u64 class_0_dsisr;
+        u64 class_1_dar;
+        u64 class_1_dsisr;
         size_t ls_size;
         unsigned int slb_replace;
         struct mm_struct *mm;
@@ -143,7 +145,7 @@ struct spu {
 
         void (* wbox_callback)(struct spu *spu);
         void (* ibox_callback)(struct spu *spu);
-        void (* stop_callback)(struct spu *spu);
+        void (* stop_callback)(struct spu *spu, int irq);
         void (* mfc_callback)(struct spu *spu);
 
         char irq_c0[8];
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -254,7 +254,8 @@ struct spu_state {
         u64 spu_chnldata_RW[32];
         u32 spu_mailbox_data[4];
         u32 pu_mailbox_data[1];
-        u64 dar, dsisr, class_0_pending;
+        u64 class_0_dar, class_0_dsisr, class_0_pending;
+        u64 class_1_dar, class_1_dsisr;
         unsigned long suspend_time;
         spinlock_t register_lock;
 };
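Taken together, the patch threads the class number through stop_callback() so that spufs copies only the firing class's fault state into the context save area. A minimal userspace sketch of that dispatch follows, with hypothetical mini_spu and mini_csa types standing in for struct spu and struct spu_state:

#include <stdint.h>

/* Hypothetical reductions of struct spu and struct spu_state. */
struct mini_spu {
        uint64_t class_0_pending, class_0_dar, class_0_dsisr;
        uint64_t class_1_dar, class_1_dsisr;
};

struct mini_csa {
        uint64_t class_0_pending, class_0_dar, class_0_dsisr;
        uint64_t class_1_dar, class_1_dsisr;
};

/* Mirrors the switch added to spufs_stop_callback(): each interrupt
 * class copies only its own fields, so concurrent class 0 and class 1
 * deliveries can no longer clobber each other's saved fault data. */
static void save_exception_state(struct mini_csa *csa,
                                 const struct mini_spu *spu, int irq)
{
        switch (irq) {
        case 0:
                csa->class_0_pending = spu->class_0_pending;
                csa->class_0_dsisr = spu->class_0_dsisr;
                csa->class_0_dar = spu->class_0_dar;
                break;
        case 1:
                csa->class_1_dsisr = spu->class_1_dsisr;
                csa->class_1_dar = spu->class_1_dar;
                break;
        case 2:
                /* class 2 (stop/halt/mailbox) carries no fault data */
                break;
        }
}

int main(void)
{
        struct mini_spu spu = { .class_1_dar = 0x1000,
                                .class_1_dsisr = 0x40000000 };
        struct mini_csa csa = {0};

        save_exception_state(&csa, &spu, 1);  /* a class 1 delivery */
        return csa.class_1_dar == 0x1000 ? 0 : 1;
}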