2006-10-25 00:31:18 +08:00
|
|
|
#define DEBUG
|
|
|
|
|
2006-01-05 03:31:29 +08:00
|
|
|
#include <linux/wait.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
|
|
|
|
#include <asm/spu.h>
|
2006-11-21 01:45:10 +08:00
|
|
|
#include <asm/spu_priv1.h>
|
|
|
|
#include <asm/io.h>
|
2006-04-01 12:53:09 +08:00
|
|
|
#include <asm/unistd.h>
|
2006-01-05 03:31:29 +08:00
|
|
|
|
|
|
|
#include "spufs.h"
|
|
|
|
|
|
|
|
/* interrupt-level stop callback function. */
|
|
|
|
void spufs_stop_callback(struct spu *spu)
|
|
|
|
{
|
|
|
|
struct spu_context *ctx = spu->ctx;
|
|
|
|
|
|
|
|
wake_up_all(&ctx->stop_wq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Condition for the run loop's spufs_wait(): returns nonzero when the SPU
 * is no longer making progress and the caller must wake up.  Also stores
 * the freshly read status word through @stat.
 */
static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
{
	struct spu *spu;

	*stat = ctx->ops->status_read(ctx);

	/* the context was scheduled away from us */
	if (ctx->state != SPU_STATE_RUNNABLE)
		return 1;

	/* bit 0 of the status word clear means the SPU is not running */
	if (!(*stat & 0x1))
		return 1;

	spu = ctx->spu;

	/* a pending page-fault-class DSISR condition stops us as well */
	if (spu->dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	/* as does a pending class 0 (error) interrupt */
	if (spu->class_0_pending)
		return 1;

	return 0;
}
|
|
|
|
|
2006-11-21 01:45:10 +08:00
|
|
|
/*
 * Put the SPU into isolation mode by running the isolated-mode loader.
 *
 * Called with the context acquired.  Sequence: shut out userspace
 * mappings, purge the MFC DMA queue, drop the SPE into kernel
 * (privileged) mode, point signal1/signal2 at the loader image and start
 * it with the ISOLATE bit set, then poll until loading completes.
 *
 * Returns 0 on success, -ENODEV if no loader is available, -EIO on
 * timeout, -EACCES if the isolated load failed, -EINVAL if the SPU left
 * isolated state unexpectedly.  Privilege (problem-state mask) is
 * restored on every exit path that reached out_drop_priv.
 */
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	/* while all three bits stay set, the loader is still running */
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__FUNCTION__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader: its address is handed to the SPU through the
	 * two 32-bit signal-notification registers */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	/* poll (up to 1s) until the loader leaves the "loading" state */
	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

	/* success also falls through here (ret == 0) */
out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
|
|
|
|
|
2006-10-04 23:26:14 +08:00
|
|
|
/*
 * Acquire the context in a runnable state and start SPU execution.
 *
 * Returns 0 on success with the context still acquired; on failure the
 * context has been released and a negative errno is returned.
 */
static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
{
	int ret;
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		/* load the isolation-mode loader unless the SPU is already
		 * isolated */
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret) {
				/* Setup failed: release the context and bail
				 * out.  Previously we fell through and wrote
				 * the run-control register on the released
				 * context, racing with other users. */
				spu_release(ctx);
				return ret;
			}
		}

		/* if userspace has set the runcntrl register (eg, to issue an
		 * isolated exit), we need to re-set it here */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		/* non-isolated contexts start at the caller-supplied NPC */
		spu_start_tick(ctx);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Tear down after a run: stop the scheduler tick, snapshot the SPU
 * status and next program counter for the caller, and release the
 * context.  Returns -ERESTARTSYS if a signal is pending, else 0.
 */
static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
			       u32 * status)
{
	spu_stop_tick(ctx);
	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);
	spu_release(ctx);

	return signal_pending(current) ? -ERESTARTSYS : 0;
}
|
|
|
|
|
|
|
|
/*
 * The context was scheduled away while running; finish the old run and
 * start a new one.  Returns 0 to continue the run loop, the (positive)
 * status word if the SPU already stopped or halted, or a negative errno.
 */
static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
					 u32 *status)
{
	int ret;

	ret = spu_run_fini(ctx, npc, status);
	if (ret)
		return ret;

	/* a stop-and-signal or halt ends the run; report the raw status */
	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
		return *status;

	ret = spu_run_init(ctx, npc);
	if (ret)
		return ret;

	return 0;
}
|
|
|
|
|
[PATCH] spufs: allow SPU code to do syscalls
An SPU does not have a way to implement system calls
itself, but it can create intercepts to the kernel.
This patch uses the method defined by the JSRE interface
for C99 host library calls from an SPU to implement
Linux system calls. It uses the reserved SPU stop code
0x2104 for this, using the structure layout and syscall
numbers for ppc64-linux.
I'm still undecided wether it is better to have a list
of allowed syscalls or a list of forbidden syscalls,
since we can't allow an SPU to call all syscalls that
are defined for ppc64-linux.
This patch implements the easier choice of them, with a
blacklist that only prevents an SPU from calling anything
that interacts with its own execution, e.g fork, execve,
clone, vfork, exit, spu_run and spu_create and everything
that deals with signals.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-03-23 07:00:09 +08:00
|
|
|
/*
|
|
|
|
* SPU syscall restarting is tricky because we violate the basic
|
|
|
|
* assumption that the signal handler is running on the interrupted
|
|
|
|
* thread. Here instead, the handler runs on PowerPC user space code,
|
|
|
|
* while the syscall was called from the SPU.
|
|
|
|
* This means we can only do a very rough approximation of POSIX
|
|
|
|
* signal semantics.
|
|
|
|
*/
|
|
|
|
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
|
|
|
|
unsigned int *npc)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
switch (*spu_ret) {
|
|
|
|
case -ERESTARTSYS:
|
|
|
|
case -ERESTARTNOINTR:
|
|
|
|
/*
|
|
|
|
* Enter the regular syscall restarting for
|
|
|
|
* sys_spu_run, then restart the SPU syscall
|
|
|
|
* callback.
|
|
|
|
*/
|
|
|
|
*npc -= 8;
|
|
|
|
ret = -ERESTARTSYS;
|
|
|
|
break;
|
|
|
|
case -ERESTARTNOHAND:
|
|
|
|
case -ERESTART_RESTARTBLOCK:
|
|
|
|
/*
|
|
|
|
* Restart block is too hard for now, just return -EINTR
|
|
|
|
* to the SPU.
|
|
|
|
* ERESTARTNOHAND comes from sys_pause, we also return
|
|
|
|
* -EINTR from there.
|
|
|
|
* Assume that we need to be restarted ourselves though.
|
|
|
|
*/
|
|
|
|
*spu_ret = -EINTR;
|
|
|
|
ret = -ERESTARTSYS;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
printk(KERN_WARNING "%s: unexpected return code %ld\n",
|
|
|
|
__FUNCTION__, *spu_ret);
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int spu_process_callback(struct spu_context *ctx)
|
|
|
|
{
|
|
|
|
struct spu_syscall_block s;
|
|
|
|
u32 ls_pointer, npc;
|
|
|
|
char *ls;
|
|
|
|
long spu_ret;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* get syscall block from local store */
|
|
|
|
npc = ctx->ops->npc_read(ctx);
|
|
|
|
ls = ctx->ops->get_ls(ctx);
|
|
|
|
ls_pointer = *(u32*)(ls + npc);
|
|
|
|
if (ls_pointer > (LS_SIZE - sizeof(s)))
|
|
|
|
return -EFAULT;
|
|
|
|
memcpy(&s, ls + ls_pointer, sizeof (s));
|
|
|
|
|
|
|
|
/* do actual syscall without pinning the spu */
|
|
|
|
ret = 0;
|
|
|
|
spu_ret = -ENOSYS;
|
|
|
|
npc += 4;
|
|
|
|
|
|
|
|
if (s.nr_ret < __NR_syscalls) {
|
|
|
|
spu_release(ctx);
|
|
|
|
/* do actual system call from here */
|
|
|
|
spu_ret = spu_sys_callback(&s);
|
|
|
|
if (spu_ret <= -ERESTARTSYS) {
|
|
|
|
ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
|
|
|
|
}
|
|
|
|
spu_acquire(ctx);
|
|
|
|
if (ret == -ERESTARTSYS)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* write result, jump over indirect pointer */
|
|
|
|
memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
|
|
|
|
ctx->ops->npc_write(ctx, npc);
|
|
|
|
ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-01-05 03:31:29 +08:00
|
|
|
static inline int spu_process_events(struct spu_context *ctx)
|
|
|
|
{
|
|
|
|
struct spu *spu = ctx->spu;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (spu->class_0_pending)
|
|
|
|
ret = spu_irq_class_0_bottom(spu);
|
|
|
|
if (!ret && signal_pending(current))
|
|
|
|
ret = -ERESTARTSYS;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Main entry point for sys_spu_run: run the SPU context until it stops,
 * halts, or the calling thread must return to userspace.
 *
 * On return, *npc holds the SPU's next program counter and *event the
 * accumulated event bits.  The return value is the SPU status word on a
 * normal stop, or a negative errno (notably -ERESTARTSYS when a signal
 * interrupted the run).  run_sema serializes spu_run calls per context.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->ops->master_start(ctx);
	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		/* sleep until the SPU stops (see spu_stopped) */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		/* stop code 0x2104 is the reserved SPU syscall callback */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* the stop was consumed; keep the loop running */
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		/* scheduled away: finish this run and start a new one */
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret) {
				spu_stop_tick(ctx);
				goto out2;
			}
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->master_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	/* report the raw status word unless we were interrupted before a
	 * genuine stop/halt (a 0x2104 stop means a syscall restart, not a
	 * user-visible stop) */
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* stop code 0x3fff: deliver SIGTRAP for the debugger */
	if ((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}
|