author    Arnd Bergmann <arnd@arndb.de>    2005-11-15 15:53:52 -0500
committer Paul Mackerras <paulus@samba.org>    2006-01-09 14:49:30 +1100
commit    8b3d6663c6217e4f50cc3720935a96da9b984117 (patch)
tree      5295c29787ac66c26ddf715868fda7fcd3ad5f97 /arch/powerpc/platforms/cell/spufs/context.c
parent    05b841174c289ca62a6b42d883b8791d9ac3a4bd (diff)
[PATCH] spufs: cooperative scheduler support
This adds a scheduler for SPUs to make it possible to use more logical SPUs than there are physical ones present in the system. There is currently no support for preempting a running SPU thread; a thread has to leave the SPU voluntarily, either by triggering an event on the SPU that causes it to return to the owning thread, or by being sent a signal.

This patch also adds operations that enable accessing an SPU in either runnable or saved state. We use an RW semaphore to protect the state of the SPU from changing underneath us while we are holding it readable. In order to change the state, the semaphore is acquired writable and a context save or restore is executed before it is downgraded back to read-only.

From: Mark Nutter <mnutter@us.ibm.com>, Uli Weigand <Ulrich.Weigand@de.ibm.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
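The semaphore protocol described in the commit message reduces to the classic rwsem downgrade pattern. The following is a minimal sketch of that pattern using generic, made-up names (struct state_obj, acquire_runnable); the actual spufs implementation is in the diff below:

#include <linux/rwsem.h>

enum obj_state { STATE_SAVED, STATE_RUNNABLE };

struct state_obj {
	struct rw_semaphore sema;	/* protects 'state' */
	enum obj_state state;
};

/* Returns with 'sema' held for reading and the object runnable. */
static void acquire_runnable(struct state_obj *obj)
{
	down_read(&obj->sema);
	if (obj->state == STATE_RUNNABLE)
		return;			/* fast path: already runnable */
	up_read(&obj->sema);

	down_write(&obj->sema);		/* exclusive access for the transition */
	if (obj->state == STATE_SAVED) {
		/* ... the context restore would run here ... */
		obj->state = STATE_RUNNABLE;
	}
	downgrade_write(&obj->sema);	/* keep holding it, now shared */
}

Note the re-check of the state under the write lock: another thread may have completed the transition in the window between up_read() and down_write().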
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/context.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 114
1 file changed, 96 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 41eea45..5d6195f 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -20,39 +20,38 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct address_space *local_store)
{
struct spu_context *ctx;
ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
goto out;
- /* Future enhancement: do not call spu_alloc()
- * here. This step should be deferred until
- * spu_run()!!
- *
- * More work needs to be done to read(),
- * write(), mmap(), etc., so that operations
- * are performed on CSA when the context is
- * not currently being run. In this way we
- * can support arbitrarily large number of
- * entries in /spu, allow state queries, etc.
+ /* Binding to physical processor deferred
+ * until spu_activate().
*/
- ctx->spu = spu_alloc();
- if (!ctx->spu)
- goto out_free;
spu_init_csa(&ctx->csa);
if (!ctx->csa.lscsa) {
- spu_free(ctx->spu);
goto out_free;
}
- init_rwsem(&ctx->backing_sema);
spin_lock_init(&ctx->mmio_lock);
kref_init(&ctx->kref);
+ init_rwsem(&ctx->state_sema);
+ init_waitqueue_head(&ctx->ibox_wq);
+ init_waitqueue_head(&ctx->wbox_wq);
+ ctx->ibox_fasync = NULL;
+ ctx->wbox_fasync = NULL;
+ ctx->state = SPU_STATE_SAVED;
+ ctx->local_store = local_store;
+ ctx->spu = NULL;
+ ctx->ops = &spu_backing_ops;
+ ctx->owner = get_task_mm(current);
goto out;
out_free:
kfree(ctx);
@@ -65,8 +64,11 @@ void destroy_spu_context(struct kref *kref)
{
struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref);
- if (ctx->spu)
- spu_free(ctx->spu);
+ down_write(&ctx->state_sema);
+ spu_deactivate(ctx);
+ ctx->ibox_fasync = NULL;
+ ctx->wbox_fasync = NULL;
+ up_write(&ctx->state_sema);
spu_fini_csa(&ctx->csa);
kfree(ctx);
}
@@ -82,4 +84,80 @@ int put_spu_context(struct spu_context *ctx)
return kref_put(&ctx->kref, &destroy_spu_context);
}
+/* give up the mm reference when the context is about to be destroyed */
+void spu_forget(struct spu_context *ctx)
+{
+ struct mm_struct *mm;
+ spu_acquire_saved(ctx);
+ mm = ctx->owner;
+ ctx->owner = NULL;
+ mmput(mm);
+ spu_release(ctx);
+}
+
+void spu_acquire(struct spu_context *ctx)
+{
+ down_read(&ctx->state_sema);
+}
+
+void spu_release(struct spu_context *ctx)
+{
+ up_read(&ctx->state_sema);
+}
+
+static void spu_unmap_mappings(struct spu_context *ctx)
+{
+ unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
+}
+
+int spu_acquire_runnable(struct spu_context *ctx)
+{
+ int ret = 0;
+ down_read(&ctx->state_sema);
+ if (ctx->state == SPU_STATE_RUNNABLE)
+ return 0;
+ /* ctx is about to be freed, can't acquire any more */
+ if (!ctx->owner) {
+ ret = -EINVAL;
+ goto out;
+ }
+ up_read(&ctx->state_sema);
+
+ down_write(&ctx->state_sema);
+ if (ctx->state == SPU_STATE_SAVED) {
+ spu_unmap_mappings(ctx);
+ ret = spu_activate(ctx, 0);
+ ctx->state = SPU_STATE_RUNNABLE;
+ }
+ downgrade_write(&ctx->state_sema);
+ if (ret)
+ goto out;
+
+ /* On success, we return holding the lock */
+ return ret;
+out:
+ /* Release here, to simplify calling code. */
+ up_read(&ctx->state_sema);
+
+ return ret;
+}
+
+void spu_acquire_saved(struct spu_context *ctx)
+{
+ down_read(&ctx->state_sema);
+
+ if (ctx->state == SPU_STATE_SAVED)
+ return;
+
+ up_read(&ctx->state_sema);
+ down_write(&ctx->state_sema);
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ spu_unmap_mappings(ctx);
+ spu_deactivate(ctx);
+ ctx->state = SPU_STATE_SAVED;
+ }
+
+ downgrade_write(&ctx->state_sema);
+}
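
A caller of the new interface follows the acquire/release pattern sketched here (a hypothetical caller for illustration, not part of this patch):

/* Hypothetical caller, for illustration only. */
static int run_spu_example(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		return ret;	/* on error, state_sema has already been released */

	/* ... operate on the now-runnable SPU through ctx->ops ... */

	spu_release(ctx);	/* drop the read-held state_sema */
	return 0;
}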