author | Bob Nelson <rrnelson@linux.vnet.ibm.com> | 2007-07-20 21:39:52 +0200 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-07-20 21:42:20 +0200 |
commit | 36aaccc1e96481e8310b1d13600096da0f24ff43 (patch) | |
tree | 2726a068ae0d4529ccde030136255abbd1b1e069 /arch | |
parent | 2414059420311e5384de646eebfd529c184afd3c (diff) | |
[CELL] oprofile: enable SPU switch notification to detect currently active SPU tasks
From: Maynard Johnson <mpjohn@us.ibm.com>
This patch extends spu_switch_event_register so that the caller is
also notified of currently active SPU tasks.
It also exports spu_switch_event_register and spu_switch_event_unregister
so that OProfile can access the notifications provided.
Signed-off-by: Maynard Johnson <mpjohn@us.ibm.com>
Signed-off-by: Carl Love <carll@us.ibm.com>
Signed-off-by: Bob Nelson <rrnelson@us.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
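
Since the notifications travel over a standard blocking notifier chain, a consumer such as OProfile only needs to register a notifier_block against the exported interface. The sketch below is not part of the patch; spu_notifier_cb, spu_prof_notifier, and the module wrappers are hypothetical names used for illustration. As set up by spu_switch_notify() in this patch, the callback receives the context's object_id as the notifier action value and the struct spu pointer as data.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/spu.h>

/* Called by the blocking notifier chain on every SPU context switch. */
static int spu_notifier_cb(struct notifier_block *nb,
                           unsigned long object_id, void *data)
{
        struct spu *spu = data;

        /* object_id identifies the SPU task; 0 means no context is loaded. */
        pr_debug("SPU %d now runs context %lx\n", spu->number, object_id);
        return NOTIFY_OK;
}

static struct notifier_block spu_prof_notifier = {     /* hypothetical name */
        .notifier_call = spu_notifier_cb,
};

static int __init spu_prof_example_init(void)
{
        /*
         * With this patch, a successful registration also triggers
         * notify_spus_active(), so SPU tasks that are already running
         * are reported shortly afterwards.
         */
        return spu_switch_event_register(&spu_prof_notifier);
}

static void __exit spu_prof_example_exit(void)
{
        spu_switch_event_unregister(&spu_prof_notifier);
}

module_init(spu_prof_example_init);
module_exit(spu_prof_example_exit);
MODULE_LICENSE("GPL");

Because spu_switch_event_register() now calls notify_spus_active() on success, contexts already running on an SPU are reported shortly after registration instead of only at their next context switch.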
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/run.c | 23 |
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 34 |
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 6 |
3 files changed, 55 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c0238ea5b55a..0b50fa5cb39d 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,15 +18,17 @@ void spufs_stop_callback(struct spu *spu)
         wake_up_all(&ctx->stop_wq);
 }
 
-static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
 {
         struct spu *spu;
         u64 pte_fault;
 
         *stat = ctx->ops->status_read(ctx);
 
-        if (ctx->state != SPU_STATE_RUNNABLE)
-                return 1;
+        spu = ctx->spu;
+        if (ctx->state != SPU_STATE_RUNNABLE ||
+            test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+                return 1;
         pte_fault = spu->dsisr &
             (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
         return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
@@ -124,7 +126,7 @@ out:
         return ret;
 }
 
-static int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
@@ -158,8 +160,8 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
         return 0;
 }
 
-static int spu_run_fini(struct spu_context *ctx, u32 * npc,
-                       u32 * status)
+static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+                       u32 *status)
 {
         int ret = 0;
 
@@ -298,6 +300,7 @@ static inline int spu_process_events(struct spu_context *ctx)
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 {
         int ret;
+        struct spu *spu;
         u32 status;
 
         if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -333,6 +336,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
                 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                 if (unlikely(ret))
                         break;
+                spu = ctx->spu;
+                if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
+                                                &ctx->sched_flags))) {
+                        if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
+                                spu_switch_notify(spu, ctx);
+                                continue;
+                        }
+                }
 
                 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 49b8f6867a96..88ec333e90d3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -204,21 +204,51 @@ static void spu_remove_from_active_list(struct spu *spu)
 
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
 
-static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
 {
         blocking_notifier_call_chain(&spu_switch_notifier,
                             ctx ? ctx->object_id : 0, spu);
 }
 
+static void notify_spus_active(void)
+{
+        int node;
+
+        /*
+         * Wake up the active spu_contexts.
+         *
+         * When the awakened processes see their "notify_active" flag is set,
+         * they will call spu_switch_notify();
+         */
+        for_each_online_node(node) {
+                struct spu *spu;
+                mutex_lock(&spu_prio->active_mutex[node]);
+                list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+                        struct spu_context *ctx = spu->ctx;
+                        set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
+                        mb();   /* make sure any tasks woken up below */
+                                /* can see the bit(s) set above */
+                        wake_up_all(&ctx->stop_wq);
+                }
+                mutex_unlock(&spu_prio->active_mutex[node]);
+        }
+}
+
 int spu_switch_event_register(struct notifier_block * n)
 {
-        return blocking_notifier_chain_register(&spu_switch_notifier, n);
+        int ret;
+        ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
+        if (!ret)
+                notify_spus_active();
+        return ret;
 }
+EXPORT_SYMBOL_GPL(spu_switch_event_register);
 
 int spu_switch_event_unregister(struct notifier_block * n)
 {
         return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
 }
+EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
 
 /**
  * spu_bind_context - bind spu context to physical spu
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 42d8da8f0fb5..692dbd0edc37 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -44,6 +44,11 @@ enum {
         SPU_SCHED_WAS_ACTIVE,   /* was active upon spu_acquire_saved()  */
 };
 
+/* ctx->sched_flags */
+enum {
+        SPU_SCHED_NOTIFY_ACTIVE,
+};
+
 struct spu_context {
         struct spu *spu;                  /* pointer to a physical SPU */
         struct spu_state csa;             /* SPU context save area. */
@@ -240,6 +245,7 @@ void spu_release_saved(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
 void spu_set_timeslice(struct spu_context *ctx);
 void spu_update_sched_info(struct spu_context *ctx);
 void __spu_update_sched_info(struct spu_context *ctx);