path: root/arch/powerpc/platforms/cell/spu_base.c
author	Arnd Bergmann <arnd@arndb.de>	2006-01-05 17:05:29 +0300
committer	Paul Mackerras <paulus@samba.org>	2006-01-09 07:44:57 +0300
commit	2fb9d2063626374dd8a2514b3a730facac8235d8 (patch)
tree	b410dcdbc5aee656c37951be36951130450549e7	/arch/powerpc/platforms/cell/spu_base.c
parent	aeb013772a2cc85a8d0baffd64977d2888bc781d (diff)
download	linux-2fb9d2063626374dd8a2514b3a730facac8235d8.tar.xz
[PATCH] spufs: set irq affinity for running threads
So far, all SPU triggered interrupts always end up on the first SMT thread, which is a bad solution. This patch implements setting the affinity to the CPU that was running last when entering execution on an SPU. This should result in a significant reduction in IPI calls and better cache locality for SPE thread specific data.
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
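The helper added below is meant to be called from the spufs run/scheduler path when a context starts executing on an SPU. A minimal sketch of such a caller, assuming the caller already has the struct spu in hand; the wrapper name is illustrative and not part of this patch, while raw_smp_processor_id() is the standard way to get the current CPU id in kernel code:

/* Illustrative caller (not part of this patch): when a context begins
 * running on an SPU, route that SPU's interrupts to the CPU we are
 * currently executing on.
 */
static void spu_route_irqs_to_local_cpu(struct spu *spu)
{
	spu_irq_setaffinity(spu, raw_smp_processor_id());
}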
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	8
1 files changed, 8 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7fe3fa3da0e9..d75ae03df686 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -507,6 +507,14 @@ int spu_irq_class_1_bottom(struct spu *spu)
return ret;
}
+void spu_irq_setaffinity(struct spu *spu, int cpu)
+{
+	u64 target = iic_get_target_id(cpu);
+	u64 route = target << 48 | target << 32 | target << 16;
+	spu_int_route_set(spu, route);
+}
+EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+
static void __iomem * __init map_spe_prop(struct device_node *n,
const char *name)
{
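For reference, the route word simply replicates the interrupt controller target id returned by iic_get_target_id() into three 16-bit fields of the SPU interrupt routing register, presumably one field per SPU interrupt class. A standalone illustration of the bit packing with a made-up target value (the value 0x0043 is purely an example, not taken from the patch):

/* Hypothetical example: assume iic_get_target_id() returned 0x0043.
 * Each shift places the same target into one 16-bit routing field,
 * so route ends up as 0x0043004300430000.
 */
u64 target = 0x0043;
u64 route  = target << 48 | target << 32 | target << 16;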