openwrt-xburst/target/linux/cns3xxx/patches/049-cns3xxx_smp_support.patch
kaloz c8ba3d408b [cns3xxx]: upgrade to 2.6.39
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@27030 3c298f89-4303-0410-b956-a3cf2f4a3e73
2011-05-30 13:55:34 +00:00
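
This patch adds SMP bring-up for the dual-core CNS3XXX (MPCore) SoCs: a holding-pen entry stub (headsmp.S), the boot/release protocol and CPU maps (platsmp.c), CPU hotplug support (hotplug.c), the TWD local timer hook (localtimer.c), the IPI glue (mach/smp.h), and the Kconfig change that allows SMP on ARCH_CNS3XXX.

The heart of the bring-up is the pen_release handshake between the boot CPU and each secondary. The sketch below is a minimal userspace illustration of that handshake only (plain C11 with pthreads, not kernel code): the main thread plays the role of boot_secondary(), each worker plays the holding pen in cns3xxx_secondary_startup(). Only the name pen_release is taken from the patch; every other name in the sketch is invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int pen_release = -1;   /* -1: pen closed; >= 0: id of the cpu to release */

static void *secondary(void *arg)
{
    int cpu = (int)(long)arg;

    /* "holding pen": spin until the primary names this cpu (the real code sits in WFI) */
    while (atomic_load(&pen_release) != cpu)
        ;

    printf("cpu%d: released from pen, entering secondary init\n", cpu);

    /* like platform_secondary_init(): clear pen_release so the primary stops waiting */
    atomic_store(&pen_release, -1);
    return NULL;
}

int main(void)
{
    pthread_t t[NCPUS];

    for (int cpu = 1; cpu < NCPUS; cpu++)
        pthread_create(&t[cpu], NULL, secondary, (void *)(long)cpu);

    /* like boot_secondary(): release the secondaries one at a time */
    for (int cpu = 1; cpu < NCPUS; cpu++) {
        atomic_store(&pen_release, cpu);          /* write_pen_release(cpu) */
        while (atomic_load(&pen_release) != -1)   /* the real code bounds this wait with a jiffies timeout */
            ;
        printf("cpu0: cpu%d is up\n", cpu);
    }

    for (int cpu = 1; cpu < NCPUS; cpu++)
        pthread_join(t[cpu], NULL);
    return 0;
}

In the real patch the secondary waits in WFI and is woken by a GIC soft interrupt, and the primary gives up after one second; the sketch replaces both with busy-waiting to stay self-contained.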

--- a/arch/arm/mach-cns3xxx/Makefile
+++ b/arch/arm/mach-cns3xxx/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o devices.o
obj-$(CONFIG_PCI) += pcie.o
obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/headsmp.S
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/arm/mach-cns3xxx/headsmp.S
+ *
+ * Copyright (c) 2003 ARM Limited
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * CNS3XXX specific entry point for secondary CPUs. This
+ * provides a "holding pen" into which all secondary cores are held
+ * until we're ready for them to initialise.
+ */
+ENTRY(cns3xxx_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+ .align
+1: .long .
+ .long pen_release
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/hotplug.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/arm/mach-cns3xxx/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ /*
+ * here's the WFI
+ */
+ asm(".word 0xe320f003\n"
+ :
+ :
+ : "memory", "cc");
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+ * we don't allow CPU 0 to be shutdown (it is still too special
+ * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/localtimer.c
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-cns3xxx/localtimer.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+#include <mach/irqs.h>
+
+/*
+ * Setup the local clock events for a CPU.
+ */
+void __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+ evt->irq = IRQ_LOCALTIMER;
+ twd_timer_setup(evt);
+}
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/platsmp.c
@@ -0,0 +1,168 @@
+/*
+ * linux/arch/arm/mach-cns3xxx/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * Copyright 2011 Gateworks Corporation
+ * Chris Lang <clang@gateworks.com>
+ *
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_scu.h>
+#include <asm/unified.h>
+#include <mach/hardware.h>
+#include <mach/cns3xxx.h>
+
+#include "core.h"
+
+extern void cns3xxx_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void __cpuinit write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
+
+static void __iomem *scu_base_addr(void)
+{
+ return (void __iomem *)(CNS3XXX_TC11MP_SCU_BASE_VIRT);
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_secondary_init(0);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+ * CPUs in the holding pen until we're ready for them. However,
+ * since we haven't sent them a soft interrupt, they shouldn't
+ * be there.
+ */
+ write_pen_release(cpu);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+ * the boot monitor to read the system wide flags register,
+ * and branch to the address found there.
+ */
+ smp_cross_call(cpumask_of(cpu), 1);
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ void __iomem *scu_base = scu_base_addr();
+ unsigned int i, ncores;
+
+ ncores = scu_base ? scu_get_core_count(scu_base) : 1;
+
+ /* sanity check */
+ if (ncores > NR_CPUS) {
+ printk(KERN_WARNING
+ "cns3xxx: no. of cores (%d) greater than configured "
+ "maximum of %d - clipping\n",
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+
+ scu_enable(scu_base_addr());
+
+ /*
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
+ */
+ __raw_writel(virt_to_phys(cns3xxx_secondary_startup),
+ (void __iomem *)(0xFFF07000 + 0x0600));
+}
--- /dev/null
+++ b/arch/arm/mach-cns3xxx/include/mach/smp.h
@@ -0,0 +1,13 @@
+#ifndef __MACH_SMP_H
+#define __MACH_SMP_H
+
+#include <asm/hardware/gic.h>
+
+/*
+ * We use IRQ1 as the IPI
+ */
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
+{
+ gic_raise_softirq(mask, ipi);
+}
+#endif
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1325,7 +1325,7 @@ config SMP
depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
- ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE
+ ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE || ARCH_CNS3XXX
select USE_GENERIC_SMP_HELPERS
select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
help