author	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-30 15:11:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-30 15:11:26 -0700
commit	d5fd43bac8396c9b213faf14cd4560d73b30f618 (patch)
tree	3d872c274360358a482619c09492752848f638be /arch/parisc/kernel/smp.c
parent	57c06b6e1e74b62eabaacacbe40bdb29c7e990eb (diff)
parent	a9fe7fa7d874a536e0540469f314772c054a0323 (diff)
Merge tag 'for-5.18/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull more parisc architecture updates from Helge Deller:

 - Revert a patch to the invalidate/flush vmap routines which broke
   kernel patching functions on older PA-RISC machines.

 - Fix the kernel patching code wrt locking and flushing. Works now on
   B160L machine as well.

 - Fix CPU IRQ affinity for LASI, WAX and Dino chips

 - Add CPU hotplug support

 - Detect the hppa-suse-linux-gcc compiler when cross-compiling

* tag 'for-5.18/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix patch code locking and flushing
  parisc: Find a new timesync master if current CPU is removed
  parisc: Move common_stext into .text section when CONFIG_HOTPLUG_CPU=y
  parisc: Rewrite arch_cpu_idle_dead() for CPU hotplugging
  parisc: Implement __cpu_die() and __cpu_disable() for CPU hotplugging
  parisc: Add PDC locking functions for rendezvous code
  parisc: Move disable_sr_hashing_asm() into .text section
  parisc: Move CPU startup-related functions into .text section
  parisc: Move store_cpu_topology() into text section
  parisc: Switch from GENERIC_CPU_DEVICES to GENERIC_ARCH_TOPOLOGY
  parisc: Ensure set_firmware_width() is called only once
  parisc: Add constants for control registers and clean up mfctl()
  parisc: Detect hppa-suse-linux-gcc compiler for cross-building
  parisc: Clean up cpu_check_affinity() and drop cpu_set_affinity_irq()
  parisc: Fix CPU affinity for Lasi, WAX and Dino chips
  Revert "parisc: Fix invalidate/flush vmap routines"
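
The CPU hotplug support added here hooks parisc into the kernel's generic hotplug machinery, which is driven from userspace through sysfs. As a rough illustration of how the new support is exercised (not part of this series; it assumes only the long-standing /sys/devices/system/cpu/cpuN/online interface, and the "cpuhot" name is made up), a minimal userspace helper could look like this:

/*
 * Illustration only, not part of this patch series: a minimal userspace
 * helper that exercises CPU hotplug through the standard sysfs
 * interface, e.g. "./cpuhot 1 0" to offline CPU 1 and "./cpuhot 1 1"
 * to bring it back online.
 */
#include <stdio.h>
#include <stdlib.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	/* the kernel accepts "0" (offline) or "1" (online) */
	if (fprintf(f, "%d\n", online ? 1 : 0) < 0) {
		perror("write");
		fclose(f);
		return -1;
	}
	/* write errors (e.g. a CPU that cannot be offlined) may only
	 * surface when the file is flushed and closed */
	return fclose(f) ? -1 : 0;
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <cpu> <0|1>\n", argv[0]);
		return 1;
	}
	return set_cpu_online(atoi(argv[1]), atoi(argv[2])) ? 1 : 0;
}

Offlining a CPU through this interface ends up in the new __cpu_disable() and __cpu_die() paths in the diff below; bringing it back online goes through __cpu_up() and smp_boot_one_cpu().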
Diffstat (limited to 'arch/parisc/kernel/smp.c')
-rw-r--r--	arch/parisc/kernel/smp.c	108
1 file changed, 93 insertions(+), 15 deletions(-)
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a32a882a2d58..24d0744c3b3a 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
+#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <asm/current.h>
@@ -60,8 +61,6 @@ volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
static volatile int cpu_now_booting;
-static int parisc_max_cpus = 1;
-
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
@@ -269,7 +268,7 @@ void arch_send_call_function_single_ipi(int cpu)
/*
* Called by secondaries to update state and initialize CPU registers.
*/
-static void __init
+static void
smp_cpu_init(int cpunum)
{
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
@@ -309,7 +308,7 @@ smp_cpu_init(int cpunum)
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
-void __init smp_callin(unsigned long pdce_proc)
+void smp_callin(unsigned long pdce_proc)
{
int slave_id = cpu_now_booting;
@@ -334,11 +333,28 @@ void __init smp_callin(unsigned long pdce_proc)
/*
* Bring one cpu online.
*/
-int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
+#ifdef CONFIG_HOTPLUG_CPU
+ int i;
+
+ /* reset irq statistics for this CPU */
+ memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+
+ if (desc && desc->kstat_irqs)
+ *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+ }
+#endif
+
+ /* wait until last booting CPU has started. */
+ while (cpu_now_booting)
+ ;
+
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
@@ -374,7 +390,6 @@ int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
- smp_init_current_idle_task = NULL;
goto alive ;
}
udelay(100);
@@ -415,25 +430,88 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
spin_lock_init(&per_cpu(ipi_lock, cpu));
init_cpu_present(cpumask_of(0));
-
- parisc_max_cpus = max_cpus;
- if (!max_cpus)
- printk(KERN_INFO "SMP mode deactivated.\n");
}
-void smp_cpus_done(unsigned int cpu_max)
+void __init smp_cpus_done(unsigned int cpu_max)
{
- return;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
- return -ENOSYS;
+ if (cpu_online(cpu))
+ return 0;
+
+ if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
+ return -EIO;
+
+ return cpu_online(cpu) ? 0 : -EIO;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ unsigned int cpu = smp_processor_id();
+
+ remove_cpu_topology(cpu);
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /* Find a new timesync master */
+ if (cpu == time_keeper_id) {
+ time_keeper_id = cpumask_first(cpu_online_mask);
+ pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+ }
+
+ disable_percpu_irq(IPI_IRQ);
+
+ irq_migrate_all_off_this_cpu();
+
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
+ /* disable all irqs, including timer irq */
+ local_irq_disable();
+
+ /* wait for next timer irq ... */
+ mdelay(1000/HZ+100);
+
+ /* ... and then clear all pending external irqs */
+ set_eiem(0);
+ mtctl(~0UL, CR_EIRR);
+ mfctl(CR_EIRR);
+ mtctl(0, CR_EIRR);
+#endif
+ return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ pdc_cpu_rendezvous_lock();
+
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ pr_info("CPU%u: is shutting down\n", cpu);
+
+ /* set task's state to interruptible sleep */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);
- return cpu_online(cpu) ? 0 : -ENOSYS;
+ pdc_cpu_rendezvous_unlock();
}
#ifdef CONFIG_PROC_FS
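
A note on the __cpu_die() hunk above: it runs on the CPU that requested the removal and blocks in cpu_wait_death(cpu, 5) until the dying CPU reports back; the reporting side runs from the idle loop on the CPU being removed (arch_cpu_idle_dead(), rewritten elsewhere in this series). The sketch below only shows the general shape of that handshake with the generic helpers as they existed around v5.18; it is not the parisc implementation from this merge, and the final hand-back-to-firmware step is a placeholder.

/*
 * Hedged sketch of the dying-CPU side that pairs with the
 * cpu_wait_death() call in __cpu_die() above.  Generic pattern only,
 * not the parisc code from this series.
 */
#include <linux/cpu.h>

void arch_cpu_idle_dead(void)
{
	/*
	 * Tell the CPU waiting in __cpu_die() that this CPU has finished
	 * tearing itself down (pairs with cpu_wait_death()).
	 */
	(void)cpu_report_death();

	/*
	 * Placeholder: a real implementation would hand the CPU back to
	 * firmware (the PDC rendezvous mentioned in the series) or park
	 * it in a low-power loop until it is brought up again.
	 */
	for (;;)
		;
}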