RISC-V: Enable HOTPLUG_PARALLEL for secondary CPUs

The core kernel already supports parallel bringup of secondary
CPUs (aka HOTPLUG_PARALLEL). The x86 and MIPS architectures use
HOTPLUG_PARALLEL today, and ARM is also moving toward it.

On RISC-V, no arch-specific global data is accessed in the secondary
CPU bringup path, so enabling HOTPLUG_PARALLEL only requires two
changes (sketched below):
1) Providing a RISC-V specific arch_cpuhp_kick_ap_alive()
2) Calling cpuhp_ap_sync_alive() from smp_callin()
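
In outline (this mirrors the diff below; shown here as a sketch, not
the exact hunks), the RISC-V side amounts to a thin wrapper around the
existing start_secondary_cpu() helper plus a sync call on the newly
booted hart:

	/* arch/riscv/kernel/smpboot.c */
	#ifdef CONFIG_HOTPLUG_PARALLEL
	/* Core code calls this to start an AP; the wait/sync happens separately */
	int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
	{
		return start_secondary_cpu(cpu, tidle);
	}
	#endif

	/* ...and in smp_callin(), running on the freshly started hart: */
	#ifdef CONFIG_HOTPLUG_PARALLEL
		/* Report this CPU as alive and synchronize with the control CPU */
		cpuhp_ap_sync_alive();
	#endif

The !HOTPLUG_PARALLEL case keeps the existing completion-based
__cpu_up() path unchanged.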

This patch has been tested natively with OpenSBI on a QEMU RV64 virt
machine with 64 cores, and also with a KVM RISC-V guest with 32 VCPUs.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://patch.msgid.link/20250905122512.71684-1-apatel@ventanamicro.com
Signed-off-by: Paul Walmsley <pjw@kernel.org>
---
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -198,7 +198,7 @@ config RISCV
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+	select HOTPLUG_PARALLEL if HOTPLUG_CPU
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN

diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,9 @@
 #include "head.h"
 
+#ifndef CONFIG_HOTPLUG_PARALLEL
 static DECLARE_COMPLETION(cpu_running);
+#endif
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -179,6 +181,12 @@ static int start_secondary_cpu(int cpu, struct task_struct *tidle)
 	return -EOPNOTSUPP;
 }
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+	return start_secondary_cpu(cpu, tidle);
+}
+#else
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret = 0;
@@ -199,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return ret;
 }
+#endif
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
@@ -225,6 +234,10 @@ asmlinkage __visible void smp_callin(void)
 	mmgrab(mm);
 	current->active_mm = mm;
 
+#ifdef CONFIG_HOTPLUG_PARALLEL
+	cpuhp_ap_sync_alive();
+#endif
+
 	store_cpu_topology(curr_cpuid);
 	notify_cpu_starting(curr_cpuid);
@@ -243,7 +256,9 @@ asmlinkage __visible void smp_callin(void)
 	 */
 	local_flush_icache_all();
 	local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
 	complete(&cpu_running);
+#endif
 
 	/*
 	 * Disable preemption before enabling interrupts, so we don't try to
 	 * schedule a CPU that hasn't actually started yet.