From ed0bc98f8cbe4f8254759d333a47aedc816ff8c5 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Thu, 11 Jul 2019 12:24:03 +1000
Subject: powerpc/64s: Reimplement power4_idle code in C

This implements the tricky tracing and soft irq handling bits in C,
leaving the low level bit to asm.

A functional difference is that this redirects the interrupt exit to a
return stub to execute blr, rather than the lr address itself. This is
probably barely measurable on real hardware, but it keeps the link
stack balanced.

Tested with QEMU.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Move power4_fixup_nap back into exceptions-64s.S]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190711022404.18132-1-npiggin@gmail.com
---
 arch/powerpc/kernel/idle.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

(limited to 'arch/powerpc/kernel/idle.c')

diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index a36fd053c3db..422e31d2f5a2 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -77,6 +77,31 @@ void arch_cpu_idle(void)
 
 int powersave_nap;
 
+#ifdef CONFIG_PPC_970_NAP
+void power4_idle(void)
+{
+	if (!cpu_has_feature(CPU_FTR_CAN_NAP))
+		return;
+
+	if (!powersave_nap)
+		return;
+
+	if (!prep_irq_for_idle())
+		return;
+
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		asm volatile("DSSALL ; sync" ::: "memory");
+
+	power4_idle_nap();
+
+	/*
+	 * power4_idle_nap returns to our caller with interrupts enabled
+	 * (soft and hard). Our caller can cope with either interrupts
+	 * disabled or enabled upon return.
+	 */
+}
+#endif
+
 #ifdef CONFIG_SYSCTL
 /*
  * Register the sysctl to set/clear powersave_nap.
-- 
cgit v1.2.3