summary refs log tree commit diff
path: root/arch/riscv/include/asm/kvm_vcpu_timer.h
diff options
context:
space:
mode:
author Atish Patra <atishp@rivosinc.com> 2022-07-22 09:50:47 -0700
committer Palmer Dabbelt <palmer@rivosinc.com> 2022-08-12 07:43:57 -0700
commit 8f5cb44b1bae8520c0705ce348b30ffb1fdda43a (patch)
tree 4e80fff0b3baebf935b46b2aa13a2c62678754a6 /arch/riscv/include/asm/kvm_vcpu_timer.h
parent 9801002f76c63327cae6e90097d3d0afb1e1b562 (diff)
download linux-8f5cb44b1bae8520c0705ce348b30ffb1fdda43a.tar.gz
linux-8f5cb44b1bae8520c0705ce348b30ffb1fdda43a.tar.bz2
linux-8f5cb44b1bae8520c0705ce348b30ffb1fdda43a.zip
RISC-V: KVM: Support sstc extension
Sstc extension allows the guest to program the vstimecmp CSR directly instead of making an SBI call to the hypervisor to program the next event. The timer interrupt is also directly injected to the guest by the hardware in this case. To maintain backward compatibility, the hypervisors also update the vstimecmp in an SBI set_time call if the hardware supports it. Thus, the older kernels in guest also take advantage of the sstc extension.

Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Acked-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/all/CAAhSdy2mb6wyqy0NAn9BcTWKMYEc0Z4zU3s3j7oNqBz6eDQ9sg@mail.gmail.com/
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Diffstat (limited to 'arch/riscv/include/asm/kvm_vcpu_timer.h')
-rw-r--r-- arch/riscv/include/asm/kvm_vcpu_timer.h | 7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
index 50138e2eb91b..0d8fdb8ec63a 100644
--- a/arch/riscv/include/asm/kvm_vcpu_timer.h
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
u64 next_cycles;
/* Underlying hrtimer instance */
struct hrtimer hrt;
+
+ /* Flag to check if sstc is enabled or not */
+ bool sstc_enabled;
+ /* A function pointer to switch between stimecmp or hrtimer at runtime */
+ int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
};
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
@@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
#endif