author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-12 18:39:43 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-12 18:39:43 -0700
commit | 69dac8e431af26173ca0a1ebc87054e01c585bcc (patch)
tree | a39774497b82ceb4fda0d5dad3e7dd56700ad312 /arch/riscv/kvm/vcpu_timer.c
parent | 6c833c0581f1c15db2e0344da19360cba75a3351 (diff)
parent | 5cef38dd03f33ef206eb792df0fb3b200d762546 (diff)
Merge tag 'riscv-for-linus-5.20-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull more RISC-V updates from Palmer Dabbelt:
"There's still a handful of new features in here, but there are a lot
of fixes/cleanups as well:
- Support for the Zicbom extension for explicit cache-block
management, along with the necessary bits to make the non-standard
cache management ops on the Allwinner D1 function
- Support for the Zihintpause extension, which codifies a go-slow
instruction used for cpu_relax()
- Support for the Sstc extension for supervisor-mode timer/counter
management
- Many device tree fixes and cleanups, including a large set for the
Canaan device trees
- A handful of fixes and cleanups for the PMU driver"
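
To make the Zihintpause item above concrete: cpu_relax() is a spin-wait hint, and with Zihintpause it can emit the architectural PAUSE instruction rather than a plain compiler barrier. The following is a minimal sketch of that idea, not the kernel's exact implementation; in particular, the `__riscv_zihintpause` predefine and the freestanding barrier() macro are assumptions made to keep the example self-contained.

```c
/*
 * Minimal sketch of a PAUSE-based cpu_relax(); NOT the kernel's exact code.
 * PAUSE is encoded as a FENCE with pred=W and succ=0 (0x0100000f), so on
 * cores without Zihintpause it executes as a harmless no-op fence.
 */
#define barrier() __asm__ __volatile__("" ::: "memory")	/* compiler barrier */

static inline void cpu_relax(void)
{
#ifdef __riscv_zihintpause
	/* Toolchain knows the extension: emit the mnemonic directly. */
	__asm__ __volatile__ ("pause");
#else
	/* Older assemblers: emit the raw 32-bit PAUSE encoding. */
	__asm__ __volatile__ (".4byte 0x0100000f");
#endif
	barrier();
}
```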
* tag 'riscv-for-linus-5.20-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (43 commits)
dt-bindings: gpio: sifive: add gpio-line-names
wireguard: selftests: set CONFIG_NONPORTABLE on riscv32
RISC-V: KVM: Support sstc extension
RISC-V: Improve SBI definitions
RISC-V: Move counter info definition to sbi header file
RISC-V: Fix SBI PMU calls for RV32
RISC-V: Update user page mapping only once during start
RISC-V: Fix counter restart during overflow for RV32
RISC-V: Prefer sstc extension if available
RISC-V: Enable sstc extension parsing from DT
RISC-V: Add SSTC extension CSR details
riscv:uprobe fix SR_SPIE set/clear handling
dt-bindings: riscv: fix SiFive l2-cache's cache-sets
riscv: ensure cpu_ops_sbi is declared
RISC-V: cpu_ops_spinwait.c should include head.h
RISC-V: Declare cpu_ops_spinwait in <asm/cpu_ops.h>
riscv: dts: starfive: correct number of external interrupts
riscv: dts: sifive unmatched: Add PWM controlled LEDs
riscv/purgatory: Omit use of bin2c
riscv/purgatory: hard-code obj-y in Makefile
...
Diffstat (limited to 'arch/riscv/kvm/vcpu_timer.c')
-rw-r--r-- | arch/riscv/kvm/vcpu_timer.c | 144
1 file changed, 138 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 595043857049..16f50c46ba39 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 	return 0;
 }
 
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+	csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+	return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
 	return 0;
 }
 
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	return t->timer_next_event(vcpu, ncycles);
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_vcpu_kick(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+		return true;
+	else
+		return false;
+}
+
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return;
+
+	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+	if (delta_ns) {
+		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+		t->next_set = true;
+	}
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
 int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 				 const struct kvm_one_reg *reg)
 {
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
 	t->init_done = true;
 	t->next_set = false;
 
+	/* Enable sstc for every vcpu if available in hardware */
+	if (riscv_isa_extension_available(NULL, SSTC)) {
+		t->sstc_enabled = true;
+		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+	} else {
+		t->sstc_enabled = false;
+		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+	}
+
 	return 0;
 }
 
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	t->next_cycles = -1ULL;
 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
 }
 
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
 {
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
-#ifdef CONFIG_64BIT
-	csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
 	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
 	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+	csr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	kvm_riscv_vcpu_update_timedelta(vcpu);
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+	csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+	t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	if (kvm_vcpu_is_blocking(vcpu))
+		kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
 void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;
```
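
For readers skimming the diff above, the heart of the change is the per-vCPU backend selection in kvm_riscv_vcpu_timer_init(): when the Sstc extension is present, guest timer events are programmed directly into CSR_VSTIMECMP (plus CSR_VSTIMECMPH on 32-bit) through the new timer_next_event callback; otherwise the existing host hrtimer path is kept. Condensed from the hunk above (declarations and surrounding code elided, so this fragment is not standalone):

```c
/* Condensed from kvm_riscv_vcpu_timer_init() in the diff above. */
if (riscv_isa_extension_available(NULL, SSTC)) {
	t->sstc_enabled = true;
	t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
	t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;	/* writes CSR_VSTIMECMP(H) */
} else {
	t->sstc_enabled = false;
	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
	t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;	/* arms a host hrtimer */
}
```

The companion kvm_riscv_vcpu_timer_save()/restore() hunks then shuttle next_cycles in and out of the vstimecmp CSRs across vCPU context switches, and a pending guest timer still wakes a blocked vCPU via the hrtimer armed in kvm_riscv_vcpu_timer_blocking().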