Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--   arch/riscv/include/asm/kvm_host.h   |  10
-rw-r--r--   arch/riscv/include/asm/kvm_nacl.h   | 245
-rw-r--r--   arch/riscv/include/asm/perf_event.h |   3
-rw-r--r--   arch/riscv/include/asm/sbi.h        | 120
4 files changed, 378 insertions, 0 deletions
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 2e2254fd2a2a..35eab6e0f4ae 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -286,6 +286,16 @@ struct kvm_vcpu_arch { } sta; }; +/* + * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, + * arrived in guest context. For riscv, any event that arrives while a vCPU is + * loaded is considered to be "in guest". + */ +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} + static inline void kvm_arch_sync_events(struct kvm *kvm) {} #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 diff --git a/arch/riscv/include/asm/kvm_nacl.h b/arch/riscv/include/asm/kvm_nacl.h new file mode 100644 index 000000000000..4124d5e06a0f --- /dev/null +++ b/arch/riscv/include/asm/kvm_nacl.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Ventana Micro Systems Inc. + */ + +#ifndef __KVM_NACL_H +#define __KVM_NACL_H + +#include <linux/jump_label.h> +#include <linux/percpu.h> +#include <asm/byteorder.h> +#include <asm/csr.h> +#include <asm/sbi.h> + +struct kvm_vcpu_arch; + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available); +#define kvm_riscv_nacl_available() \ + static_branch_unlikely(&kvm_riscv_nacl_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available); +#define kvm_riscv_nacl_sync_csr_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available); +#define kvm_riscv_nacl_sync_hfence_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_hfence_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available); +#define kvm_riscv_nacl_sync_sret_available() \ + static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available) + +DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available); +#define kvm_riscv_nacl_autoswap_csr_available() \ + static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available) + +struct kvm_riscv_nacl { + void *shmem; + phys_addr_t shmem_phys; +}; +DECLARE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl); + +void __kvm_riscv_nacl_hfence(void *shmem, + unsigned long control, + unsigned long page_num, + unsigned long page_count); + +void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch, + unsigned long sbi_ext_id, + unsigned long sbi_func_id); + +int kvm_riscv_nacl_enable(void); + +void kvm_riscv_nacl_disable(void); + +void kvm_riscv_nacl_exit(void); + +int kvm_riscv_nacl_init(void); + +#ifdef CONFIG_32BIT +#define lelong_to_cpu(__x) le32_to_cpu(__x) +#define cpu_to_lelong(__x) cpu_to_le32(__x) +#else +#define lelong_to_cpu(__x) le64_to_cpu(__x) +#define cpu_to_lelong(__x) cpu_to_le64(__x) +#endif + +#define nacl_shmem() \ + this_cpu_ptr(&kvm_riscv_nacl)->shmem + +#define nacl_scratch_read_long(__shmem, __offset) \ +({ \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + lelong_to_cpu(*__p); \ +}) + +#define nacl_scratch_write_long(__shmem, __offset, __val) \ +do { \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + *__p = cpu_to_lelong(__val); \ +} while (0) + +#define nacl_scratch_write_longs(__shmem, __offset, __array, __count) \ +do { \ + unsigned int __i; \ + unsigned long *__p = (__shmem) + \ + SBI_NACL_SHMEM_SCRATCH_OFFSET + \ + (__offset); \ + for (__i = 0; __i < (__count); __i++) \ + __p[__i] = cpu_to_lelong((__array)[__i]); \ +} 
while (0) + +#define nacl_sync_hfence(__e) \ + sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE, \ + (__e), 0, 0, 0, 0, 0) + +#define nacl_hfence_mkconfig(__type, __order, __vmid, __asid) \ +({ \ + unsigned long __c = SBI_NACL_SHMEM_HFENCE_CONFIG_PEND; \ + __c |= ((__type) & SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT; \ + __c |= (((__order) - SBI_NACL_SHMEM_HFENCE_ORDER_BASE) & \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT; \ + __c |= ((__vmid) & SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK) \ + << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT; \ + __c |= ((__asid) & SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK); \ + __c; \ +}) + +#define nacl_hfence_mkpnum(__order, __addr) \ + ((__addr) >> (__order)) + +#define nacl_hfence_mkpcount(__order, __size) \ + ((__size) >> (__order)) + +#define nacl_hfence_gvma(__shmem, __gpa, __gpsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA, \ + __order, 0, 0), \ + nacl_hfence_mkpnum(__order, __gpa), \ + nacl_hfence_mkpcount(__order, __gpsz)) + +#define nacl_hfence_gvma_all(__shmem) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL, \ + 0, 0, 0), 0, 0) + +#define nacl_hfence_gvma_vmid(__shmem, __vmid, __gpa, __gpsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID, \ + __order, __vmid, 0), \ + nacl_hfence_mkpnum(__order, __gpa), \ + nacl_hfence_mkpcount(__order, __gpsz)) + +#define nacl_hfence_gvma_vmid_all(__shmem, __vmid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL, \ + 0, __vmid, 0), 0, 0) + +#define nacl_hfence_vvma(__shmem, __vmid, __gva, __gvsz, __order) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA, \ + __order, __vmid, 0), \ + nacl_hfence_mkpnum(__order, __gva), \ + nacl_hfence_mkpcount(__order, __gvsz)) + +#define nacl_hfence_vvma_all(__shmem, __vmid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL, \ + 0, __vmid, 0), 0, 0) + +#define nacl_hfence_vvma_asid(__shmem, __vmid, __asid, __gva, __gvsz, __order)\ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID, \ + __order, __vmid, __asid), \ + nacl_hfence_mkpnum(__order, __gva), \ + nacl_hfence_mkpcount(__order, __gvsz)) + +#define nacl_hfence_vvma_asid_all(__shmem, __vmid, __asid) \ +__kvm_riscv_nacl_hfence(__shmem, \ + nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL, \ + 0, __vmid, __asid), 0, 0) + +#define nacl_csr_read(__shmem, __csr) \ +({ \ + unsigned long *__a = (__shmem) + SBI_NACL_SHMEM_CSR_OFFSET; \ + lelong_to_cpu(__a[SBI_NACL_SHMEM_CSR_INDEX(__csr)]); \ +}) + +#define nacl_csr_write(__shmem, __csr, __val) \ +do { \ + void *__s = (__shmem); \ + unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \ + unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \ + u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \ + __a[__i] = cpu_to_lelong(__val); \ + __b[__i >> 3] |= 1U << (__i & 0x7); \ +} while (0) + +#define nacl_csr_swap(__shmem, __csr, __val) \ +({ \ + void *__s = (__shmem); \ + unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \ + unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \ + u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \ + unsigned long __r = lelong_to_cpu(__a[__i]); \ + __a[__i] = cpu_to_lelong(__val); \ + __b[__i >> 3] |= 1U << 
(__i & 0x7); \ + __r; \ +}) + +#define nacl_sync_csr(__csr) \ + sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_CSR, \ + (__csr), 0, 0, 0, 0, 0) + +/* + * Each ncsr_xyz() macro defined below has it's own static-branch so every + * use of ncsr_xyz() macro emits a patchable direct jump. This means multiple + * back-to-back ncsr_xyz() macro usage will emit multiple patchable direct + * jumps which is sub-optimal. + * + * Based on the above, it is recommended to avoid multiple back-to-back + * ncsr_xyz() macro usage. + */ + +#define ncsr_read(__csr) \ +({ \ + unsigned long __r; \ + if (kvm_riscv_nacl_available()) \ + __r = nacl_csr_read(nacl_shmem(), __csr); \ + else \ + __r = csr_read(__csr); \ + __r; \ +}) + +#define ncsr_write(__csr, __val) \ +do { \ + if (kvm_riscv_nacl_sync_csr_available()) \ + nacl_csr_write(nacl_shmem(), __csr, __val); \ + else \ + csr_write(__csr, __val); \ +} while (0) + +#define ncsr_swap(__csr, __val) \ +({ \ + unsigned long __r; \ + if (kvm_riscv_nacl_sync_csr_available()) \ + __r = nacl_csr_swap(nacl_shmem(), __csr, __val); \ + else \ + __r = csr_swap(__csr, __val); \ + __r; \ +}) + +#define nsync_csr(__csr) \ +do { \ + if (kvm_riscv_nacl_sync_csr_available()) \ + nacl_sync_csr(__csr); \ +} while (0) + +#endif diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h index 665bbc9b2f84..bcc928fd3785 100644 --- a/arch/riscv/include/asm/perf_event.h +++ b/arch/riscv/include/asm/perf_event.h @@ -8,6 +8,7 @@ #ifndef _ASM_RISCV_PERF_EVENT_H #define _ASM_RISCV_PERF_EVENT_H +#ifdef CONFIG_PERF_EVENTS #include <linux/perf_event.h> #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs @@ -17,4 +18,6 @@ (regs)->sp = current_stack_pointer; \ (regs)->status = SR_PP; \ } +#endif + #endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 98f631b051db..6c82318065cf 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -34,6 +34,7 @@ enum sbi_ext_id { SBI_EXT_PMU = 0x504D55, SBI_EXT_DBCN = 0x4442434E, SBI_EXT_STA = 0x535441, + SBI_EXT_NACL = 0x4E41434C, /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START = 0x08000000, @@ -281,6 +282,125 @@ struct sbi_sta_struct { #define SBI_SHMEM_DISABLE -1 +enum sbi_ext_nacl_fid { + SBI_EXT_NACL_PROBE_FEATURE = 0x0, + SBI_EXT_NACL_SET_SHMEM = 0x1, + SBI_EXT_NACL_SYNC_CSR = 0x2, + SBI_EXT_NACL_SYNC_HFENCE = 0x3, + SBI_EXT_NACL_SYNC_SRET = 0x4, +}; + +enum sbi_ext_nacl_feature { + SBI_NACL_FEAT_SYNC_CSR = 0x0, + SBI_NACL_FEAT_SYNC_HFENCE = 0x1, + SBI_NACL_FEAT_SYNC_SRET = 0x2, + SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3, +}; + +#define SBI_NACL_SHMEM_ADDR_SHIFT 12 +#define SBI_NACL_SHMEM_SCRATCH_OFFSET 0x0000 +#define SBI_NACL_SHMEM_SCRATCH_SIZE 0x1000 +#define SBI_NACL_SHMEM_SRET_OFFSET 0x0000 +#define SBI_NACL_SHMEM_SRET_SIZE 0x0200 +#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET (SBI_NACL_SHMEM_SRET_OFFSET + \ + SBI_NACL_SHMEM_SRET_SIZE) +#define SBI_NACL_SHMEM_AUTOSWAP_SIZE 0x0080 +#define SBI_NACL_SHMEM_UNUSED_OFFSET (SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \ + SBI_NACL_SHMEM_AUTOSWAP_SIZE) +#define SBI_NACL_SHMEM_UNUSED_SIZE 0x0580 +#define SBI_NACL_SHMEM_HFENCE_OFFSET (SBI_NACL_SHMEM_UNUSED_OFFSET + \ + SBI_NACL_SHMEM_UNUSED_SIZE) +#define SBI_NACL_SHMEM_HFENCE_SIZE 0x0780 +#define SBI_NACL_SHMEM_DBITMAP_OFFSET (SBI_NACL_SHMEM_HFENCE_OFFSET + \ + SBI_NACL_SHMEM_HFENCE_SIZE) +#define SBI_NACL_SHMEM_DBITMAP_SIZE 0x0080 +#define SBI_NACL_SHMEM_CSR_OFFSET (SBI_NACL_SHMEM_DBITMAP_OFFSET + \ + 
SBI_NACL_SHMEM_DBITMAP_SIZE) +#define SBI_NACL_SHMEM_CSR_SIZE ((__riscv_xlen / 8) * 1024) +#define SBI_NACL_SHMEM_SIZE (SBI_NACL_SHMEM_CSR_OFFSET + \ + SBI_NACL_SHMEM_CSR_SIZE) + +#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num) \ + ((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff)) + +#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ ((__riscv_xlen / 8) * 4) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX \ + (SBI_NACL_SHMEM_HFENCE_SIZE / \ + SBI_NACL_SHMEM_HFENCE_ENTRY_SZ) +#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num) \ + (SBI_NACL_SHMEM_HFENCE_OFFSET + \ + (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \ + SBI_NACL_SHMEM_HFENCE_ENTRY(__num) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)\ + (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8)) +#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)\ + (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \ + ((__riscv_xlen / 8) * 3)) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS 1 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT \ + (__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \ + SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS 3 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS 4 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1) + +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA 0x0 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL 0x1 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID 0x2 +#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL 0x3 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA 0x4 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL 0x5 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID 0x6 +#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL 0x7 + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS 1 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS) + +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS 7 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT \ + (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE 12 + +#if __riscv_xlen == 32 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 9 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 7 +#else +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 16 +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 14 +#endif +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT \ + SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS +#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1) +#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK \ + ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1) + +#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS BIT(0) +#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS ((__riscv_xlen / 8) * 1) + +#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i)) +#define SBI_NACL_SHMEM_SRET_X_LAST 31 + /* SBI spec version fields */ 
#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
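
Illustrative sketch (not part of the patch): the nacl_csr_write() macro added in kvm_nacl.h stores a CSR value into the shared-memory CSR array at the slot computed by SBI_NACL_SHMEM_CSR_INDEX() and sets the matching bit in the dirty bitmap so the SBI implementation knows which CSRs need syncing. The stand-alone user-space program below models only that index and bitmap arithmetic; the array sizes, the sample CSR number, and the output are illustrative, and the real macro additionally converts the value to little-endian via cpu_to_lelong().

/*
 * Stand-alone model of the nacl_csr_write() bookkeeping from kvm_nacl.h:
 * compute the CSR slot with the SBI_NACL_SHMEM_CSR_INDEX() formula, store
 * the value, and mark the slot in the dirty bitmap. Sizes and the sample
 * CSR number are illustrative; endianness conversion is omitted.
 */
#include <stdio.h>
#include <stdint.h>

/* Same formula as SBI_NACL_SHMEM_CSR_INDEX(): 12-bit CSR number -> 10-bit slot */
#define SHMEM_CSR_INDEX(csr)	((((csr) & 0xc00) >> 2) | ((csr) & 0xff))

static unsigned long csr_array[1024];	/* CSR area: 1024 XLEN-bit slots */
static uint8_t dirty_bitmap[1024 / 8];	/* one dirty bit per CSR slot    */

static void shmem_csr_write(unsigned int csr, unsigned long val)
{
	unsigned int i = SHMEM_CSR_INDEX(csr);

	csr_array[i] = val;			 /* kernel converts to LE here   */
	dirty_bitmap[i >> 3] |= 1U << (i & 0x7); /* mark slot dirty for sync     */
}

int main(void)
{
	unsigned int csr = 0x680;		/* hgatp CSR number, as an example */
	unsigned int i = SHMEM_CSR_INDEX(csr);

	shmem_csr_write(csr, 0x1234);
	printf("CSR 0x%x -> slot %u (dirty byte %u, bit %u)\n",
	       csr, i, i >> 3, i & 0x7);
	return 0;
}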
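
Similarly, nacl_hfence_mkconfig() packs the pending flag, fence type, page order, VMID, and ASID into the configuration word that __kvm_riscv_nacl_hfence() writes into an HFENCE entry. The sketch below reproduces that packing for the 64-bit layout defined in sbi.h (1 pending bit, 3 reserved bits, 4 type bits, 1 reserved bit, 7 order bits stored relative to order base 12, then 14 VMID bits above 16 ASID bits). It is a model for experimentation under those assumptions, not the kernel code itself, and the example inputs are arbitrary.

/*
 * Model of nacl_hfence_mkconfig() for __riscv_xlen == 64, using the field
 * widths from sbi.h. Produces the config word an HFENCE shared-memory
 * entry would carry; the inputs in main() are example values only.
 */
#include <stdio.h>
#include <stdint.h>

#define HFENCE_PEND		(1ULL << 63)	/* CONFIG_PEND: request pending      */
#define HFENCE_TYPE_SHIFT	56		/* 4-bit type below 3 reserved bits  */
#define HFENCE_TYPE_MASK	0xfULL
#define HFENCE_ORDER_SHIFT	48		/* 7-bit order below 1 reserved bit  */
#define HFENCE_ORDER_MASK	0x7fULL
#define HFENCE_ORDER_BASE	12		/* order is stored minus this base   */
#define HFENCE_VMID_SHIFT	16		/* 14-bit VMID above 16-bit ASID     */
#define HFENCE_VMID_MASK	0x3fffULL
#define HFENCE_ASID_MASK	0xffffULL

#define HFENCE_TYPE_GVMA_VMID	0x2		/* from SBI_NACL_SHMEM_HFENCE_TYPE_* */

static uint64_t hfence_mkconfig(uint64_t type, uint64_t order,
				uint64_t vmid, uint64_t asid)
{
	uint64_t c = HFENCE_PEND;

	c |= (type & HFENCE_TYPE_MASK) << HFENCE_TYPE_SHIFT;
	c |= ((order - HFENCE_ORDER_BASE) & HFENCE_ORDER_MASK) << HFENCE_ORDER_SHIFT;
	c |= (vmid & HFENCE_VMID_MASK) << HFENCE_VMID_SHIFT;
	c |= asid & HFENCE_ASID_MASK;
	return c;
}

int main(void)
{
	/* HFENCE.GVMA for VMID 5 at 4KiB granularity (order 12), no ASID */
	printf("config = 0x%016llx\n",
	       (unsigned long long)hfence_mkconfig(HFENCE_TYPE_GVMA_VMID, 12, 5, 0));
	return 0;
}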