author		Vincent Donnefort <vdonnefort@google.com>	2023-08-11 12:20:37 +0100
committer	Marc Zyngier <maz@kernel.org>			2023-08-26 12:00:54 +0100
commit		f156a7d13fc35d0078cd644b8cf0a6f97cbbe2e2
tree		ea74d4df33d2d46d968819b918d1973d2cf00161 /arch/arm64/kvm/arm.c
parent		a6b33d009fc1fe80c935f18b714b36c81e1f1400
KVM: arm64: Remove size-order align in the nVHE hyp private VA range
commit f922c13e778d ("KVM: arm64: Introduce
pkvm_alloc_private_va_range()") and commit 92abe0f81e13 ("KVM: arm64:
Introduce hyp_alloc_private_va_range()") added an alignment for the
start address of any allocation into the nVHE hypervisor private VA
range.
This alignment (to the order of the size of the allocation) is
intended to enable efficient stack verification: if the PAGE_SHIFT
bit of the stack pointer is zero, the pointer is on the guard page
and a stack overflow has occurred.
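For illustration, the check this alignment enables could be written
as below. This is a minimal sketch, not code from this patch, and the
helper name is hypothetical:

  #include <linux/types.h>	/* bool */
  #include <asm/page.h>		/* PAGE_SIZE, PAGE_SHIFT */

  /*
   * Hypothetical helper, for illustration only: with the stack/guard
   * pair aligned to 2 * PAGE_SIZE, any guard-page address has the
   * PAGE_SHIFT bit clear and any valid stack address has it set, so
   * a single AND tells whether the stack pointer ran into the guard.
   */
  static inline bool hyp_stack_overflowed(unsigned long sp)
  {
  	return !(sp & PAGE_SIZE);	/* PAGE_SIZE == 1UL << PAGE_SHIFT */
  }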
But this is only necessary for stack allocations, and it can waste a
lot of VA space. So instead, add stack-specific functions that handle
the guard page requirements, while other users (e.g. the fixmap) get
only page alignment.
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230811112037.1147863-1-vdonnefort@google.com
Diffstat (limited to 'arch/arm64/kvm/arm.c')
-rw-r--r--	arch/arm64/kvm/arm.c	26
1 file changed, 1 insertion(+), 25 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3c015bdd35ee..93ada0aae507 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2283,30 +2283,8 @@ static int __init init_hyp_mode(void)
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
-		unsigned long hyp_addr;
 
-		/*
-		 * Allocate a contiguous HYP private VA range for the stack
-		 * and guard page. The allocation is also aligned based on
-		 * the order of its size.
-		 */
-		err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
-		if (err) {
-			kvm_err("Cannot allocate hyp stack guard page\n");
-			goto out_err;
-		}
-
-		/*
-		 * Since the stack grows downwards, map the stack to the page
-		 * at the higher address and leave the lower guard page
-		 * unbacked.
-		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
-		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
-		 */
-		err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
-					    __pa(stack_page), PAGE_HYP);
+		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2319,8 +2297,6 @@ static int __init init_hyp_mode(void)
 		 * has been mapped in the flexible private VA space.
 		 */
 		params->stack_pa = __pa(stack_page);
-
-		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
 	for_each_possible_cpu(cpu) {