From 17ff4a4b479fdc0c4e4d07a83bc937a83d3e7021 Mon Sep 17 00:00:00 2001 From: eillon Date: Mon, 7 Jul 2025 14:50:42 +0800 Subject: [PATCH 1/5] arm64/sysreg: add HDBSS related register information The ARM architecture added the HDBSS feature and descriptions of related registers (HDBSSBR/HDBSSPROD) in the DDI0601(ID121123) version, add them to Linux. Signed-off-by: eillon Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/esr.h | 4 +++ arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/include/asm/sysreg.h | 61 ++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index d6a4234afa54..b13ea7ad7737 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -124,6 +124,10 @@ #define ESR_ELx_CM_SHIFT (8) #define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT) +/* ISS2 field definitions for Data Aborts */ +#define ESR_ELx_HDBSSF_SHIFT (11) +#define ESR_ELx_HDBSSF (UL(1) << ESR_ELx_HDBSSF_SHIFT) + /* ISS field definitions for exceptions taken in to Hyp */ #define ESR_ELx_CV (UL(1) << 24) #define ESR_ELx_COND_SHIFT (20) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index c847d76ef569..35e451972294 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -96,6 +96,7 @@ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) /* VTCR_EL2 Registers bits */ +#define VTCR_EL2_HDBSS (1UL << 45) #define VTCR_EL2_RES1 (1U << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 761b26417a5d..d26f20bdff74 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -429,6 +429,67 @@ #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) + + +/* For SMCR_EL2 fields see SMCR_ELx */ + +#define REG_HDBSSBR_EL2 S3_4_C2_C3_2 +#define SYS_HDBSSBR_EL2 sys_reg(3, 4, 2, 3, 2) +#define SYS_HDBSSBR_EL2_Op0 3 +#define SYS_HDBSSBR_EL2_Op1 4 +#define SYS_HDBSSBR_EL2_CRn 2 +#define SYS_HDBSSBR_EL2_CRm 3 +#define SYS_HDBSSBR_EL2_Op2 2 + +#define HDBSSBR_EL2_BADDR GENMASK(55, 12) +#define HDBSSBR_EL2_BADDR_MASK GENMASK(55, 12) +#define HDBSSBR_EL2_BADDR_SHIFT 12 +#define HDBSSBR_EL2_BADDR_WIDTH 44 + +#define HDBSSBR_EL2_SZ GENMASK(3, 0) +#define HDBSSBR_EL2_SZ_MASK GENMASK(3, 0) +#define HDBSSBR_EL2_SZ_SHIFT 0 +#define HDBSSBR_EL2_SZ_WIDTH 4 +#define HDBSSBR_EL2_SZ_8KB UL(0b0001) +#define HDBSSBR_EL2_SZ_16KB UL(0b0010) +#define HDBSSBR_EL2_SZ_32KB UL(0b0011) +#define HDBSSBR_EL2_SZ_64KB UL(0b0100) +#define HDBSSBR_EL2_SZ_128KB UL(0b0101) +#define HDBSSBR_EL2_SZ_256KB UL(0b0110) +#define HDBSSBR_EL2_SZ_512KB UL(0b0111) +#define HDBSSBR_EL2_SZ_1MB UL(0b1000) +#define HDBSSBR_EL2_SZ_2MB UL(0b1001) + +#define HDBSSBR_EL2_RES0 (UL(0) | GENMASK_ULL(63, 56) | GENMASK_ULL(11, 4)) +#define HDBSSBR_EL2_RES1 (UL(0)) +#define HDBSSBR_EL2_UNKN (UL(0)) + +#define REG_HDBSSPROD_EL2 S3_4_C2_C3_3 +#define SYS_HDBSSPROD_EL2 sys_reg(3, 4, 2, 3, 3) +#define SYS_HDBSSPROD_EL2_Op0 3 +#define SYS_HDBSSPROD_EL2_Op1 4 +#define SYS_HDBSSPROD_EL2_CRn 2 +#define SYS_HDBSSPROD_EL2_CRm 3 +#define SYS_HDBSSPROD_EL2_Op2 3 + +#define HDBSSPROD_EL2_FSC GENMASK(31, 26) +#define HDBSSPROD_EL2_FSC_MASK GENMASK(31, 26) +#define HDBSSPROD_EL2_FSC_SHIFT 26 +#define HDBSSPROD_EL2_FSC_WIDTH 6 +#define HDBSSPROD_EL2_FSC_OK UL(0b000000) +#define HDBSSPROD_EL2_FSC_ExternalAbort UL(0b010000) +#define HDBSSPROD_EL2_FSC_GPF UL(0b101000) 
+ +#define HDBSSPROD_EL2_INDEX GENMASK(18, 0) +#define HDBSSPROD_EL2_INDEX_MASK GENMASK(18, 0) +#define HDBSSPROD_EL2_INDEX_SHIFT 0 +#define HDBSSPROD_EL2_INDEX_WIDTH 19 + +#define HDBSSPROD_EL2_RES0 (UL(0) | GENMASK_ULL(63, 32) | GENMASK_ULL(25, 19)) +#define HDBSSPROD_EL2_RES1 (UL(0)) +#define HDBSSPROD_EL2_UNKN (UL(0)) + + #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) -- Gitee From eb40f4be88b1e124384f2b43cb17e1037db1f35f Mon Sep 17 00:00:00 2001 From: eillon Date: Mon, 7 Jul 2025 14:54:04 +0800 Subject: [PATCH 2/5] arm64/kvm: support setting the DBM attr during memory abort Since ARMv8, page table entries have supported the DBM attribute. Add support for setting this attribute during user_mem_abort(). Signed-off-by: eillon Signed-off-by: Xie Xiaodong <624338359@qq.com> --- virt/kvm/arm/mmu.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 6e20da9bf4f5..c360c3fd0ff6 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -37,6 +37,7 @@ static unsigned long io_map_base; #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) +#define S2_PTE_DBM (UL(1) << 51) static bool is_iomap(unsigned long flags) { @@ -73,7 +74,7 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) * @size: Size of the range from the base to invalidate */ -void kvm_tlb_flush_vmid_range(struct kvm *kvm, +void kvm_tlb_flush_vmid_range(struct kvm *kvm, phys_addr_t addr, size_t size) { unsigned long pages, inval_pages; @@ -302,7 +303,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd, if (!pte_none(*pte)) { pte_t old_pte = *pte; kvm_set_pte(pte, __pte(0)); - + if (!stage2_unmap_defer_tlb_flush()) kvm_tlb_flush_vmid_ipa(kvm, addr); @@ -407,7 +408,7 @@ static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, next = stage2_pgd_addr_end(kvm, addr, end); if (!stage2_pgd_none(kvm, *pgd)) unmap_stage2_puds(kvm, pgd, addr, next); - + if (stage2_unmap_defer_tlb_flush()) /* Perform the deferred TLB invalidations */ kvm_tlb_flush_vmid_range(kvm, addr, size); @@ -1904,6 +1905,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } else { pte_t new_pte = kvm_pfn_pte(pfn, mem_type); + /* Set the DBM bit */ + if (writable) + new_pte = __pte(pte_val(new_pte) | S2_PTE_DBM); + if (writable) { new_pte = kvm_s2pte_mkwrite(new_pte); mark_page_dirty(kvm, gfn); -- Gitee From 7553f5e346b41c31a00079eb5245ff6c9fdc11f2 Mon Sep 17 00:00:00 2001 From: eillon Date: Mon, 7 Jul 2025 15:02:13 +0800 Subject: [PATCH 3/5] arm64/kvm: use an ioctl to enable/disable the HDBSS feature On arm64, the buffer size used by the HDBSS feature is configurable. Therefore, we cannot enable the HDBSS feature during KVM initialization; instead, it should be enabled when a live migration is triggered, at which point the user can configure the buffer size. The KVM_CAP_ARM_HW_DIRTY_STATE_TRACK capability is added so that the feature can be enabled/disabled through the KVM_ENABLE_CAP ioctl. Users (such as QEMU) can invoke the ioctl to enable HDBSS at the beginning of the migration and disable the feature by invoking the ioctl again at the end of the migration with size set to 0.
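For example, a VMM could drive the capability roughly as follows (illustrative sketch only: vm_fd and the chosen size are placeholders and error handling is omitted; per the handler added in this patch, args[0] carries the per-vCPU buffer size in the HDBSSBR_EL2.SZ encoding, so 1 means 8KB, 9 means 2MB, and 0 disables the feature):

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int set_hdbss(int vm_fd, __u64 size)
  {
          struct kvm_enable_cap cap;

          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_ARM_HW_DIRTY_STATE_TRACK;
          cap.args[0] = size;     /* 0 turns the feature back off */

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }

  /* At the start of migration, if the host advertises the capability: */
  if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_HW_DIRTY_STATE_TRACK) > 0)
          set_hdbss(vm_fd, 9);    /* 9 == HDBSSBR_EL2_SZ_2MB, the maximum accepted here */

  /* At the end of migration (or on cancel): */
  set_hdbss(vm_fd, 0);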
Signed-off-by: eillon Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/cpufeature.h | 12 +++++ arch/arm64/include/asm/kvm_host.h | 6 +++ arch/arm64/include/asm/kvm_mmu.h | 12 +++++ arch/arm64/include/asm/sysreg.h | 12 +++++ arch/arm64/kvm/hyp/switch.c | 1 + arch/arm64/kvm/hyp/sysreg-sr.c | 2 + arch/arm64/kvm/reset.c | 8 ++++ include/linux/kvm_host.h | 1 + include/uapi/linux/kvm.h | 4 +- tools/include/uapi/linux/kvm.h | 1 + virt/kvm/arm/arm.c | 3 ++ virt/kvm/arm/mmu.c | 2 +- virt/kvm/kvm_main.c | 73 ++++++++++++++++++++++++++++- 13 files changed, 133 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index f509501323e7..47ff1a5db978 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -622,6 +622,18 @@ static inline bool system_supports_fpsimd(void) return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } +static inline bool system_supports_hdbss(void) +{ + u64 mmfr1; + u32 val; + + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + val = cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_HADBS_SHIFT); + + return val == ID_AA64MMFR1_EL1_HAFDBS_HDBSS; +} + static inline bool system_uses_ttbr0_pan(void) { return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 01886b83d120..9b4e2d03d1b7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -341,6 +341,12 @@ struct kvm_vcpu_arch { /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; + + /* HDBSS registers info */ + struct { + u64 br_el2; + u64 prod_el2; + } hdbss; }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index ffe0aad96b17..c153292f8e7a 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -606,5 +606,17 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm) return kvm_phys_to_vttbr(baddr) | vmid_field | cnp; } +static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu) +{ + if (!vcpu->kvm->enable_hdbss) + return; + + write_sysreg_s(vcpu->arch.hdbss.br_el2, SYS_HDBSSBR_EL2); + write_sysreg_s(vcpu->arch.hdbss.prod_el2, SYS_HDBSSPROD_EL2); + + dsb(sy); + isb(); +} + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index d26f20bdff74..10b93bba206b 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -876,6 +876,18 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) +/* + * Definitions for the HDBSS feature + */ +#define HDBSS_MAX_SIZE HDBSSBR_EL2_SZ_2MB + +#define HDBSSBR_EL2(baddr, sz) (((baddr) & GENMASK(55, 12 + sz)) | \ + ((sz) << HDBSSBR_EL2_SZ_SHIFT)) +#define HDBSSBR_BADDR(br) ((br) & GENMASK(55, (12 + HDBSSBR_SZ(br)))) +#define HDBSSBR_SZ(br) (((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT) + +#define HDBSSPROD_IDX(prod) (((prod) & HDBSSPROD_EL2_INDEX_MASK) >> HDBSSPROD_EL2_INDEX_SHIFT) + #ifdef __ASSEMBLY__ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 624e5c83a497..f59b439c9cec 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ 
b/arch/arm64/kvm/hyp/switch.c @@ -633,6 +633,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) * (among other things). */ __activate_vm(vcpu->kvm); + __load_hdbss(vcpu); __activate_traps(vcpu); sysreg_restore_guest_state_vhe(guest_ctxt); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 7ddbc849b580..e855df16f738 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -263,6 +263,8 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) __sysreg_restore_user_state(guest_ctxt); __sysreg_restore_el1_state(guest_ctxt); + __load_hdbss(vcpu); + vcpu->arch.sysregs_loaded_on_cpu = true; activate_traps_vhe_load(vcpu); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index a3105ae464be..7100925b21d4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -206,7 +206,15 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { + struct page *hdbss_pg; kfree(vcpu->arch.sve_state); + + if (vcpu->arch.hdbss.br_el2) { + hdbss_pg = phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)); + if (hdbss_pg) + __free_pages(hdbss_pg, + HDBSSBR_SZ(vcpu->arch.hdbss.br_el2)); + } } static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9019db30b8a7..0152276ae834 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -515,6 +515,7 @@ struct kvm { struct srcu_struct irq_srcu; pid_t userspace_pid; bool vm_bugged; + bool enable_hdbss; }; #define kvm_err(fmt, ...) \ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ebb362be8c95..cb6ab8c7f3bc 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1008,6 +1008,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 #define KVM_CAP_ARM_VIRT_MSI_BYPASS 799 +#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 502 + #ifdef KVM_CAP_IRQ_ROUTING @@ -1248,7 +1250,7 @@ struct kvm_master_dev_info { __u32 nvectors; struct kvm_msi msi[]; }; - + /* * ioctls for VM fds */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 52641d8ca9e8..c342a4eec53c 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -757,6 +757,7 @@ struct kvm_ppc_resize_hpt { __u32 pad; }; +#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 502 #define KVMIO 0xAE /* machine type bits, to be used as argument to KVM_CREATE_VM */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 28cdd2f4d1ef..6ada153c0e05 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -229,6 +229,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif + case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: + r = system_supports_hdbss(); + break; default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index c360c3fd0ff6..8d2e5f10543c 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1906,7 +1906,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, pte_t new_pte = kvm_pfn_pte(pfn, mem_type); /* Set the DBM bit */ - if (writable) + if (writable && kvm->enable_hdbss && logging_active) new_pte = __pte(pte_val(new_pte) | S2_PTE_DBM); if (writable) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ac74b31efb81..784d38dd6ceb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1420,7 +1420,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, } } spin_unlock(&kvm->mmu_lock); - + if(flush)
kvm_flush_remote_tlbs_memslot(kvm, memslot); @@ -1716,7 +1716,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * tail pages of non-compound higher order allocations, which * would then underflow the refcount when the caller does the * required put_page. Don't allow those pages here. - */ + */ if (!kvm_try_get_pfn(pfn)) r = -EFAULT; @@ -3424,6 +3424,73 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, return -EINVAL; } +static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, + struct kvm_enable_cap *cap) +{ + unsigned long i; + struct kvm_vcpu *vcpu; + struct page *hdbss_pg; + int size = cap->args[0]; + + if (!system_supports_hdbss()) { + kvm_err("This system does not support HDBSS!\n"); + return -EINVAL; + } + + if (size < 0 || size > HDBSS_MAX_SIZE) { + kvm_err("Invalid HDBSS buffer size: %d!\n", size); + return -EINVAL; + } + + /* Enable the HDBSS feature if size > 0, otherwise disable it. */ + if (size) { + kvm->enable_hdbss = true; + kvm->arch.vtcr |= VTCR_EL2_HD | VTCR_EL2_HDBSS; + + kvm_for_each_vcpu(i, vcpu, kvm) { + hdbss_pg = alloc_pages(GFP_KERNEL, size); + if (!hdbss_pg) { + kvm_err("Alloc HDBSS buffer failed!\n"); + return -EINVAL; + } + + vcpu->arch.hdbss.br_el2 = + HDBSSBR_EL2(page_to_phys(hdbss_pg), size); + vcpu->arch.hdbss.prod_el2 = 0; + + /* + * We should kick vcpus out of guest mode here to + * load new vtcr value to vtcr_el2 register when + * re-enter guest mode. + */ + kvm_vcpu_kick(vcpu); + } + + kvm_info("Enable HDBSS success, HDBSS buffer size: %d\n", size); + } else if (kvm->enable_hdbss) { + kvm->arch.vtcr &= ~(VTCR_EL2_HD | VTCR_EL2_HDBSS); + + kvm_for_each_vcpu(i, vcpu, kvm) { + /* Kick vcpus to flush hdbss buffer. */ + kvm_vcpu_kick(vcpu); + + hdbss_pg = phys_to_page( + HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)); + if (hdbss_pg) + __free_pages(hdbss_pg, + HDBSSBR_SZ(vcpu->arch.hdbss.br_el2)); + + vcpu->arch.hdbss.br_el2 = 0; + vcpu->arch.hdbss.prod_el2 = 0; + } + + kvm->enable_hdbss = false; + kvm_info("Disable HDBSS success\n"); + } + + return 0; +} + static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -3435,6 +3502,8 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, kvm->manual_dirty_log_protect = cap->args[0]; return 0; #endif + case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: + return kvm_cap_arm_enable_hdbss(kvm, cap); default: return kvm_vm_ioctl_enable_cap(kvm, cap); } -- Gitee From b91369bc145b973f98d98fe91b62491745f2dc7d Mon Sep 17 00:00:00 2001 From: eillon Date: Mon, 7 Jul 2025 15:50:20 +0800 Subject: [PATCH 4/5] arm64/kvm: support to handle the HDBSSF event Updating the dirty bitmap based on the HDBSS buffer. Similar to the implementation of the x86 pml feature, KVM flushes the buffers on all VM-Exits, thus we only need to kick running vCPUs to force a VM-Exit. 
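For context, the dirty state harvested from the per-vCPU HDBSS buffers ends up in the regular per-memslot dirty bitmap, so the userspace flow is unchanged; a minimal consumer sketch follows (the slot number, page count and omitted error handling are placeholders, not part of this series):

  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* One bit per guest page in the slot. By the time KVM_GET_DIRTY_LOG
   * returns, kvm_arch_sync_dirty_log() has kicked every vCPU, so their
   * HDBSS buffers have been folded into the bitmap reported here. */
  static void fetch_dirty_log(int vm_fd, __u32 slot, __u64 slot_npages)
  {
          struct kvm_dirty_log log = { .slot = slot };

          log.dirty_bitmap = calloc((slot_npages + 63) / 64, 8);
          ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);

          /* ... walk log.dirty_bitmap and queue dirty pages for sending ... */

          free(log.dirty_bitmap);
  }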
Signed-off-by: eillon Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/esr.h | 4 +++ arch/arm64/include/asm/kvm_emulate.h | 5 +++ arch/arm64/kvm/handle_exit.c | 48 ++++++++++++++++++++++++++++ include/linux/kvm_host.h | 1 + virt/kvm/arm/arm.c | 15 +++++++++ virt/kvm/arm/mmu.c | 10 +++++- virt/kvm/kvm_main.c | 6 ++++ 7 files changed, 88 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index b13ea7ad7737..18476aad385b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -73,6 +73,10 @@ #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) +#define ESR_ELx_ISS2_SHIFT (32) +#define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32)) +#define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT) + /* ISS field definitions shared by different classes */ #define ESR_ELx_WNR_SHIFT (6) #define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 2ad8a666b7b3..9ddeaf6d4eb9 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -268,6 +268,11 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) return vcpu->arch.fault.esr_el2; } +static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.esr_el2; +} + static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { u32 esr = kvm_vcpu_get_hsr(vcpu); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 1249f68a9418..e258bd0bb5d8 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -217,6 +217,51 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) return arm_exit_handlers[hsr_ec]; } +#define HDBSS_ENTRY_VALID_SHIFT 0 +#define HDBSS_ENTRY_VALID_MASK (1UL << HDBSS_ENTRY_VALID_SHIFT) +#define HDBSS_ENTRY_IPA_SHIFT 12 +#define HDBSS_ENTRY_IPA_MASK GENMASK_ULL(55, HDBSS_ENTRY_IPA_SHIFT) + +static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu) +{ + int idx, curr_idx; + u64 *hdbss_buf; + + if (!vcpu->kvm->enable_hdbss) + return; + + dsb(sy); + isb(); + curr_idx = HDBSSPROD_IDX(read_sysreg_s(SYS_HDBSSPROD_EL2)); + + /* Do nothing if HDBSS buffer is empty or br_el2 is NULL */ + if (curr_idx == 0 || vcpu->arch.hdbss.br_el2 == 0) + return; + + hdbss_buf = page_address( + phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2))); + if (!hdbss_buf) { + kvm_err("Enter flush hdbss buffer with buffer == NULL!"); + return; + } + + for (idx = 0; idx < curr_idx; idx++) { + u64 gpa; + + gpa = hdbss_buf[idx]; + if (!(gpa & HDBSS_ENTRY_VALID_MASK)) + continue; + + gpa = gpa & HDBSS_ENTRY_IPA_MASK; + kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); + } + + /* reset HDBSS index */ + write_sysreg_s(0, SYS_HDBSSPROD_EL2); + dsb(sy); + isb(); +} + /* * We may be single-stepping an emulated instruction. 
If the emulation * has been completed in the kernel, we can return to userspace with a @@ -251,6 +296,9 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run) int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index) { + if (vcpu->kvm->enable_hdbss) + kvm_flush_hdbss_buffer(vcpu); + if (ARM_SERROR_PENDING(exception_index)) { u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0152276ae834..0bd38bf3c3bb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -854,6 +854,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, gfn_t gfn_offset, unsigned long mask); +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 6ada153c0e05..6fd79583312a 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1316,6 +1316,21 @@ int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *lo return r; } +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ + /* + * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called + * before reporting dirty_bitmap to userspace. KVM flushes the buffers + * on all VM-Exits, thus we only need to kick running vCPUs to force a + * VM-Exit. + */ + struct kvm_vcpu *vcpu; + unsigned long i; + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_vcpu_kick(vcpu); +} + static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr) { diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 8d2e5f10543c..9c5532cca6f5 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1986,7 +1986,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) unsigned long fault_status; phys_addr_t fault_ipa; struct kvm_memory_slot *memslot; - unsigned long hva; + unsigned long hva, iss2; bool is_iabt, write_fault, writable; gfn_t gfn; int ret, idx; @@ -1996,6 +1996,14 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); is_iabt = kvm_vcpu_trap_is_iabt(vcpu); + /* + * HDBSS buffer already flushed when enter handle_trap_exceptions(). + * Nothing to do here. + */ + iss2 = ESR_ELx_ISS2(kvm_vcpu_get_esr(vcpu)); + if (fault_status == ESR_ELx_FSC_PERM && (iss2 & ESR_ELx_HDBSSF)) + return 1; + /* Synchronous External Abort? 
*/ if (kvm_vcpu_dabt_isextabt(vcpu)) { /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 784d38dd6ceb..1fa4f763fc62 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1250,6 +1250,8 @@ int kvm_get_dirty_log(struct kvm *kvm, if (!memslot->dirty_bitmap) return -ENOENT; + kvm_arch_sync_dirty_log(kvm, memslot); + n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !any && i < n/sizeof(long); ++i) @@ -1309,6 +1311,8 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, if (!dirty_bitmap) return -ENOENT; + kvm_arch_sync_dirty_log(kvm, memslot); + n = kvm_dirty_bitmap_bytes(memslot); *flush = false; if (kvm->manual_dirty_log_protect) { @@ -1391,6 +1395,8 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) return -EINVAL; + kvm_arch_sync_dirty_log(kvm, memslot); + *flush = false; dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) -- Gitee From 9fb9bd0b78c84a5d48a57ebac8b1c1d0be7902d7 Mon Sep 17 00:00:00 2001 From: eillon Date: Mon, 7 Jul 2025 16:05:07 +0800 Subject: [PATCH 5/5] arm64/config: add config to control whether to enable the HDBSS feature The HDBSS feature introduces new system registers (HDBSSBR_EL2 and HDBSSPROD_EL2), and assembling accesses to them depends on armv9.5-a compiler support. So add an ARM64_HDBSS config option to control whether the HDBSS feature is enabled. Signed-off-by: eillon Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/Kconfig | 12 ++++++++++++ arch/arm64/include/asm/cpufeature.h | 2 ++ arch/arm64/include/asm/esr.h | 3 +++ arch/arm64/include/asm/kvm_emulate.h | 2 ++ arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/include/asm/kvm_mmu.h | 2 ++ arch/arm64/include/asm/sysreg.h | 6 ++++-- arch/arm64/kvm/handle_exit.c | 4 ++++ arch/arm64/kvm/hyp/switch.c | 2 ++ arch/arm64/kvm/hyp/sysreg-sr.c | 2 ++ arch/arm64/kvm/reset.c | 4 ++++ include/linux/kvm_host.h | 2 ++ virt/kvm/arm/arm.c | 4 ++++ virt/kvm/arm/mmu.c | 5 ++++- virt/kvm/kvm_main.c | 10 ++++++++++ 15 files changed, 59 insertions(+), 3 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 93e47a234ba9..f7c376389f0a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1684,6 +1684,18 @@ config ARM64_HAFT endmenu # "ARMv8.8 architectural features" +menu "ARMv9.5 architectural features" + +config ARM64_HDBSS + bool "Enable support for Hardware Dirty state tracking Structure (HDBSS)" + default y + help + Hardware Dirty state tracking Structure (HDBSS) enhances tracking of + translation table descriptors' dirty state to reduce the cost of + surveying for dirtied granules.
+ +endmenu # "ARMv9.5 architectural features" + config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 47ff1a5db978..a9c47046084d 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -622,6 +622,7 @@ static inline bool system_supports_fpsimd(void) return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } +#ifdef CONFIG_ARM64_HDBSS static inline bool system_supports_hdbss(void) { u64 mmfr1; @@ -633,6 +634,7 @@ static inline bool system_supports_hdbss(void) return val == ID_AA64MMFR1_EL1_HAFDBS_HDBSS; } +#endif static inline bool system_uses_ttbr0_pan(void) { diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 18476aad385b..cfadb612179e 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -73,9 +73,12 @@ #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) #define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) +#ifdef CONFIG_ARM64_HDBSS #define ESR_ELx_ISS2_SHIFT (32) #define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32)) #define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT) +#endif + /* ISS field definitions shared by different classes */ #define ESR_ELx_WNR_SHIFT (6) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 9ddeaf6d4eb9..02ba73421952 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -268,10 +268,12 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) return vcpu->arch.fault.esr_el2; } +#ifdef CONFIG_ARM64_HDBSS static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu) { return vcpu->arch.fault.esr_el2; } +#endif static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9b4e2d03d1b7..7e85ae8ec6c4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -342,11 +342,13 @@ struct kvm_vcpu_arch { * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. 
*/ bool sysregs_loaded_on_cpu; +#ifdef CONFIG_ARM64_HDBSS /* HDBSS registers info */ struct { u64 br_el2; u64 prod_el2; } hdbss; +#endif }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index c153292f8e7a..e6a2eb9dd3c3 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -606,6 +606,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm) return kvm_phys_to_vttbr(baddr) | vmid_field | cnp; } +#ifdef CONFIG_ARM64_HDBSS static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu) { if (!vcpu->kvm->enable_hdbss) @@ -617,6 +618,7 @@ static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu) dsb(sy); isb(); } +#endif #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 10b93bba206b..daba2819f04c 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -430,7 +430,7 @@ #define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) #define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2) - +#ifdef CONFIG_ARM64_HDBSS /* For SMCR_EL2 fields see SMCR_ELx */ #define REG_HDBSSBR_EL2 S3_4_C2_C3_2 @@ -488,7 +488,7 @@ #define HDBSSPROD_EL2_RES0 (UL(0) | GENMASK_ULL(63, 32) | GENMASK_ULL(25, 19)) #define HDBSSPROD_EL2_RES1 (UL(0)) #define HDBSSPROD_EL2_UNKN (UL(0)) - +#endif #define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) @@ -876,6 +876,7 @@ /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */ #define SYS_MPIDR_SAFE_VAL (BIT(31)) +#ifdef CONFIG_ARM64_HDBSS /* * Definitions for the HDBSS feature */ @@ -887,6 +888,7 @@ #define HDBSSBR_SZ(br) (((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT) #define HDBSSPROD_IDX(prod) (((prod) & HDBSSPROD_EL2_INDEX_MASK) >> HDBSSPROD_EL2_INDEX_SHIFT) +#endif #ifdef __ASSEMBLY__ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e258bd0bb5d8..d1062a3ab7b9 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -217,6 +217,7 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) return arm_exit_handlers[hsr_ec]; } +#ifdef CONFIG_ARM64_HDBSS #define HDBSS_ENTRY_VALID_SHIFT 0 #define HDBSS_ENTRY_VALID_MASK (1UL << HDBSS_ENTRY_VALID_SHIFT) #define HDBSS_ENTRY_IPA_SHIFT 12 @@ -261,6 +262,7 @@ static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu) dsb(sy); isb(); } +#endif /* * We may be single-stepping an emulated instruction. If the emulation @@ -296,8 +298,10 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run) int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index) { +#ifdef CONFIG_ARM64_HDBSS if (vcpu->kvm->enable_hdbss) kvm_flush_hdbss_buffer(vcpu); +#endif if (ARM_SERROR_PENDING(exception_index)) { u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f59b439c9cec..16909fac6773 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -633,7 +633,9 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) * (among other things). 
*/ __activate_vm(vcpu->kvm); +#ifdef CONFIG_ARM64_HDBSS __load_hdbss(vcpu); +#endif __activate_traps(vcpu); sysreg_restore_guest_state_vhe(guest_ctxt); diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index e855df16f738..12fcbbe9069f 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -263,7 +263,9 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) __sysreg_restore_user_state(guest_ctxt); __sysreg_restore_el1_state(guest_ctxt); +#ifdef CONFIG_ARM64_HDBSS __load_hdbss(vcpu); +#endif vcpu->arch.sysregs_loaded_on_cpu = true; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 7100925b21d4..0aada0034a48 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -206,15 +206,19 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { +#ifdef CONFIG_ARM64_HDBSS struct page *hdbss_pg; +#endif kfree(vcpu->arch.sve_state); +#ifdef CONFIG_ARM64_HDBSS if (vcpu->arch.hdbss.br_el2) { hdbss_pg = phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)); if (hdbss_pg) __free_pages(hdbss_pg, HDBSSBR_SZ(vcpu->arch.hdbss.br_el2)); } +#endif } static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0bd38bf3c3bb..69a41b7f07d3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -515,7 +515,9 @@ struct kvm { struct srcu_struct irq_srcu; pid_t userspace_pid; bool vm_bugged; +#ifdef CONFIG_ARM64_HDBSS bool enable_hdbss; +#endif }; #define kvm_err(fmt, ...) \ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 6fd79583312a..83b262cb7505 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -229,9 +229,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif +#ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = system_supports_hdbss(); break; +#endif default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; @@ -1318,6 +1320,7 @@ int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *lo void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { +#ifdef CONFIG_ARM64_HDBSS /* * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called * before reporting dirty_bitmap to userspace. KVM flushes the buffers @@ -1329,6 +1332,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); +#endif } static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 9c5532cca6f5..f0fb00af87c0 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1905,9 +1905,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } else { pte_t new_pte = kvm_pfn_pte(pfn, mem_type); - /* Set the DBM bit */ +#ifdef CONFIG_ARM64_HDBSS if (writable && kvm->enable_hdbss && logging_active) new_pte = __pte(pte_val(new_pte) | S2_PTE_DBM); +#endif if (writable) { new_pte = kvm_s2pte_mkwrite(new_pte); @@ -1996,6 +1997,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); is_iabt = kvm_vcpu_trap_is_iabt(vcpu); +#ifdef CONFIG_ARM64_HDBSS /* * HDBSS buffer already flushed when enter handle_trap_exceptions(). * Nothing to do here. 
@@ -2003,6 +2005,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) iss2 = ESR_ELx_ISS2(kvm_vcpu_get_esr(vcpu)); if (fault_status == ESR_ELx_FSC_PERM && (iss2 & ESR_ELx_HDBSSF)) return 1; +#endif /* Synchronous External Abort? */ if (kvm_vcpu_dabt_isextabt(vcpu)) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1fa4f763fc62..2b58f6ad79d0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1250,7 +1250,9 @@ int kvm_get_dirty_log(struct kvm *kvm, if (!memslot->dirty_bitmap) return -ENOENT; +#ifdef CONFIG_ARM64_HDBSS kvm_arch_sync_dirty_log(kvm, memslot); +#endif n = kvm_dirty_bitmap_bytes(memslot); @@ -1311,7 +1313,9 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, if (!dirty_bitmap) return -ENOENT; +#ifdef CONFIG_ARM64_HDBSS kvm_arch_sync_dirty_log(kvm, memslot); +#endif n = kvm_dirty_bitmap_bytes(memslot); *flush = false; @@ -1395,7 +1399,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) return -EINVAL; +#ifdef CONFIG_ARM64_HDBSS kvm_arch_sync_dirty_log(kvm, memslot); +#endif *flush = false; dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); @@ -3430,6 +3436,7 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, return -EINVAL; } +#ifdef CONFIG_ARM64_HDBSS static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -3496,6 +3503,7 @@ static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, return 0; } +#endif static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, struct kvm_enable_cap *cap) @@ -3508,8 +3516,10 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, kvm->manual_dirty_log_protect = cap->args[0]; return 0; #endif +#ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: return kvm_cap_arm_enable_hdbss(kvm, cap); +#endif default: return kvm_vm_ioctl_enable_cap(kvm, cap); } -- Gitee
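As a self-contained illustration of how the HDBSSBR_EL2 encoding helpers introduced in patches 1 and 3 fit together, the sketch below mirrors their shape in user space (the GENMASK_ULL definition is inlined, the base address is an arbitrary 2MB-aligned example, and none of this is part of the series itself; under this encoding the SZ field selects a buffer of 2^(12 + SZ) bytes, i.e. 1 = 8KB up to 9 = 2MB):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)       ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define HDBSSBR_EL2_SZ_SHIFT    0
#define HDBSSBR_EL2_SZ_MASK     GENMASK_ULL(3, 0)
#define HDBSSBR_EL2_SZ_2MB      9ULL

/* Same shape as the macros added to sysreg.h: SZ lives in the low bits and
 * the base address must be aligned to the buffer size. */
#define HDBSSBR_EL2(baddr, sz)  (((baddr) & GENMASK_ULL(55, 12 + (sz))) | \
                                 ((sz) << HDBSSBR_EL2_SZ_SHIFT))
#define HDBSSBR_SZ(br)          (((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT)
#define HDBSSBR_BADDR(br)       ((br) & GENMASK_ULL(55, 12 + HDBSSBR_SZ(br)))

int main(void)
{
        uint64_t baddr = 0x80200000ULL;         /* made-up, 2MB-aligned PA */
        uint64_t br = HDBSSBR_EL2(baddr, HDBSSBR_EL2_SZ_2MB);

        printf("BR_EL2 = %#llx (BADDR %#llx, SZ %llu)\n",
               (unsigned long long)br,
               (unsigned long long)HDBSSBR_BADDR(br),
               (unsigned long long)HDBSSBR_SZ(br));
        return 0;
}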