diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 93e47a234ba9a4a768a9bda3f44875355de7701d..f7c376389f0a0cf651bb13b0682b0091f5eff468 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1684,6 +1684,18 @@ config ARM64_HAFT
 
 endmenu # "ARMv8.8 architectural features"
 
+menu "ARMv9.5 architectural features"
+
+config ARM64_HDBSS
+	bool "Enable support for Hardware Dirty state tracking Structure (HDBSS)"
+	default y
+	help
+	  The Hardware Dirty state tracking Structure (HDBSS) feature enhances
+	  the tracking of translation table descriptors' dirty state, reducing
+	  the cost of surveying for dirtied granules.
+
+endmenu # "ARMv9.5 architectural features"
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f509501323e749630c21e2fdc3601246087cd8d9..a9c47046084d9eb9511ce668889ebc582beec38f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -622,6 +622,20 @@ static inline bool system_supports_fpsimd(void)
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
 
+#ifdef CONFIG_ARM64_HDBSS
+static inline bool system_supports_hdbss(void)
+{
+	u64 mmfr1;
+	u32 val;
+
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	val = cpuid_feature_extract_unsigned_field(mmfr1,
+						   ID_AA64MMFR1_HADBS_SHIFT);
+
+	return val == ID_AA64MMFR1_EL1_HAFDBS_HDBSS;
+}
+#endif
+
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d6a4234afa54f4ce75c38712396ab979c8bb5aff..cfadb612179e26142ec684f0b1faddf8932bae00 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -73,6 +73,13 @@
 #define ESR_ELx_IL		(UL(1) << ESR_ELx_IL_SHIFT)
 #define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
+
+#ifdef CONFIG_ARM64_HDBSS
+#define ESR_ELx_ISS2_SHIFT	(32)
+#define ESR_ELx_ISS2_MASK	(GENMASK_ULL(55, 32))
+#define ESR_ELx_ISS2(esr)	(((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT)
+#endif
+
 /* ISS field definitions shared by different classes */
 #define ESR_ELx_WNR_SHIFT	(6)
 #define ESR_ELx_WNR		(UL(1) << ESR_ELx_WNR_SHIFT)
 
@@ -124,6 +131,10 @@
 #define ESR_ELx_CM_SHIFT	(8)
 #define ESR_ELx_CM		(UL(1) << ESR_ELx_CM_SHIFT)
 
+/* ISS2 field definitions for Data Aborts */
+#define ESR_ELx_HDBSSF_SHIFT	(11)
+#define ESR_ELx_HDBSSF		(UL(1) << ESR_ELx_HDBSSF_SHIFT)
+
 /* ISS field definitions for exceptions taken in to Hyp */
 #define ESR_ELx_CV		(UL(1) << 24)
 #define ESR_ELx_COND_SHIFT	(20)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c847d76ef56975cfe6b23ec8e99d806c970cd46e..35e451972294dee28821634811a07363664b3061 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -96,6 +96,7 @@
 			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
 
 /* VTCR_EL2 Registers bits */
+#define VTCR_EL2_HDBSS		(1UL << 45)
 #define VTCR_EL2_RES1		(1U << 31)
 #define VTCR_EL2_HD		(1 << 22)
 #define VTCR_EL2_HA		(1 << 21)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 2ad8a666b7b31aa9ede75e40cea0d50aaf342ddb..02ba73421952565cd152a00bd45d64575b992e71 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -268,6 +268,13 @@ static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault.esr_el2;
 }
 
+#ifdef CONFIG_ARM64_HDBSS
+static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.esr_el2;
+}
+#endif
+
 static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 01886b83d1206afac90f61bb89b96cc719ecc1f4..7e85ae8ec6c45236176bc1b4a60c20c48ca15b6f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -341,6 +341,14 @@ struct kvm_vcpu_arch {
 	/* True when deferrable sysregs are loaded on the physical CPU,
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 	bool sysregs_loaded_on_cpu;
+
+#ifdef CONFIG_ARM64_HDBSS
+	/* HDBSS registers info */
+	struct {
+		u64 br_el2;
+		u64 prod_el2;
+	} hdbss;
+#endif
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ffe0aad96b17be9e6d952b3520264c934759667c..e6a2eb9dd3c3870f7c06a00aafcd7238eb2ecbc5 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -606,5 +606,19 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
+#ifdef CONFIG_ARM64_HDBSS
+static __always_inline void __load_hdbss(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->kvm->enable_hdbss)
+		return;
+
+	write_sysreg_s(vcpu->arch.hdbss.br_el2, SYS_HDBSSBR_EL2);
+	write_sysreg_s(vcpu->arch.hdbss.prod_el2, SYS_HDBSSPROD_EL2);
+
+	dsb(sy);
+	isb();
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 761b26417a5d3151201047f0931ac45cddcc0976..daba2819f04cd87aa6cd9bd9af27244b5c20891b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -429,6 +429,67 @@
 
 #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
 #define SYS_HCRX_EL2			sys_reg(3, 4, 1, 2, 2)
+
+#ifdef CONFIG_ARM64_HDBSS
+/* Hardware Dirty state tracking Structure (HDBSS) registers */
+
+#define REG_HDBSSBR_EL2			S3_4_C2_C3_2
+#define SYS_HDBSSBR_EL2			sys_reg(3, 4, 2, 3, 2)
+#define SYS_HDBSSBR_EL2_Op0		3
+#define SYS_HDBSSBR_EL2_Op1		4
+#define SYS_HDBSSBR_EL2_CRn		2
+#define SYS_HDBSSBR_EL2_CRm		3
+#define SYS_HDBSSBR_EL2_Op2		2
+
+#define HDBSSBR_EL2_BADDR		GENMASK(55, 12)
+#define HDBSSBR_EL2_BADDR_MASK		GENMASK(55, 12)
+#define HDBSSBR_EL2_BADDR_SHIFT		12
+#define HDBSSBR_EL2_BADDR_WIDTH		44
+
+#define HDBSSBR_EL2_SZ			GENMASK(3, 0)
+#define HDBSSBR_EL2_SZ_MASK		GENMASK(3, 0)
+#define HDBSSBR_EL2_SZ_SHIFT		0
+#define HDBSSBR_EL2_SZ_WIDTH		4
+#define HDBSSBR_EL2_SZ_8KB		UL(0b0001)
+#define HDBSSBR_EL2_SZ_16KB		UL(0b0010)
+#define HDBSSBR_EL2_SZ_32KB		UL(0b0011)
+#define HDBSSBR_EL2_SZ_64KB		UL(0b0100)
+#define HDBSSBR_EL2_SZ_128KB		UL(0b0101)
+#define HDBSSBR_EL2_SZ_256KB		UL(0b0110)
+#define HDBSSBR_EL2_SZ_512KB		UL(0b0111)
+#define HDBSSBR_EL2_SZ_1MB		UL(0b1000)
+#define HDBSSBR_EL2_SZ_2MB		UL(0b1001)
+
+#define HDBSSBR_EL2_RES0	(UL(0) | GENMASK_ULL(63, 56) | GENMASK_ULL(11, 4))
+#define HDBSSBR_EL2_RES1	(UL(0))
+#define HDBSSBR_EL2_UNKN	(UL(0))
+
+#define REG_HDBSSPROD_EL2		S3_4_C2_C3_3
+#define SYS_HDBSSPROD_EL2		sys_reg(3, 4, 2, 3, 3)
+#define SYS_HDBSSPROD_EL2_Op0		3
+#define SYS_HDBSSPROD_EL2_Op1		4
+#define SYS_HDBSSPROD_EL2_CRn		2
+#define SYS_HDBSSPROD_EL2_CRm		3
+#define SYS_HDBSSPROD_EL2_Op2		3
+
+#define HDBSSPROD_EL2_FSC		GENMASK(31, 26)
+#define HDBSSPROD_EL2_FSC_MASK		GENMASK(31, 26)
+#define HDBSSPROD_EL2_FSC_SHIFT		26
+#define HDBSSPROD_EL2_FSC_WIDTH		6
+#define HDBSSPROD_EL2_FSC_OK		UL(0b000000)
+#define HDBSSPROD_EL2_FSC_ExternalAbort	UL(0b010000)
+#define HDBSSPROD_EL2_FSC_GPF		UL(0b101000)
+
+#define HDBSSPROD_EL2_INDEX		GENMASK(18, 0)
+#define HDBSSPROD_EL2_INDEX_MASK	GENMASK(18, 0)
+#define HDBSSPROD_EL2_INDEX_SHIFT	0
+#define HDBSSPROD_EL2_INDEX_WIDTH	19
+
+#define HDBSSPROD_EL2_RES0	(UL(0) | GENMASK_ULL(63, 32) | GENMASK_ULL(25, 19))
+#define HDBSSPROD_EL2_RES1	(UL(0))
+#define HDBSSPROD_EL2_UNKN	(UL(0))
+#endif
+
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 #define SYS_SPSR_EL2			sys_reg(3, 4, 4, 0, 0)
 #define SYS_ELR_EL2			sys_reg(3, 4, 4, 0, 1)
@@ -815,6 +876,20 @@
 /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
 #define SYS_MPIDR_SAFE_VAL	(BIT(31))
 
+#ifdef CONFIG_ARM64_HDBSS
+/*
+ * Definitions for the HDBSS feature
+ */
+#define HDBSS_MAX_SIZE		HDBSSBR_EL2_SZ_2MB
+
+#define HDBSSBR_EL2(baddr, sz)	(((baddr) & GENMASK(55, 12 + (sz))) | \
+				 ((sz) << HDBSSBR_EL2_SZ_SHIFT))
+#define HDBSSBR_BADDR(br)	((br) & GENMASK(55, (12 + HDBSSBR_SZ(br))))
+#define HDBSSBR_SZ(br)		(((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT)
+
+#define HDBSSPROD_IDX(prod)	(((prod) & HDBSSPROD_EL2_INDEX_MASK) >> HDBSSPROD_EL2_INDEX_SHIFT)
+#endif
+
 #ifdef __ASSEMBLY__
 
 .irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1249f68a9418174edfdda3e8c0aca83c34da3733..d1062a3ab7b964a787d0e71443105bc879719cf0 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -217,6 +217,53 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	return arm_exit_handlers[hsr_ec];
 }
 
+#ifdef CONFIG_ARM64_HDBSS
+#define HDBSS_ENTRY_VALID_SHIFT	0
+#define HDBSS_ENTRY_VALID_MASK	(1UL << HDBSS_ENTRY_VALID_SHIFT)
+#define HDBSS_ENTRY_IPA_SHIFT	12
+#define HDBSS_ENTRY_IPA_MASK	GENMASK_ULL(55, HDBSS_ENTRY_IPA_SHIFT)
+
+static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu)
+{
+	int idx, curr_idx;
+	u64 *hdbss_buf;
+
+	if (!vcpu->kvm->enable_hdbss)
+		return;
+
+	dsb(sy);
+	isb();
+	curr_idx = HDBSSPROD_IDX(read_sysreg_s(SYS_HDBSSPROD_EL2));
+
+	/* Nothing to do if the HDBSS buffer is empty or br_el2 is not set */
+	if (curr_idx == 0 || vcpu->arch.hdbss.br_el2 == 0)
+		return;
+
+	hdbss_buf = page_address(
+		phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)));
+	if (!hdbss_buf) {
+		kvm_err("HDBSS buffer flush requested but the buffer is NULL!\n");
+		return;
+	}
+
+	for (idx = 0; idx < curr_idx; idx++) {
+		u64 gpa;
+
+		gpa = hdbss_buf[idx];
+		if (!(gpa & HDBSS_ENTRY_VALID_MASK))
+			continue;
+
+		gpa = gpa & HDBSS_ENTRY_IPA_MASK;
+		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+	}
+
+	/* Reset the HDBSS write index */
+	write_sysreg_s(0, SYS_HDBSSPROD_EL2);
+	dsb(sy);
+	isb();
+}
+#endif
+
 /*
  * We may be single-stepping an emulated instruction. If the emulation
  * has been completed in the kernel, we can return to userspace with a
@@ -251,6 +298,11 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
+#ifdef CONFIG_ARM64_HDBSS
+	if (vcpu->kvm->enable_hdbss)
+		kvm_flush_hdbss_buffer(vcpu);
+#endif
+
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 624e5c83a4972bae0b993cd60ee3c753eb575c25..16909fac6773ecc76f81f3d3ae040d49a05169b9 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -633,6 +633,9 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	 * (among other things).
 	 */
 	__activate_vm(vcpu->kvm);
+#ifdef CONFIG_ARM64_HDBSS
+	__load_hdbss(vcpu);
+#endif
 	__activate_traps(vcpu);
 
 	sysreg_restore_guest_state_vhe(guest_ctxt);
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 7ddbc849b58001fe8ed40ff0cd23d2a20c8664fe..12fcbbe9069fe55faa0f5d83938e05e4f61ae377 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -263,6 +263,10 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
 	__sysreg_restore_user_state(guest_ctxt);
 	__sysreg_restore_el1_state(guest_ctxt);
 
+#ifdef CONFIG_ARM64_HDBSS
+	__load_hdbss(vcpu);
+#endif
+
 	vcpu->arch.sysregs_loaded_on_cpu = true;
 
 	activate_traps_vhe_load(vcpu);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index a3105ae464be1bc584f324b10549ce7189c31b9f..0aada0034a48f8e67d8c2b8bdc0e6b8ac2036080 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -206,7 +206,19 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_ARM64_HDBSS
+	struct page *hdbss_pg;
+#endif
 	kfree(vcpu->arch.sve_state);
+
+#ifdef CONFIG_ARM64_HDBSS
+	if (vcpu->arch.hdbss.br_el2) {
+		hdbss_pg = phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2));
+		if (hdbss_pg)
+			__free_pages(hdbss_pg,
+				     HDBSSBR_SZ(vcpu->arch.hdbss.br_el2));
+	}
+#endif
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9019db30b8a762857429e0c92ebcb070b7351044..69a41b7f07d347dd4419f5c8b7698ce0f2da807c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -515,6 +515,9 @@ struct kvm {
 	struct srcu_struct irq_srcu;
 	pid_t userspace_pid;
 	bool vm_bugged;
+#ifdef CONFIG_ARM64_HDBSS
+	bool enable_hdbss;
+#endif
 };
 
 #define kvm_err(fmt, ...) \
@@ -853,6 +856,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					gfn_t gfn_offset,
 					unsigned long mask);
 
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
 
 int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index ebb362be8c953286f500d4ca6fef7abcd189687a..cb6ab8c7f3bc7e9e610a6ac049c5bf0d8309ad98 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1008,6 +1008,8 @@ struct kvm_ppc_resize_hpt {
 
 #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
 #define KVM_CAP_ARM_VIRT_MSI_BYPASS 799
 
+#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 502
+
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 52641d8ca9e83f25b983f3cc6be115c37bad2d98..c342a4eec53ccc763132fcace7b7e53771807c68 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -757,6 +757,7 @@ struct kvm_ppc_resize_hpt {
 	__u32 pad;
 };
 
+#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 502
 #define KVMIO 0xAE
 
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 28cdd2f4d1eff766e66d9a56d198133c182b863c..83b262cb75050eb009e96cc1fbcb62a4298ea1ea 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -228,6 +228,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_VIRT_MSI_BYPASS:
 		r = sdev_enable;
 		break;
+#endif
+#ifdef CONFIG_ARM64_HDBSS
+	case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK:
+		r = system_supports_hdbss();
+		break;
 #endif
 	default:
 		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
@@ -1313,6 +1318,23 @@ int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *lo
 	return r;
 }
 
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+#ifdef CONFIG_ARM64_HDBSS
+	/*
+	 * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
+	 * before reporting dirty_bitmap to userspace. KVM flushes the buffers
+	 * on all VM-Exits, thus we only need to kick running vCPUs to force a
+	 * VM-Exit.
+	 */
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
+#endif
+}
+
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
 					struct kvm_arm_device_addr *dev_addr)
 {
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 6e20da9bf4f5924c5fd631d331a03827b6ce8ce1..f0fb00af87c00313b5938c7a9d046a539d452947 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -37,6 +37,7 @@ static unsigned long io_map_base;
 
 #define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)
+#define S2_PTE_DBM			(UL(1) << 51)
 
 static bool is_iomap(unsigned long flags)
 {
@@ -1904,6 +1905,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	} else {
 		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
 
+#ifdef CONFIG_ARM64_HDBSS
+		if (writable && kvm->enable_hdbss && logging_active)
+			new_pte = __pte(pte_val(new_pte) | S2_PTE_DBM);
+#endif
+
 		if (writable) {
 			new_pte = kvm_s2pte_mkwrite(new_pte);
 			mark_page_dirty(kvm, gfn);
@@ -1981,7 +1987,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned long fault_status;
 	phys_addr_t fault_ipa;
 	struct kvm_memory_slot *memslot;
-	unsigned long hva;
+	unsigned long hva, iss2;
 	bool is_iabt, write_fault, writable;
 	gfn_t gfn;
 	int ret, idx;
@@ -1991,6 +1997,16 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
+#ifdef CONFIG_ARM64_HDBSS
+	/*
+	 * The HDBSS buffer has already been flushed when entering
+	 * handle_trap_exceptions(), so there is nothing left to do here.
+	 */
+	iss2 = ESR_ELx_ISS2(kvm_vcpu_get_esr(vcpu));
+	if (fault_status == ESR_ELx_FSC_PERM && (iss2 & ESR_ELx_HDBSSF))
+		return 1;
+#endif
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
 		/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ac74b31efb811f23135cf8941b84d22e8b5c5609..2b58f6ad79d0f24c7a61bd57c722c1f9ddf39cb4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1250,6 +1250,10 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		return -ENOENT;
 
+#ifdef CONFIG_ARM64_HDBSS
+	kvm_arch_sync_dirty_log(kvm, memslot);
+#endif
+
 	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
@@ -1309,6 +1313,10 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 	if (!dirty_bitmap)
 		return -ENOENT;
 
+#ifdef CONFIG_ARM64_HDBSS
+	kvm_arch_sync_dirty_log(kvm, memslot);
+#endif
+
 	n = kvm_dirty_bitmap_bytes(memslot);
 	*flush = false;
 	if (kvm->manual_dirty_log_protect) {
@@ -1391,6 +1399,10 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	    (log->num_pages < memslot->npages - log->first_page &&
 	     (log->num_pages & 63)))
 		return -EINVAL;
+#ifdef CONFIG_ARM64_HDBSS
+	kvm_arch_sync_dirty_log(kvm, memslot);
+#endif
+
 	*flush = false;
 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
@@ -3424,6 +3436,75 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 	return -EINVAL;
 }
 
+#ifdef CONFIG_ARM64_HDBSS
+static int kvm_cap_arm_enable_hdbss(struct kvm *kvm,
+				    struct kvm_enable_cap *cap)
+{
+	unsigned long i;
+	struct kvm_vcpu *vcpu;
+	struct page *hdbss_pg;
+	int size = cap->args[0];
+
+	if (!system_supports_hdbss()) {
+		kvm_err("This system does not support HDBSS!\n");
+		return -EINVAL;
+	}
+
+	if (size < 0 || size > HDBSS_MAX_SIZE) {
+		kvm_err("Invalid HDBSS buffer size: %d!\n", size);
+		return -EINVAL;
+	}
+
+	/* Enable the HDBSS feature if size > 0, otherwise disable it. */
+	if (size) {
+		kvm->enable_hdbss = true;
+		kvm->arch.vtcr |= VTCR_EL2_HD | VTCR_EL2_HDBSS;
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			hdbss_pg = alloc_pages(GFP_KERNEL, size);
+			if (!hdbss_pg) {
+				kvm_err("Failed to allocate HDBSS buffer!\n");
+				return -ENOMEM;
+			}
+
+			vcpu->arch.hdbss.br_el2 =
+				HDBSSBR_EL2(page_to_phys(hdbss_pg), size);
+			vcpu->arch.hdbss.prod_el2 = 0;
+
+			/*
+			 * Kick the vcpu out of guest mode so that the new
+			 * vtcr value is loaded into VTCR_EL2 when it
+			 * re-enters the guest.
+			 */
+			kvm_vcpu_kick(vcpu);
+		}
+
+		kvm_info("HDBSS enabled, buffer size encoding: %d\n", size);
+	} else if (kvm->enable_hdbss) {
+		kvm->arch.vtcr &= ~(VTCR_EL2_HD | VTCR_EL2_HDBSS);
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			/* Kick the vcpu to flush its HDBSS buffer. */
+			kvm_vcpu_kick(vcpu);
+
+			hdbss_pg = phys_to_page(
+				HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2));
+			if (hdbss_pg)
+				__free_pages(hdbss_pg,
+					     HDBSSBR_SZ(vcpu->arch.hdbss.br_el2));
+
+			vcpu->arch.hdbss.br_el2 = 0;
+			vcpu->arch.hdbss.prod_el2 = 0;
+		}
+
+		kvm->enable_hdbss = false;
+		kvm_info("HDBSS disabled\n");
+	}
+
+	return 0;
+}
+#endif
+
 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
 					   struct kvm_enable_cap *cap)
 {
@@ -3434,6 +3515,10 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
 			return -EINVAL;
 		kvm->manual_dirty_log_protect = cap->args[0];
 		return 0;
+#endif
+#ifdef CONFIG_ARM64_HDBSS
+	case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK:
+		return kvm_cap_arm_enable_hdbss(kvm, cap);
 #endif
 	default:
 		return kvm_vm_ioctl_enable_cap(kvm, cap);
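
Usage note (illustrative, not part of the patch): userspace drives this interface
through the standard KVM_CHECK_EXTENSION and KVM_ENABLE_CAP ioctls on the VM file
descriptor. The sketch below is a minimal example under assumptions of my own
(vm_fd, the set_hdbss() helper name, and the error handling are hypothetical);
the size argument follows the HDBSSBR_EL2_SZ_* encoding added above, i.e. 1 for
an 8KB per-vCPU buffer up to 9 (HDBSS_MAX_SIZE) for 2MB, and 0 disables the
feature again.

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Enable (size_enc > 0) or disable (size_enc == 0) HDBSS for one VM. */
	static int set_hdbss(int vm_fd, unsigned long size_enc)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_ARM_HW_DIRTY_STATE_TRACK,
			/* args[0]: HDBSSBR_EL2_SZ_* encoding, 1 (8KB) .. 9 (2MB) */
			.args[0] = size_enc,
		};

		/* The capability is only advertised when the CPU implements HDBSS. */
		if (ioctl(vm_fd, KVM_CHECK_EXTENSION,
			  KVM_CAP_ARM_HW_DIRTY_STATE_TRACK) <= 0) {
			fprintf(stderr, "HDBSS not supported on this host\n");
			return -1;
		}

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

With HDBSS enabled, the existing dirty-log ioctls keep their semantics: the
per-vCPU buffers are drained into the dirty bitmap on VM exit by
kvm_flush_hdbss_buffer(), and kvm_arch_sync_dirty_log() kicks running vCPUs so
the bitmap is up to date before KVM_GET_DIRTY_LOG or KVM_CLEAR_DIRTY_LOG reports
it to userspace.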