From f9e1e855ef6ef3346c001792e22b2a23b2fb2dad Mon Sep 17 00:00:00 2001
From: He Qiong
Date: Tue, 10 Jun 2025 15:11:38 +0800
Subject: [PATCH] duptext: use the 6.6 kernel version callback and add an
 additional callback site

Signed-off-by: He Qiong
---
 arch/arm64/include/asm/tlbflush.h | 15 +++++++++++++
 include/linux/mmu_notifier.h      | 35 +++++++++++++++++++++++++++++++
 mm/mmu_notifier.c                 | 14 +++++++++++++
 3 files changed, 64 insertions(+)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 11e9d8bd8b75..de105fb23380 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -15,6 +15,7 @@
 #include <linux/bitfield.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
+#include <linux/mmu_notifier.h>
 
 #ifdef ARM64_ASM_ARCH
 #define ARM64_ASM_PREAMBLE	".arch " ARM64_ASM_ARCH "\n"
@@ -256,6 +257,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	dsb(ishst);
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
+	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 	dsb(ish);
 }
 
@@ -267,6 +269,8 @@ static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 	dsb(ishst);
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
+						    (uaddr & PAGE_MASK) + PAGE_SIZE);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -353,7 +357,10 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 				     int tlb_level)
 {
 	unsigned long asid, pages;
+	unsigned long ustart, uend;
 
+	ustart = start;
+	uend = end;
 	start = round_down(start, stride);
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
@@ -368,6 +375,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
 	    pages >= MAX_TLBI_RANGE_PAGES) {
 		flush_tlb_mm(vma->vm_mm);
+		/*
+		 * The callback should see the accurate address range, but
+		 * flush_tlb_mm() above reports the range as 0 to -1UL, so
+		 * invoke the callback again here with the precise range.
+		 */
+		mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, ustart, uend);
+		dsb(ish);
 		return;
 	}
 
@@ -379,6 +393,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	else
 		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
 				     tlb_level, true);
+	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, ustart, uend);
 	dsb(ish);
 }
 
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index aa4017b33306..6ae2628297e1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -217,6 +217,26 @@ struct mmu_notifier_ops {
 	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
 				 unsigned long start, unsigned long end);
 
+	/*
+	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+	 * which shares page-tables with the CPU. The
+	 * invalidate_range_start()/end() callbacks should not be implemented,
+	 * as arch_invalidate_secondary_tlbs() already catches the points in
+	 * time when an external TLB needs to be flushed.
+	 *
+	 * arch_invalidate_secondary_tlbs() may be called while holding the
+	 * ptl spin-lock, and therefore this callback is not allowed
+	 * to sleep.
+	 *
+	 * This is called by architecture code whenever a TLB entry is
+	 * invalidated. It is assumed that any secondary TLB has the same
+	 * rules for when invalidations are required. If this is not the case,
+	 * architecture code will need to call this explicitly when required
+	 * for secondary TLB invalidation.
+	 */
+	void (*arch_invalidate_secondary_tlbs)(struct mmu_notifier *mn, struct mm_struct *mm,
+					       unsigned long start, unsigned long end);
+
 	/*
 	 * These callbacks are used with the get/put interface to manage the
 	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
@@ -298,6 +318,9 @@ extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
 				  bool only_end);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+				  unsigned long start, unsigned long end);
+
 extern bool
 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
 
@@ -397,6 +420,13 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+					unsigned long start, unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 	mm->mmu_notifier_mm = NULL;
@@ -609,6 +639,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+					unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9a889e456168..faefbf1134c4 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -242,6 +242,20 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 	srcu_read_unlock(&srcu, id);
 }
 
+void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+					unsigned long start, unsigned long end)
+{
+	struct mmu_notifier *mn;
+	int id;
+
+	id = srcu_read_lock(&srcu);
+	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->arch_invalidate_secondary_tlbs)
+			mn->ops->arch_invalidate_secondary_tlbs(mn, mm, start, end);
+	}
+	srcu_read_unlock(&srcu, id);
+}
+
 /*
  * Same as mmu_notifier_register but here the caller must hold the
  * mmap_sem in write mode.
-- 
Gitee
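
For context, here is a minimal sketch of how a driver that mirrors CPU page
tables into a device TLB might consume the new callback. It is illustrative
only and not part of the patch: the my_dev_* names and the my_dev_tlb_inv()
helper are hypothetical placeholders; only struct mmu_notifier_ops, the
callback signature, and mmu_notifier_register() come from the code above.

#include <linux/mmu_notifier.h>
#include <linux/mm_types.h>

struct my_dev_ctx {
	struct mmu_notifier mn;	/* embedded notifier, one per bound mm */
	/* device state: MMIO base, device ASID, ... */
};

/* Hypothetical helper that programs the device's invalidation registers. */
static void my_dev_tlb_inv(struct my_dev_ctx *ctx, unsigned long start,
			   unsigned long end)
{
	/* write start/end to the device and poll for completion, atomically */
}

/*
 * May run under the ptl spin-lock (see the comment in mmu_notifier_ops),
 * so the device invalidation must be issued without sleeping.
 */
static void my_dev_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct my_dev_ctx *ctx = container_of(mn, struct my_dev_ctx, mn);

	my_dev_tlb_inv(ctx, start, end);
}

static const struct mmu_notifier_ops my_dev_mn_ops = {
	.arch_invalidate_secondary_tlbs = my_dev_invalidate_secondary_tlbs,
};

/* Bind the device to a process address space; takes the mmap lock internally. */
static int my_dev_bind_mm(struct my_dev_ctx *ctx, struct mm_struct *mm)
{
	ctx->mn.ops = &my_dev_mn_ops;
	return mmu_notifier_register(&ctx->mn, mm);
}

With this hook, such a driver would need no invalidate_range_start()/end()
pair for plain TLB shootdown: the architecture code above invokes the
callback at exactly the points where the CPU TLB is invalidated, including
the accurate ustart/uend range added in __flush_tlb_range().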