From 04225a5c8c49e30c4ff72c9cb4449c77059321c5 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:51 +0000 Subject: [PATCH 01/13] arm64: extable: add new extable type "__mc_ex_table" A new type of extable is added, which is specially used fixup for machine check safe. In order to keep kabi consistency, we cannot add type and data members to struct exception_table_entry(d6e2cc564775 arm64: extable: add `type` and `data` fields), so we put the fixup entry to new extable separately. Signed-off-by: Tong Tiangen Conflicts: arch/arm64/Kconfig include/asm-generic/vmlinux.lds.h include/linux/module.h [ MC_EXCEPTION_TABLE place changed. ] Signed-off-by: huwentao --- arch/arm64/Kconfig | 3 +++ arch/arm64/include/asm/assembler.h | 15 ++++++++++++ arch/arm64/include/asm/uaccess.h | 10 ++++++++ arch/arm64/kernel/vmlinux.lds.S | 1 + include/asm-generic/vmlinux.lds.h | 15 ++++++++++++ include/linux/extable.h | 23 ++++++++++++++++++ include/linux/module.h | 5 ++++ kernel/extable.c | 29 +++++++++++++++++++++++ kernel/module.c | 38 ++++++++++++++++++++++++++++++ 9 files changed, 139 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c1357f45a35e..f257bb1c6c32 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -998,6 +998,9 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_HAS_MC_EXTABLE + bool + config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y if PGTABLE_LEVELS > 2 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4a4258f17c86..a1959fe005b8 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -142,6 +142,21 @@ alternative_endif .popsection .endm +/* + * Emit an entry into the machine check exception table + */ +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + .macro _asm_mc_extable, from, to + .pushsection __mc_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection + .endm +#else + .macro _asm_mc_extable, from, to + .endm +#endif + #define USER(l, x...) \ 9999: x; \ _asm_extable 9999b, l diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 32fc8061aa76..d8d2f9219a72 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -101,6 +101,16 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si " .long (" #from " - .), (" #to " - .)\n" \ " .popsection\n" +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +#define _ASM_MC_EXTABLE(from, to) \ + " .pushsection __mc_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" +#else +#define _ASM_MC_EXTABLE(from, to) +#endif + /* * User access enabling/disabling. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 0bab37b1acbe..e14ad28d7ad5 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -145,6 +145,7 @@ SECTIONS RO_DATA(PAGE_SIZE) /* everything from this point to */ EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */ + MC_EXCEPTION_TABLE(8) NOTES . = ALIGN(PAGE_SIZE); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 4b52a73b5910..2019c5bd11ca 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -605,6 +605,21 @@ __stop___ex_table = .; \ } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* + * Machine Check Exception table + */ +#define MC_EXCEPTION_TABLE(align) \ + . 
= ALIGN(align); \ + __mc_ex_table : AT(ADDR(__mc_ex_table) - LOAD_OFFSET) { \ + __start___mc_ex_table = .; \ + KEEP(*(__mc_ex_table)) \ + __stop___mc_ex_table = .; \ + } +#else +#define MC_EXCEPTION_TABLE(align) +#endif + /* * .BTF */ diff --git a/include/linux/extable.h b/include/linux/extable.h index 4ab9e78f313b..e608f8a8df4e 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,18 +19,41 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_mc_exception_tables(unsigned long add); +#else +static inline const struct exception_table_entry * +search_mc_exception_tables(unsigned long add) +{ + return NULL; +} +#endif const struct exception_table_entry * search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_module_mc_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} +#endif #else static inline const struct exception_table_entry * search_module_extables(unsigned long addr) { return NULL; } +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} #endif /*CONFIG_MODULES*/ #ifdef CONFIG_BPF_JIT diff --git a/include/linux/module.h b/include/linux/module.h index 78b684600319..7ab60fdf8529 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -448,6 +448,11 @@ struct module { keeping pointers to this stuff */ char *args; +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + unsigned int num_mc_exentries; + struct exception_table_entry *mc_extable; +#endif + KABI_RESERVE(1); KABI_RESERVE(2); KABI_RESERVE(3); diff --git a/kernel/extable.c b/kernel/extable.c index f6920a11e28a..3a45a0e37b48 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -28,6 +28,11 @@ DEFINE_MUTEX(text_mutex); extern struct exception_table_entry __start___ex_table[]; extern struct exception_table_entry __stop___ex_table[]; +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +extern struct exception_table_entry __start___mc_ex_table[]; +extern struct exception_table_entry __stop___mc_ex_table[]; +#endif + /* Cleared by build time tools if the table is already sorted. 
*/ u32 __initdata __visible main_extable_sort_needed = 1; @@ -38,6 +43,14 @@ void __init sort_main_extable(void) pr_notice("Sorting __ex_table...\n"); sort_extable(__start___ex_table, __stop___ex_table); } + +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + if (main_extable_sort_needed && + &__stop___mc_ex_table > &__start___mc_ex_table) { + pr_notice("Sorting __mc_ex_table...\n"); + sort_extable(__start___mc_ex_table, __stop___mc_ex_table); + } +#endif } /* Given an address, look for it in the kernel exception table */ @@ -61,6 +74,22 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the machine check exception table */ +const +struct exception_table_entry *search_mc_exception_tables(unsigned long addr) +{ + const struct exception_table_entry *e; + + e = search_extable(__start___mc_ex_table, + __stop___mc_ex_table - __start___mc_ex_table, addr); + if (!e) + e = search_module_mc_extables(addr); + + return e; +} +#endif + int init_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_sinittext && diff --git a/kernel/module.c b/kernel/module.c index 13471a694d43..4b6f47e7ef6d 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3418,6 +3418,11 @@ static int find_module_sections(struct module *mod, struct load_info *info) mod->extable = section_objs(info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + mod->mc_extable = section_objs(info, "__mc_ex_table", + sizeof(*mod->mc_extable), &mod->num_mc_exentries); +#endif + if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); @@ -3658,6 +3663,10 @@ static int post_relocation(struct module *mod, const struct load_info *info) /* Sort exception table now relocations are done. */ sort_extable(mod->extable, mod->extable + mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + sort_extable(mod->mc_extable, mod->mc_extable + mod->num_mc_exentries); +#endif + /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, info->sechdrs[info->index.pcpu].sh_size); @@ -4622,6 +4631,35 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the module machine check safe exception tables. */ +const struct exception_table_entry *search_module_mc_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (!mod) + goto out; + + if (!mod->num_mc_exentries) + goto out; + + e = search_extable(mod->mc_extable, + mod->num_mc_exentries, + addr); +out: + preempt_enable(); + + /* + * Now, if we found one, we are running inside it now, hence + * we cannot unload the module, hence no refcnt needed. + */ + return e; +} +#endif + /* * is_module_address - is this address inside a module? * @addr: the address to check. -- Gitee From a7af1e96fc5434191ec604b129267165ca7f0e78 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:52 +0000 Subject: [PATCH 02/13] arm64: add support for machine check error safe During the processing of arm64 kernel hardware memory errors(do_sea()), if the errors is consumed in the kernel, the current processing is panic. However, it is not optimal. 
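For illustration, a minimal sketch of how in-line assembly could hook into the new
__mc_ex_table (the helper name mc_safe_load() is hypothetical; the asm follows the
__get_mem_asm() pattern used later in this series and assumes <asm/uaccess.h> for
_ASM_MC_EXTABLE):

        static inline int mc_safe_load(unsigned long addr, unsigned long *val)
        {
                int err = 0;

                asm volatile(
                "1:     ldr     %1, [%2]\n"             /* may consume a poisoned line */
                "2:\n"
                "       .section .fixup, \"ax\"\n"
                "3:     mov     %w0, %3\n"              /* landing pad: report -EFAULT */
                "       b       2b\n"
                "       .previous\n"
                _ASM_MC_EXTABLE(1b, 3b)                 /* entry lands in __mc_ex_table */
                : "+r" (err), "=&r" (*val)
                : "r" (addr), "i" (-EFAULT));

                return err;
        }

If a hardware memory error is consumed at label 1, the fixup added below
(fixup_exception_mc()) redirects the PC to label 3 instead of panicking.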
Take uaccess for example, if the uaccess operation fails due to memory error, only the user process will be affected, kill the user process and isolate the user page with hardware memory errors is a better choice. This patch only enable machine error check framework, it add exception fixup before kernel panic in do_sea() and only limit the consumption of hardware memory errors in kernel mode triggered by user mode processes. If fixup successful, panic can be avoided. Signed-off-by: Tong Tiangen Conflicts: arch/arm64/Kconfig arch/arm64/include/asm/extable.h arch/arm64/mm/extable.c [ Rename ARCH_HAS_COPY_MC to ARCH_HAS_UACCESS_MCSAFE Merge ("uaccess: add generic fallback version of copy_mc_to_user()") and move to arch/arm64/ ] Signed-off-by: huwentao --- arch/arm64/Kconfig | 2 ++ arch/arm64/include/asm/extable.h | 1 + arch/arm64/include/asm/uaccess.h | 9 +++++++++ arch/arm64/mm/extable.c | 12 ++++++++++++ arch/arm64/mm/fault.c | 29 ++++++++++++++++++++++++++++- 5 files changed, 52 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f257bb1c6c32..39b643fe3662 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -10,6 +10,7 @@ config ARM64 select ACPI_SPCR_TABLE if ACPI select ACPI_PPTT if ACPI select ARCH_CLOCKSOURCE_DATA + select ARCH_HAS_UACCESS_MCSAFE if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_COHERENT_TO_PFN @@ -21,6 +22,7 @@ config ARM64 select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_KEEPINITRD + select ARCH_HAS_MC_EXTABLE if ARCH_HAS_UACCESS_MCSAFE select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 56a4f68b262e..96b2cdc59006 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -23,4 +23,5 @@ struct exception_table_entry #define ARCH_HAS_RELATIVE_EXTABLE extern int fixup_exception(struct pt_regs *regs); +extern int fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index d8d2f9219a72..298667ca106f 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -454,4 +454,13 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, } #endif +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +static inline unsigned long __must_check +copy_to_user_mcsafe(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 81e694af5f8c..b6a6a9bda504 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,3 +16,15 @@ int fixup_exception(struct pt_regs *regs) return fixup != NULL; } + +int fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_mc_exception_tables(instruction_pointer(regs)); + if (!fixup) + return 0; + + regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + return 1; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6366ffee4613..b89dcfea8e5f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -1403,6 +1403,31 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr, + struct pt_regs *regs, int sig, 
int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_MCSAFE)) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -1422,7 +1447,9 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) siaddr = NULL; else siaddr = (void __user *)addr; - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } -- Gitee From 844f456f8d9834ef49e0394e7d5dde814775c624 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 29 Apr 2020 19:37:02 +0100 Subject: [PATCH 03/13] arm64: Reorder the macro arguments in the copy routines [Upstream commit ada66f1837594f38bc2db4f98c4c6589ecc8a7f6] The current argument order is obviously buggy (memcpy.S): macro strb1 ptr, regB, val strb \ptr, [\regB], \val endm However, it cancels out as the calling sites in copy_template.S pass the address as the regB argument. Mechanically reorder the arguments to match the instruction mnemonics. There is no difference in objdump before and after this patch. Signed-off-by: Catalin Marinas Link: https://lore.kernel.org/r/20200429183702.28445-1-catalin.marinas@arm.com Signed-off-by: Will Deacon Dep-of: ("arm64: copy_form/to_user support machine check safe") Signed-off-by: huwentao --- arch/arm64/lib/copy_from_user.S | 32 ++++++++++++++++---------------- arch/arm64/lib/copy_in_user.S | 32 ++++++++++++++++---------------- arch/arm64/lib/copy_to_user.S | 32 ++++++++++++++++---------------- arch/arm64/lib/memcpy.S | 32 ++++++++++++++++---------------- 4 files changed, 64 insertions(+), 64 deletions(-) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 8e25e89ad01f..0f8a3a9e3795 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -20,36 +20,36 @@ * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val + .macro ldrb1 reg, ptr, val + uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val .endm - .macro strb1 ptr, regB, val - strb \ptr, [\regB], \val + .macro strb1 reg, ptr, val + strb \reg, [\ptr], \val .endm - .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + .macro ldrh1 reg, ptr, val + uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val .endm - .macro strh1 ptr, regB, val - strh \ptr, [\regB], \val + .macro strh1 reg, ptr, val + strh \reg, [\ptr], \val .endm - .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + .macro ldr1 reg, ptr, val + uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val .endm - .macro str1 ptr, regB, val - str \ptr, [\regB], \val + .macro str1 reg, ptr, val + str \reg, [\ptr], \val .endm - .macro ldp1 ptr, regB, regC, val - uao_ldp 9998f, \ptr, \regB, \regC, \val + .macro ldp1 reg1, reg2, ptr, val + uao_ldp 9998f, \reg1, \reg2, \ptr, \val .endm - .macro stp1 ptr, regB, regC, val - stp \ptr, \regB, [\regC], \val + .macro stp1 reg1, reg2, ptr, val + stp \reg1, \reg2, [\ptr], \val .endm end .req x5 diff --git 
a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 667139013ed1..80e37ada0ee1 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -21,36 +21,36 @@ * Returns: * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val + .macro ldrb1 reg, ptr, val + uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val .endm - .macro strb1 ptr, regB, val - uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val + .macro strb1 reg, ptr, val + uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val .endm - .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + .macro ldrh1 reg, ptr, val + uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val .endm - .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + .macro strh1 reg, ptr, val + uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val .endm - .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + .macro ldr1 reg, ptr, val + uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val .endm - .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + .macro str1 reg, ptr, val + uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val .endm - .macro ldp1 ptr, regB, regC, val - uao_ldp 9998f, \ptr, \regB, \regC, \val + .macro ldp1 reg1, reg2, ptr, val + uao_ldp 9998f, \reg1, \reg2, \ptr, \val .endm - .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + .macro stp1 reg1, reg2, ptr, val + uao_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 1a104d0089f3..4ec59704b8f2 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -19,36 +19,36 @@ * Returns: * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - ldrb \ptr, [\regB], \val + .macro ldrb1 reg, ptr, val + ldrb \reg, [\ptr], \val .endm - .macro strb1 ptr, regB, val - uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val + .macro strb1 reg, ptr, val + uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val .endm - .macro ldrh1 ptr, regB, val - ldrh \ptr, [\regB], \val + .macro ldrh1 reg, ptr, val + ldrh \reg, [\ptr], \val .endm - .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + .macro strh1 reg, ptr, val + uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val .endm - .macro ldr1 ptr, regB, val - ldr \ptr, [\regB], \val + .macro ldr1 reg, ptr, val + ldr \reg, [\ptr], \val .endm - .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + .macro str1 reg, ptr, val + uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val .endm - .macro ldp1 ptr, regB, regC, val - ldp \ptr, \regB, [\regC], \val + .macro ldp1 reg1, reg2, ptr, val + ldp \reg1, \reg2, [\ptr], \val .endm - .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + .macro stp1 reg1, reg2, ptr, val + uao_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index b03cbb3455d4..dc8d2a216a6e 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -24,36 +24,36 @@ * Returns: * x0 - dest */ - .macro ldrb1 ptr, regB, val - ldrb \ptr, [\regB], \val + .macro ldrb1 reg, ptr, val + ldrb \reg, [\ptr], \val .endm - .macro strb1 ptr, regB, val - strb \ptr, [\regB], \val + .macro strb1 reg, ptr, val + strb 
\reg, [\ptr], \val .endm - .macro ldrh1 ptr, regB, val - ldrh \ptr, [\regB], \val + .macro ldrh1 reg, ptr, val + ldrh \reg, [\ptr], \val .endm - .macro strh1 ptr, regB, val - strh \ptr, [\regB], \val + .macro strh1 reg, ptr, val + strh \reg, [\ptr], \val .endm - .macro ldr1 ptr, regB, val - ldr \ptr, [\regB], \val + .macro ldr1 reg, ptr, val + ldr \reg, [\ptr], \val .endm - .macro str1 ptr, regB, val - str \ptr, [\regB], \val + .macro str1 reg, ptr, val + str \reg, [\ptr], \val .endm - .macro ldp1 ptr, regB, regC, val - ldp \ptr, \regB, [\regC], \val + .macro ldp1 reg1, reg2, ptr, val + ldp \reg1, \reg2, [\ptr], \val .endm - .macro stp1 ptr, regB, regC, val - stp \ptr, \regB, [\regC], \val + .macro stp1 reg1, reg2, ptr, val + stp \reg1, \reg2, [\ptr], \val .endm SYM_FUNC_START_ALIAS(__memcpy) -- Gitee From 08db44bba5c4a37fb9c558d9faa25234e26efb50 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 10 Feb 2022 19:52:35 +0800 Subject: [PATCH 04/13] arm64: uaccess: move uao_* alternatives to asm-uaccess.h [Upstream commit e2a2190a80ca0ebddd52c766caf08908d71fb949] The uao_* alternative asm macros are only used by the uaccess assembly routines in arch/arm64/lib/, where they are included indirectly via asm-uaccess.h. Since they're specific to the uaccess assembly (and will lose the alternatives in subsequent patches), let's move them into asm-uaccess.h. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon [will: update #include in mte.S to pull in uao asm macros] Signed-off-by: Will Deacon Signed-off-by: Zhen Lei Reviewed-by: Hanjun Guo Signed-off-by: Zheng Zengkai Dep-of: ("arm64: copy_form/to_user support machine check safe") Conflicts: arch/arm64/include/asm/asm-uaccess.h Signed-off-by: huwentao --- arch/arm64/include/asm/alternative.h | 59 ---------------------------- arch/arm64/include/asm/asm-uaccess.h | 59 ++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 3cb3c4ab3ea5..451ce45e6c0e 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -224,65 +224,6 @@ alternative_endif _asm_extable 9999b, \label .endm -/* - * Generate the assembly for UAO alternatives with exception table entries. - * This is complicated as there is no post-increment or pair versions of the - * unprivileged instructions, and USER() only works for single instructions. 
- */ -#ifdef CONFIG_ARM64_UAO - .macro uao_ldp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: ldp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - ldtr \reg1, [\addr]; - ldtr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_stp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: stp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - sttr \reg1, [\addr]; - sttr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: \inst \reg, [\addr], \post_inc; - nop; - alternative_else - \alt_inst \reg, [\addr]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - .endm -#else - .macro uao_ldp l, reg1, reg2, addr, post_inc - USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_stp l, reg1, reg2, addr, post_inc - USER(\l, stp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - USER(\l, \inst \reg, [\addr], \post_inc) - .endm -#endif - #endif /* __ASSEMBLY__ */ /* diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index c764cc8fb3b6..287253c708db 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -66,4 +66,63 @@ alternative_else_nop_endif and \dst, \dst, \addr .endm +/* + * Generate the assembly for UAO alternatives with exception table entries. + * This is complicated as there is no post-increment or pair versions of the + * unprivileged instructions, and USER() only works for single instructions. 
+ */ +#ifdef CONFIG_ARM64_UAO + .macro uao_ldp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: ldp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + ldtr \reg1, [\addr]; + ldtr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + .endm + + .macro uao_stp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: stp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + sttr \reg1, [\addr]; + sttr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + .endm + + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: \inst \reg, [\addr], \post_inc; + nop; + alternative_else + \alt_inst \reg, [\addr]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + .endm +#else + .macro uao_ldp l, reg1, reg2, addr, post_inc + USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_stp l, reg1, reg2, addr, post_inc + USER(\l, stp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + USER(\l, \inst \reg, [\addr], \post_inc) + .endm +#endif + #endif -- Gitee From a95e42b98cbc40f49a7e10912d183ed57db6fdc2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 10 Feb 2022 19:52:45 +0800 Subject: [PATCH 05/13] arm64: uaccess: refactor __{get,put}_user [Upstream commit f253d827f33cb5a5990b5cfd271941d1a21ecd85] As a step towards implementing __{get,put}_kernel_nofault(), this patch splits most user-memory specific logic out of __{get,put}_user(), with the memory access and fault handling in new __{raw_get,put}_mem() helpers. For now the LDR/LDTR patching is left within the *get_mem() helpers, and will be removed in a subsequent patch. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas Signed-off-by: Zhen Lei Reviewed-by: Hanjun Guo Signed-off-by: Zheng Zengkai Dep-of: ("arm64: copy_form/to_user support machine check safe") Signed-off-by: huwentao --- arch/arm64/include/asm/uaccess.h | 44 ++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 298667ca106f..2bb49c4759f9 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -262,7 +262,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. 
*/ -#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -277,35 +277,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __raw_get_user(x, ptr, err) \ +#define __raw_get_mem(x, ptr, err) \ do { \ unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) +#define __raw_get_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + __raw_get_mem(x, ptr, err); \ + uaccess_disable_not_uao(); \ +} while (0) + #define __get_user_error(x, ptr, err) \ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ @@ -327,7 +332,7 @@ do { \ #define get_user __get_user -#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -341,31 +346,36 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __raw_put_user(x, ptr, err) \ +#define __raw_put_mem(x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ + __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ + __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ + __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ + __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ +} while (0) + +#define __raw_put_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + __raw_put_mem(x, ptr, err); \ uaccess_disable_not_uao(); \ } while (0) -- Gitee From ea02295686f46d3035f63b2b2bc878792bc564d2 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:53 +0000 Subject: [PATCH 06/13] arm64: copy_form/to_user support machine check safe Add copy_{to, from}_user() to machine check safe. If copy fail due to hardware memory error, only the relevant processes are affected, so killing the user process and isolate the user page with hardware memory errors is a more reasonable choice than kernel panic. 
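With the machine-check extable entries above, a hardware memory error hit during
copy_from_user()/copy_to_user() surfaces to callers as an ordinary short copy, and
the task is additionally sent SIGBUS by arm64_do_kernel_sea(). A hypothetical
caller sketch (example_write() and the buffer size are made up):

        static long example_write(const char __user *ubuf, size_t len)
        {
                char kbuf[128];

                if (len > sizeof(kbuf))
                        len = sizeof(kbuf);

                /* non-zero return: bad address or uncorrected memory error */
                if (copy_from_user(kbuf, ubuf, len))
                        return -EFAULT;

                return len;
        }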
Signed-off-by: Tong Tiangen Conflicts: arch/arm64/lib/copy_to_user.S [ Skip modification to non-existent code. ] Signed-off-by: huwentao --- arch/arm64/include/asm/asm-uaccess.h | 5 +++++ arch/arm64/include/asm/assembler.h | 6 +++++- arch/arm64/lib/copy_from_user.S | 8 ++++---- arch/arm64/lib/copy_to_user.S | 8 ++++---- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 287253c708db..320d68c0e09c 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -85,6 +85,9 @@ alternative_else_nop_endif _asm_extable 8888b,\l; _asm_extable 8889b,\l; + + _asm_mc_extable 8888b,\l; + _asm_mc_extable 8889b,\l; .endm .macro uao_stp l, reg1, reg2, addr, post_inc @@ -112,6 +115,8 @@ alternative_else_nop_endif alternative_endif _asm_extable 8888b,\l; + + _asm_mc_extable 8888b,\l; .endm #else .macro uao_ldp l, reg1, reg2, addr, post_inc diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index a1959fe005b8..5d205fc98d70 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -159,8 +159,12 @@ alternative_endif #define USER(l, x...) \ 9999: x; \ - _asm_extable 9999b, l + _asm_extable 9999b, l; \ + _asm_mc_extable 9999b, l +#define USER_MC(l, x...) \ +9999: x; \ + _asm_mc_extable 9999b, l /* * Register aliases. */ diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 0f8a3a9e3795..5584e53338c4 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -25,7 +25,7 @@ .endm .macro strb1 reg, ptr, val - strb \reg, [\ptr], \val + USER_MC(9998f, strb \reg, [\ptr], \val) .endm .macro ldrh1 reg, ptr, val @@ -33,7 +33,7 @@ .endm .macro strh1 reg, ptr, val - strh \reg, [\ptr], \val + USER_MC(9998f, strh \reg, [\ptr], \val) .endm .macro ldr1 reg, ptr, val @@ -41,7 +41,7 @@ .endm .macro str1 reg, ptr, val - str \reg, [\ptr], \val + USER_MC(9998f, str \reg, [\ptr], \val) .endm .macro ldp1 reg1, reg2, ptr, val @@ -49,7 +49,7 @@ .endm .macro stp1 reg1, reg2, ptr, val - stp \reg1, \reg2, [\ptr], \val + USER_MC(9998f, stp \reg1, \reg2, [\ptr], \val) .endm end .req x5 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 4ec59704b8f2..2b1a3ef0e4a0 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -20,7 +20,7 @@ * x0 - bytes not copied */ .macro ldrb1 reg, ptr, val - ldrb \reg, [\ptr], \val + USER_MC(9998f, ldrb \reg, [\ptr], \val) .endm .macro strb1 reg, ptr, val @@ -28,7 +28,7 @@ .endm .macro ldrh1 reg, ptr, val - ldrh \reg, [\ptr], \val + USER_MC(9998f, ldrh \reg, [\ptr], \val) .endm .macro strh1 reg, ptr, val @@ -36,7 +36,7 @@ .endm .macro ldr1 reg, ptr, val - ldr \reg, [\ptr], \val + USER_MC(9998f, ldr \reg, [\ptr], \val) .endm .macro str1 reg, ptr, val @@ -44,7 +44,7 @@ .endm .macro ldp1 reg1, reg2, ptr, val - ldp \reg1, \reg2, [\ptr], \val + USER_MC(9998f, ldp \reg1, \reg2, [\ptr], \val) .endm .macro stp1 reg1, reg2, ptr, val -- Gitee From 32f0d1050b37e87d0197cb4e44ff13a14889fd97 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:54 +0000 Subject: [PATCH 07/13] arm64: get/put_user support machine check safe Add {get, put}_user() to machine check safe. If get/put fail due to hardware memory error, only the relevant processes are affected, so killing the user process and isolate the user page with hardware memory errors is a more reasonable choice than kernel panic. 
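Likewise for single values; a hypothetical caller sketch (example_report() is made
up). Note that accesses made under KERNEL_DS take only the regular extable entry,
so they keep the old panic behaviour:

        static int example_report(u32 __user *uptr, u32 status)
        {
                /* -EFAULT on a bad address or an uncorrected memory error */
                if (put_user(status, uptr))
                        return -EFAULT;

                return 0;
        }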
Signed-off-by: Tong Tiangen Conflicts: arch/arm64/include/asm/uaccess.h Signed-off-by: huwentao --- arch/arm64/include/asm/uaccess.h | 43 ++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 2bb49c4759f9..6bb1f51ebc5a 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -111,6 +111,11 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si #define _ASM_MC_EXTABLE(from, to) #endif +#define _ASM_KACCESS_EXTABLE(from, to) _ASM_EXTABLE(from, to) +#define _ASM_UACCESS_EXTABLE(from, to) \ + _ASM_EXTABLE(from, to) \ + _ASM_MC_EXTABLE(from, to) + /* * User access enabling/disabling. */ @@ -262,7 +267,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. */ -#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature, type) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -273,29 +278,29 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) " mov %1, #0\n" \ " b 2b\n" \ " .previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_##type##ACCESS_EXTABLE(1b, 3b) \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __raw_get_mem(x, ptr, err) \ +#define __raw_get_mem(x, ptr, err, type) \ do { \ unsigned long __gu_val; \ switch (sizeof(*(ptr))) { \ case 1: \ __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 2: \ __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 4: \ __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 8: \ __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ default: \ BUILD_BUG(); \ @@ -307,7 +312,10 @@ do { \ do { \ __chk_user_ptr(ptr); \ uaccess_enable_not_uao(); \ - __raw_get_mem(x, ptr, err); \ + if (get_fs() == KERNEL_DS) \ + __raw_get_mem(x, ptr, err, K); \ + else \ + __raw_get_mem(x, ptr, err, U); \ uaccess_disable_not_uao(); \ } while (0) @@ -332,7 +340,7 @@ do { \ #define get_user __get_user -#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature, type) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -342,29 +350,29 @@ do { \ "3: mov %w0, %3\n" \ " b 2b\n" \ " .previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_##type##ACCESS_EXTABLE(1b, 3b) \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __raw_put_mem(x, ptr, err) \ +#define __raw_put_mem(x, ptr, err, type) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ switch (sizeof(*(ptr))) { \ case 1: \ __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 2: \ __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 4: \ __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ case 8: \ __put_mem_asm("str", 
"sttr", "%x", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + (err), ARM64_HAS_UAO, type); \ break; \ default: \ BUILD_BUG(); \ @@ -375,7 +383,10 @@ do { \ do { \ __chk_user_ptr(ptr); \ uaccess_enable_not_uao(); \ - __raw_put_mem(x, ptr, err); \ + if (get_fs() == KERNEL_DS) \ + __raw_put_mem(x, ptr, err, K); \ + else \ + __raw_put_mem(x, ptr, err, U); \ uaccess_disable_not_uao(); \ } while (0) -- Gitee From 124a87433d2649b627a17af97be7b284f1d4265a Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:55 +0000 Subject: [PATCH 08/13] arm64: add cow to machine check safe In the cow(copy on write) processing, the data of the user process is copied, when hardware memory error is encountered during copy, only the relevant processes are affected, so killing the user process and isolate the user page with hardware memory errors is a more reasonable choice than kernel panic. Add new helper copy_page_mc() which provide a page copy implementation with machine check safe. At present, only used in cow. In future, we can expand more scenes. As long as the consequences of page copy failure are not fatal(eg: only affect user process), we can use this helper. The copy_page_mc() in copy_page_mc.S is largely borrows from copy_page() in copy_page.S and the main difference is copy_page_mc() add extable entry to every load/store insn to support machine check safe. largely to keep the patch simple. If needed those optimizations can be folded in. Signed-off-by: Tong Tiangen Conflicts: arch/arm64/include/asm/page.h arch/arm64/mm/copypage.c include/linux/highmem.h mm/memory.c [ Miss mte implementations. ] Signed-off-by: huwentao --- arch/arm64/include/asm/assembler.h | 5 ++ arch/arm64/include/asm/page.h | 10 ++++ arch/arm64/lib/Makefile | 2 + arch/arm64/lib/copy_page_mc.S | 80 ++++++++++++++++++++++++++++++ arch/arm64/mm/copypage.c | 19 +++++++ include/linux/highmem.h | 8 +++ mm/memory.c | 2 +- 7 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/lib/copy_page_mc.S diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5d205fc98d70..0e1424ee75e1 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -165,6 +165,11 @@ alternative_endif #define USER_MC(l, x...) \ 9999: x; \ _asm_mc_extable 9999b, l + +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_mc_extable 9999b, l + /* * Register aliases. 
*/ diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index b91593452d56..7528801a315f 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -52,6 +52,16 @@ static inline void copy_page(void *to, const void *from) #endif extern void clear_page(void *to); +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +extern void copy_page_mc(void *to, const void *from); +void copy_highpage_mc(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_HIGHPAGE_MC + +void copy_user_highpage_mc(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC +#endif + #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 6b762ee451b2..fb062dc732bc 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -14,6 +14,8 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o +lib-$(CONFIG_ARCH_HAS_UACCESS_MCSAFE) += copy_page_mc.o + obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S new file mode 100644 index 000000000000..8d4b9159fa8a --- /dev/null +++ b/arch/arm64/lib/copy_page_mc.S @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include + +/* + * Copy a page from src to dest (both are page aligned) with machine check + * + * Parameters: + * x0 - dest + * x1 - src + */ +SYM_FUNC_START(copy_page_mc) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. 
+ prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #256 + add x1, x1, #128 +1: + tst x0, #(PAGE_SIZE - 1) + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #128 + add x1, x1, #128 + + b.ne 1b + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) + +9998: ret + +SYM_FUNC_END(copy_page_mc) +EXPORT_SYMBOL(copy_page_mc) diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 7eee44d716f9..001d38c95bb2 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -34,3 +34,22 @@ void fast_copy_page_switched(const void *from, void *to) (unsigned long)to); } EXPORT_SYMBOL_GPL(fast_copy_page_switched); + +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +void copy_highpage_mc(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + + copy_page_mc(kto, kfrom); +} +EXPORT_SYMBOL(copy_highpage_mc); + +void copy_user_highpage_mc(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + copy_highpage_mc(to, from); + flush_dcache_page(to); +} +EXPORT_SYMBOL_GPL(copy_user_highpage_mc); +#endif diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ea5cdbd8c2c3..5f3e248e03c0 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -261,6 +261,10 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif +#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC +#define copy_user_highpage_mc copy_user_highpage +#endif + #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) @@ -276,4 +280,8 @@ static inline void copy_highpage(struct page *to, struct page *from) #endif +#ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC +#define copy_highpage_mc copy_highpage +#endif + #endif /* _LINUX_HIGHMEM_H */ diff --git a/mm/memory.c b/mm/memory.c index c55d3659112b..f9fa380d3677 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2172,7 +2172,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, debug_dma_assert_idle(src); if (likely(src)) { - copy_user_highpage(dst, src, addr, vma); + copy_user_highpage_mc(dst, src, 
addr, vma); return true; } -- Gitee From 8cabe3f8a286ecd78bb4bdcfc68ec8eb1121f25e Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:56 +0000 Subject: [PATCH 09/13] arm64: introduce copy_mc_to_kernel() implementation The copy_mc_to_kernel() helper is memory copy implementation that handles source exceptions. It can be used in memory copy scenarios that tolerate hardware memory errors(e.g: pmem_read/dax_copy_to_iter). Currnently, only x86 and ppc suuport this helper, after arm64 support machine check safe framework, we introduce copy_mc_to_kernel() implementation. Signed-off-by: Tong Tiangen Conflicts: arch/arm64/lib/Makefile arch/arm64/include/asm/uaccess.h arch/arm64/include/asm/string.h [ Rename copy_mc_to_kernel to memcpy_mcsafe ] Signed-off-by: huwentao --- arch/arm64/include/asm/string.h | 24 +++++++++++ arch/arm64/lib/Makefile | 2 +- arch/arm64/lib/memcpy_mc.S | 73 +++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/lib/memcpy_mc.S diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index b31e8e87a0db..5463bfb17a18 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -35,6 +35,10 @@ extern void *memchr(const void *, int, __kernel_size_t); extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); +#define __HAVE_ARCH_MEMCPY_MC +extern unsigned long *memcpy_mcs(void *, const void *, __kernel_size_t); +extern unsigned long *__memcpy_mcs(void *, const void *, __kernel_size_t); + #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); extern void *__memmove(void *, const void *, __kernel_size_t); @@ -48,6 +52,25 @@ extern void *__memset(void *, int, __kernel_size_t); void memcpy_flushcache(void *dst, const void *src, size_t cnt); #endif +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +#define __HAVE_ARCH_MEMCPY_MCSAFE +/** + * memcpy_mcsafe - memory copy that handles source exceptions + * + * @dst: destination address + * @src: source address + * @len: number of bytes to copy + * + * Return 0 for success, or number of bytes not copied if there was an + * exception. + */ +static inline unsigned long __must_check +memcpy_mcsafe(void *to, const void *from, unsigned long size) +{ + return (unsigned long)memcpy_mcs(to, from, size); +} +#endif + #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) /* @@ -56,6 +79,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt); */ #define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memcpy_mcs(dst, src, len) __memcpy_mcs(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index fb062dc732bc..7451eb61d46d 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o slow_copy_page.o \ - clear_page.o memchr.o memcpy.o memmove.o memset.o \ + clear_page.o memchr.o memcpy.o memcpy_mc.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ strchr.o strrchr.o tishift.o diff --git a/arch/arm64/lib/memcpy_mc.S b/arch/arm64/lib/memcpy_mc.S new file mode 100644 index 000000000000..1e76a0d1cc43 --- /dev/null +++ b/arch/arm64/lib/memcpy_mc.S @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 ARM Ltd. 
+ * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ + */ + +#include +#include +#include + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * with machine check safe. + * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - bytes not copied + */ + .macro ldrb1 reg, ptr, val + CPY_MC(9998f, ldrb \reg, [\ptr], \val) + .endm + + .macro strb1 reg, ptr, val + CPY_MC(9998f, strb \reg, [\ptr], \val) + .endm + + .macro ldrh1 reg, ptr, val + CPY_MC(9998f, ldrh \reg, [\ptr], \val) + .endm + + .macro strh1 reg, ptr, val + CPY_MC(9998f, strh \reg, [\ptr], \val) + .endm + + .macro ldr1 reg, ptr, val + CPY_MC(9998f, ldr \reg, [\ptr], \val) + .endm + + .macro str1 reg, ptr, val + CPY_MC(9998f, str \reg, [\ptr], \val) + .endm + + .macro ldp1 reg1, reg2, ptr, val + CPY_MC(9998f, ldp \reg1, \reg2, [\ptr], \val) + .endm + + .macro stp1 reg1, reg2, ptr, val + CPY_MC(9998f, stp \reg1, \reg2, [\ptr], \val) + .endm + +end .req x5 +SYM_FUNC_START_ALIAS(__memcpy_mcs) +SYM_FUNC_START_WEAK_PI(memcpy_mcs) + add end, x0, x2 +#include "copy_template.S" + mov x0, #0 + ret + +9998: sub x0, end, dst + ret +SYM_FUNC_END_PI(memcpy_mcs) +EXPORT_SYMBOL(memcpy_mcs) +SYM_FUNC_END_ALIAS(__memcpy_mcs) +EXPORT_SYMBOL(__memcpy_mcs) -- Gitee From f06f1a06f2c2fd8469033b4249fccc0d9fcf9bf0 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:57 +0000 Subject: [PATCH 10/13] arm64: add dump_user_range() to machine check safe In the dump_user_range() processing, the data of the user process is dump to corefile, when hardware memory error is encountered during dump, only the relevant processes are affected, so killing the user process and isolate the user page with hardware memory errors is a more reasonable choice than kernel panic. The dump_user_range() typical usage scenarios is coredump. Coredump file writing to fs is related to the specific implementation of fs's write_iter operation. This patch only supports two typical fs write function (_copy_from_iter/iov_iter_copy_from_user_atomic) which is used by ext4/tmpfs/pipefs. Signed-off-by: Tong Tiangen Conflicts: include/linux/sched.h [ There is no place in task->flags. Miss commit afc63a97b764. 
] Signed-off-by: huwentao --- fs/binfmt_elf.c | 7 +++++++ fs/binfmt_elf_fdpic.c | 7 +++++++ include/linux/sched.h | 5 +++++ lib/iov_iter.c | 14 ++++++++++++-- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7ce3cfd965d2..e0c2ba3a50ba 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2349,7 +2349,14 @@ static int elf_core_dump(struct coredump_params *cprm) page = get_dump_page(addr); if (page) { void *kaddr = kmap(page); + +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 1; +#endif stop = !dump_emit(cprm, kaddr, PAGE_SIZE); +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 0; +#endif kunmap(page); put_page(page); } else diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index d86ebd0dcc3d..166927b6d02a 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1509,7 +1509,14 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm) struct page *page = get_dump_page(addr); if (page) { void *kaddr = kmap(page); + +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 1; +#endif res = dump_emit(cprm, kaddr, PAGE_SIZE); +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 0; +#endif kunmap(page); put_page(page); } else { diff --git a/include/linux/sched.h b/include/linux/sched.h index d7999c06015f..f6ab69c525a7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1300,6 +1300,11 @@ struct task_struct { */ randomized_struct_fields_end +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + /* Task coredump support machine check safe */ + unsigned long is_coredump_mcs; +#endif + KABI_RESERVE(1); KABI_RESERVE(2); KABI_RESERVE(3); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 9ea6f7bb8309..abe251587ec9 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -736,6 +736,16 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe); #endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */ +static void *memcpy_iter(void *to, const void *from, __kernel_size_t size) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + if (current->is_coredump_mcs) + return (void *)memcpy_mcsafe(to, from, size); + else +#endif + return memcpy(to, from, size); +} + size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; @@ -749,7 +759,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy_iter((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) ) return bytes; @@ -979,7 +989,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy_iter((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) ) kunmap_atomic(kaddr); return bytes; -- Gitee From 941ede678c1875141648d2c8e6650e299267d8ae Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 18 Nov 2022 02:08:58 +0000 Subject: [PATCH 11/13] arm64: add machine check safe sysctl interface Add 
/proc/sys/kernel/machine_check_safe_enable. Set 1(default value) to enable machine check safe support. Set 0(default) to disable machine check safe support. Signed-off-by: Tong Tiangen Conflicts: Documentation/admin-guide/sysctl/kernel.rst Signed-off-by: huwentao --- Documentation/admin-guide/sysctl/kernel.rst | 22 +++++++++++++++++++++ arch/arm64/include/asm/processor.h | 2 ++ arch/arm64/mm/fault.c | 5 +++++ kernel/sysctl.c | 11 +++++++++++ 4 files changed, 40 insertions(+) diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 032c7cd3cede..834ed1145969 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -48,6 +48,7 @@ show up in /proc/sys/kernel: - hyperv_record_panic_msg - kexec_load_disabled - kptr_restrict +- machine_check_safe [ arm64 only ] - l2cr [ PPC only ] - modprobe ==> Documentation/debugging-modules.txt - modules_disabled @@ -437,6 +438,27 @@ values to unprivileged users is a concern. When kptr_restrict is set to (2), kernel pointers printed using %pK will be replaced with 0's regardless of privileges. +machine_check_safe (arm64 only) +================================ + +Controls the kernel's behaviour when an hardware memory error is +encountered in the following scenarios: + += =================== +1 cow +2 copy_mc_to_kernel +3 copy_from_user +4 copy_to_user +5 get_user +6 put_user += =================== + +Correspondence between sysctl value and behavior: + += ======================= +0 Kernel panic +1 Kill related processes += ======================= l2cr: (PPC only) ================ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5623685c7d13..9ded66ffb800 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -81,6 +81,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ +extern int sysctl_machine_check_safe; + #ifndef CONFIG_ARM64_FORCE_52BIT #define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\ DEFAULT_MAP_WINDOW) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b89dcfea8e5f..49a6fab74c46 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -40,6 +40,8 @@ #include #include +int sysctl_machine_check_safe = 1; + struct fault_info { int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); @@ -1409,6 +1411,9 @@ static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr, if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_MCSAFE)) return false; + if (!sysctl_machine_check_safe) + return false; + if (user_mode(regs)) return false; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 45183289a51e..f6dcc10ad3e9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2304,6 +2304,17 @@ static struct ctl_table debug_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, +#endif +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + { + .procname = "machine_check_safe", + .data = &sysctl_machine_check_safe, + .maxlen = sizeof(sysctl_machine_check_safe), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #endif { } }; -- Gitee From bc81ec6eaa9447236efaa4fb4e7de650d01e0620 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Fri, 13 Jan 2023 07:23:43 +0000 Subject: [PATCH 12/13] arm64: fix return value type of memcpy_mcs() The return value type of function memcpy_mcs() should be unsigned long. 
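Since memcpy_mcsafe() reports the number of bytes left uncopied (0 on success), a
hypothetical caller would use that value directly, which only reads correctly once
the return type is a plain unsigned long (example_read_pmem() is made up):

        static int example_read_pmem(void *dst, const void *src, size_t len)
        {
                unsigned long rem = memcpy_mcsafe(dst, src, len);

                if (rem)
                        return -EIO;    /* uncorrected error; 'rem' bytes not copied */

                return 0;
        }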
Currently, using this function will force type conversion, but the error of the return value type must be corrected. Fixes: ee7b5d7559e6 ("arm64: introduce copy_mc_to_kernel() implementation") Signed-off-by: Tong Tiangen Signed-off-by: huwentao --- arch/arm64/include/asm/string.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 5463bfb17a18..3012ea9c3a1f 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -36,8 +36,8 @@ extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMCPY_MC -extern unsigned long *memcpy_mcs(void *, const void *, __kernel_size_t); -extern unsigned long *__memcpy_mcs(void *, const void *, __kernel_size_t); +extern unsigned long memcpy_mcs(void *, const void *, __kernel_size_t); +extern unsigned long __memcpy_mcs(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); -- Gitee From 557051f6ef902e12df6b9da5bbd9a014935a5e02 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Tue, 10 Dec 2024 12:06:41 +0800 Subject: [PATCH 13/13] ACPI: APEI: send SIGBUS to current task if synchronous memory error not recovered Synchronous error was detected as a result of user-space process accessing a 2-bit uncorrected error. The CPU will take a synchronous error exception such as Synchronous External Abort (SEA) on Arm64. The kernel will queue a memory_failure() work which poisons the related page, unmaps the page, and then sends a SIGBUS to the process, so that a system wide panic can be avoided. However, no memory_failure() work will be queued when abnormal synchronous errors occur. These errors can include situations such as invalid PA, unexpected severity, no memory failure config support, invalid GUID section, etc. In such case, the user-space process will trigger SEA again. This loop can potentially exceed the platform firmware threshold or even trigger a kernel hard lockup, leading to a system reboot. Fix it by performing a force kill if no memory_failure() work is queued for synchronous errors. Signed-off-by: Shuai Xue Reviewed-by: Jarkko Sakkinen Reviewed-by: Jonathan Cameron Reviewed-by: Yazen Ghannam Conflicts: drivers/acpi/apei/ghes.c [fix context conflicts and print format] Signed-off-by: Tong Tiangen Conflicts: drivers/acpi/apei/ghes.c [ Introduce is_hest_sync_notify ] Signed-off-by: huwentao --- drivers/acpi/apei/ghes.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 4adf9ef2b0e2..90b55f84fd1b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -41,6 +41,9 @@ #include #include #include +#ifdef CONFIG_ARM64 +#include +#endif #include #include @@ -517,6 +520,22 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) #endif } +#ifdef CONFIG_ARM64 +/* + * A platform may describe one error source for the handling of synchronous + * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI + * or External Interrupt). On x86, the HEST notifications are always + * asynchronous, so only SEA on ARM is delivered as a synchronous + * notification. 
+ */ +static inline bool is_hest_sync_notify(struct ghes *ghes) +{ + u8 notify_type = ghes->generic->notify.type; + + return notify_type == ACPI_HEST_NOTIFY_SEA; +} +#endif + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -526,6 +545,9 @@ static bool ghes_do_proc(struct ghes *ghes, guid_t *sec_type; const guid_t *fru_id = &guid_null; char *fru_text = ""; +#ifdef CONFIG_ARM64 + bool sync = is_hest_sync_notify(ghes); +#endif sev = ghes_severity(estatus->error_severity); apei_estatus_for_each_section(estatus, gdata) { @@ -562,6 +584,18 @@ static bool ghes_do_proc(struct ghes *ghes, } } +#ifdef CONFIG_ARM64 + /* + * If no memory failure work is queued for abnormal synchronous + * errors, do a force kill. + */ + if (sync && !work_queued) { + pr_err(HW_ERR GHES_PFX "%s:%d: hardware memory corruption (SIGBUS)\n", + current->comm, task_pid_nr(current)); + force_sig(SIGBUS); + } +#endif + return work_queued; } -- Gitee
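From user space, the force kill above arrives as an ordinary SIGBUS, which an
application can catch to fail cleanly instead of looping on the SEA. A minimal,
hypothetical sketch (si_addr may be unset because force_sig() carries no fault
address):

        #include <signal.h>
        #include <unistd.h>

        static void bus_handler(int sig, siginfo_t *info, void *ctx)
        {
                /* async-signal-safe bail-out */
                _exit(1);
        }

        int main(void)
        {
                struct sigaction sa = {
                        .sa_sigaction = bus_handler,
                        .sa_flags     = SA_SIGINFO,
                };

                sigaction(SIGBUS, &sa, NULL);
                /* ... access memory that may carry an uncorrected error ... */
                return 0;
        }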