diff --git a/0001-Support-LoongArch.patch b/0001-Support-LoongArch.patch new file mode 100644 index 0000000000000000000000000000000000000000..abfce5fd63edf6189fb8fb1c4d87570d3362550c --- /dev/null +++ b/0001-Support-LoongArch.patch @@ -0,0 +1,3720 @@ +diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake +index 416777171..1eb708029 100644 +--- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake ++++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake +@@ -16,6 +16,7 @@ set(SPARCV9 sparcv9) + set(WASM32 wasm32) + set(WASM64 wasm64) + set(VE ve) ++set(LOONGARCH64 loongarch64) + + if(APPLE) + set(ARM64 arm64) +@@ -44,7 +45,7 @@ if(OS_NAME MATCHES "Linux") + elseif (OS_NAME MATCHES "Windows") + set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64}) + elseif(OS_NAME MATCHES "Android") +- set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64}) ++set(ALL_FUZZER_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64} ${LOONGARCH64}) + elseif(OS_NAME MATCHES "Fuchsia") + set(ALL_FUZZER_SUPPORTED_ARCH ${X86_64} ${ARM64} ${RISCV64}) + else() +@@ -58,24 +59,22 @@ else() + set(ALL_LSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64} ${ARM64} ${ARM32} + ${PPC64} ${S390X} ${RISCV64} ${HEXAGON} ${LOONGARCH64}) + endif() +-set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X} +- ${LOONGARCH64}) ++set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X} ${LOONGARCH64}) + set(ALL_HWASAN_SUPPORTED_ARCH ${X86_64} ${ARM64} ${RISCV64}) + set(ALL_MEMPROF_SUPPORTED_ARCH ${X86_64}) + set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC32} ${PPC64} + ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON} + ${RISCV32} ${RISCV64} ${LOONGARCH64}) + set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64} ${S390X} +- ${LOONGARCH64} ${RISCV64}) ++ ${RISCV64} ${LOONGARCH64}) + set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} 
${RISCV64} +- ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON} +- ${LOONGARCH64}) ++ ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON} ${LOONGARCH64}) + set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64} + ${HEXAGON} ${LOONGARCH64}) + set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS64} + ${HEXAGON} ${LOONGARCH64}) + set(ALL_SCUDO_STANDALONE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} +- ${MIPS32} ${MIPS64} ${PPC64} ${HEXAGON} ${LOONGARCH64} ${RISCV64}) ++ ${MIPS32} ${MIPS64} ${PPC64} ${HEXAGON} ${RISCV64}) + if(APPLE) + set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM64}) + else() +diff --git a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake +index 25e782371..bcc66ad72 100644 +--- a/compiler-rt/cmake/Modules/CompilerRTUtils.cmake ++++ b/compiler-rt/cmake/Modules/CompilerRTUtils.cmake +@@ -181,13 +181,13 @@ macro(detect_target_arch) + elseif(__I386) + add_default_target_arch(i386) + elseif(__LOONGARCH) +- if(CMAKE_SIZEOF_VOID_P EQUAL "4") +- add_default_target_arch(loongarch32) +- elseif(CMAKE_SIZEOF_VOID_P EQUAL "8") +- add_default_target_arch(loongarch64) +- else() +- message(FATAL_ERROR "Unsupported pointer size for LoongArch") +- endif() ++ if(CMAKE_SIZEOF_VOID_P EQUAL "4") ++ add_default_target_arch(loongarch32) ++ elseif(CMAKE_SIZEOF_VOID_P EQUAL "8") ++ add_default_target_arch(loongarch64) ++ else() ++ message(FATAL_ERROR "Unsupported pointer size for LoongArch") ++ endif() + elseif(__MIPS64) # must be checked before __MIPS + add_default_target_arch(mips64) + elseif(__MIPS) +diff --git a/compiler-rt/cmake/base-config-ix.cmake b/compiler-rt/cmake/base-config-ix.cmake +index 908c8a402..f552d25e3 100644 +--- a/compiler-rt/cmake/base-config-ix.cmake ++++ b/compiler-rt/cmake/base-config-ix.cmake +@@ -278,6 +278,8 @@ macro(test_targets) + test_target_arch(wasm64 "" "--target=wasm64-unknown-unknown") + 
elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "ve") + test_target_arch(ve "__ve__" "--target=ve-unknown-none") ++ elseif("${COMPILER_RT_DEFAULT_TARGET_ARCH}" MATCHES "loongarch64") ++ test_target_arch(loongarch64 "" "") + endif() + set(COMPILER_RT_OS_SUFFIX "") + endif() +diff --git a/compiler-rt/cmake/builtin-config-ix.cmake b/compiler-rt/cmake/builtin-config-ix.cmake +index b17c43bf6..d99d823a5 100644 +--- a/compiler-rt/cmake/builtin-config-ix.cmake ++++ b/compiler-rt/cmake/builtin-config-ix.cmake +@@ -55,7 +55,6 @@ set(AVR avr) + set(HEXAGON hexagon) + set(X86 i386) + set(X86_64 x86_64) +-set(LOONGARCH64 loongarch64) + set(MIPS32 mips mipsel) + set(MIPS64 mips64 mips64el) + set(PPC32 powerpc powerpcspe) +@@ -67,6 +66,7 @@ set(SPARCV9 sparcv9) + set(WASM32 wasm32) + set(WASM64 wasm64) + set(VE ve) ++set(LOONGARCH64 loongarch64) + + if(APPLE) + set(ARM64 arm64 arm64e) +diff --git a/compiler-rt/cmake/crt-config-ix.cmake b/compiler-rt/cmake/crt-config-ix.cmake +index ebc7d671e..1e1f271e3 100644 +--- a/compiler-rt/cmake/crt-config-ix.cmake ++++ b/compiler-rt/cmake/crt-config-ix.cmake +@@ -23,7 +23,6 @@ set(ARM32 arm armhf) + set(HEXAGON hexagon) + set(X86 i386) + set(X86_64 x86_64) +-set(LOONGARCH64 loongarch64) + set(MIPS32 mips mipsel) + set(MIPS64 mips64 mips64el) + set(PPC32 powerpc powerpcspe) +@@ -31,10 +30,11 @@ set(PPC64 powerpc64 powerpc64le) + set(RISCV32 riscv32) + set(RISCV64 riscv64) + set(VE ve) ++set(LOONGARCH64 loongarch64) + + set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC32} +- ${PPC64} ${RISCV32} ${RISCV64} ${VE} ${HEXAGON} ${LOONGARCH64} +- ${MIPS32} ${MIPS64} ${SPARC} ${SPARCV9}) ++ ${PPC64} ${RISCV32} ${RISCV64} ${VE} ${HEXAGON} ++ ${MIPS32} ${MIPS64} ${SPARC} ${SPARCV9} ${LOONGARCH64}) + + include(CompilerRTUtils) + +diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp +index 4de2fa356..8d9b992d8 100644 +--- a/compiler-rt/lib/asan/asan_interceptors.cpp ++++ 
b/compiler-rt/lib/asan/asan_interceptors.cpp +@@ -43,7 +43,7 @@ + + # if defined(__i386) && SANITIZER_LINUX + # define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" +-# elif defined(__mips__) && SANITIZER_LINUX ++# elif (defined(__mips__) || defined(__loongarch__)) && SANITIZER_LINUX + # define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" + # endif + +diff --git a/compiler-rt/lib/asan/asan_interceptors.h b/compiler-rt/lib/asan/asan_interceptors.h +index 826b45f5a..e3d793c80 100644 +--- a/compiler-rt/lib/asan/asan_interceptors.h ++++ b/compiler-rt/lib/asan/asan_interceptors.h +@@ -112,7 +112,7 @@ void InitializePlatformInterceptors(); + + #if SANITIZER_LINUX && \ + (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \ +- defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) ++ defined(__x86_64__) || SANITIZER_RISCV64) + # define ASAN_INTERCEPT_VFORK 1 + #else + # define ASAN_INTERCEPT_VFORK 0 +diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h +index c5f95c07a..16f7ee664 100644 +--- a/compiler-rt/lib/asan/asan_mapping.h ++++ b/compiler-rt/lib/asan/asan_mapping.h +@@ -167,6 +167,8 @@ + # define ASAN_SHADOW_OFFSET_DYNAMIC + # elif defined(__mips__) + # define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000 ++# elif defined(__loongarch__) ++# define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000 + # elif SANITIZER_FREEBSD + # define ASAN_SHADOW_OFFSET_CONST 0x40000000 + # elif SANITIZER_NETBSD +@@ -201,6 +203,8 @@ + # define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000 + # elif defined(__mips64) + # define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000 ++# elif defined(__loongarch64) ++# define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000 + # elif defined(__sparc__) + # define ASAN_SHADOW_OFFSET_CONST 0x0000080000000000 + # elif SANITIZER_LOONGARCH64 +diff --git a/compiler-rt/lib/asan/scripts/asan_symbolize.py b/compiler-rt/lib/asan/scripts/asan_symbolize.py +index b08769614..18b950a43 100755 +--- a/compiler-rt/lib/asan/scripts/asan_symbolize.py 
++++ b/compiler-rt/lib/asan/scripts/asan_symbolize.py +@@ -63,8 +63,7 @@ def is_valid_arch(s): + "powerpc64le", + "s390x", + "s390", +- "riscv64", +- "loongarch64", ++ "riscv64" + ] + + +diff --git a/compiler-rt/lib/asan/tests/asan_test.cpp b/compiler-rt/lib/asan/tests/asan_test.cpp +index 827c2ae3a..265920241 100644 +--- a/compiler-rt/lib/asan/tests/asan_test.cpp ++++ b/compiler-rt/lib/asan/tests/asan_test.cpp +@@ -629,9 +629,9 @@ NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) { + siglongjmp(buf, 1); + } + +-#if !defined(__ANDROID__) && !defined(__arm__) && !defined(__aarch64__) && \ +- !defined(__mips__) && !defined(__mips64) && !defined(__s390__) && \ +- !defined(__riscv) && !defined(__loongarch__) ++# if !defined(__ANDROID__) && !defined(__arm__) && !defined(__aarch64__) && \ ++ !defined(__mips__) && !defined(__mips64) && !defined(__s390__) && \ ++ !defined(__riscv) && !defined(__loongarch__) + NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { + // create three red zones for these two stack objects. 
+ int a; +diff --git a/compiler-rt/lib/builtins/CMakeLists.txt b/compiler-rt/lib/builtins/CMakeLists.txt +index 28ded8766..48b51723b 100644 +--- a/compiler-rt/lib/builtins/CMakeLists.txt ++++ b/compiler-rt/lib/builtins/CMakeLists.txt +@@ -14,7 +14,7 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + + set(LLVM_COMMON_CMAKE_UTILS "${COMPILER_RT_SOURCE_DIR}/../cmake") + +- # Add path for custom modules ++# Add path for custom modules + list(INSERT CMAKE_MODULE_PATH 0 + "${COMPILER_RT_SOURCE_DIR}/cmake" + "${COMPILER_RT_SOURCE_DIR}/cmake/Modules" +@@ -670,6 +670,13 @@ set(hexagon_SOURCES + ${GENERIC_TF_SOURCES} + ) + ++ ++set(mips_SOURCES ${GENERIC_SOURCES}) ++set(mipsel_SOURCES ${mips_SOURCES}) ++set(mips64_SOURCES ${GENERIC_TF_SOURCES} ++ ${mips_SOURCES}) ++set(mips64el_SOURCES ${GENERIC_TF_SOURCES} ++ ${mips_SOURCES}) + set(loongarch_SOURCES + loongarch/fp_mode.c + ${GENERIC_SOURCES} +@@ -679,13 +686,6 @@ set(loongarch64_SOURCES + ${loongarch_SOURCES} + ) + +-set(mips_SOURCES ${GENERIC_SOURCES}) +-set(mipsel_SOURCES ${mips_SOURCES}) +-set(mips64_SOURCES ${GENERIC_TF_SOURCES} +- ${mips_SOURCES}) +-set(mips64el_SOURCES ${GENERIC_TF_SOURCES} +- ${mips_SOURCES}) +- + set(powerpc_SOURCES ${GENERIC_SOURCES}) + + set(powerpcspe_SOURCES ${GENERIC_SOURCES}) +diff --git a/compiler-rt/lib/interception/tests/CMakeLists.txt b/compiler-rt/lib/interception/tests/CMakeLists.txt +index 644a57664..4b468a2ca 100644 +--- a/compiler-rt/lib/interception/tests/CMakeLists.txt ++++ b/compiler-rt/lib/interception/tests/CMakeLists.txt +@@ -1,6 +1,6 @@ + include(CompilerRTCompile) + +-filter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el) ++filter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el loongarch64) + + set(INTERCEPTION_UNITTESTS + interception_linux_test.cpp +diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp +index 12d579a93..215379a74 100644 +--- 
a/compiler-rt/lib/lsan/lsan_allocator.cpp ++++ b/compiler-rt/lib/lsan/lsan_allocator.cpp +@@ -28,7 +28,7 @@ extern "C" void *memset(void *ptr, int value, uptr num); + namespace __lsan { + #if defined(__i386__) || defined(__arm__) + static const uptr kMaxAllowedMallocSize = 1ULL << 30; +-#elif defined(__mips64) || defined(__aarch64__) ++#elif defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + static const uptr kMaxAllowedMallocSize = 4ULL << 30; + #else + static const uptr kMaxAllowedMallocSize = 8ULL << 30; +diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp +index 0ecded8b2..5336c24ca 100644 +--- a/compiler-rt/lib/lsan/lsan_common.cpp ++++ b/compiler-rt/lib/lsan/lsan_common.cpp +@@ -274,15 +274,14 @@ static inline bool MaybeUserPointer(uptr p) { + return ((p & kPointerMask) == 0); + # elif defined(__mips64) + return ((p >> 40) == 0); ++# elif defined(__loongarch64) ++ return ((p >> 40) == 0); + # elif defined(__aarch64__) + // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in + // address translation and can be used to store a tag. + constexpr uptr kPointerMask = 255ULL << 48; + // Accept up to 48 bit VMA. + return ((p & kPointerMask) == 0); +-# elif defined(__loongarch_lp64) +- // Allow 47-bit user-space VMA at current. 
+- return ((p >> 47) == 0); + # else + return true; + # endif +diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h +index c598b6210..85064a850 100644 +--- a/compiler-rt/lib/lsan/lsan_common.h ++++ b/compiler-rt/lib/lsan/lsan_common.h +@@ -38,14 +38,12 @@ + # define CAN_SANITIZE_LEAKS 0 + #elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \ + (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ +- defined(__powerpc64__) || defined(__s390x__)) ++ defined(__powerpc64__) || defined(__s390x__) || defined(__loongarch64)) + # define CAN_SANITIZE_LEAKS 1 + #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE) + # define CAN_SANITIZE_LEAKS 1 + #elif defined(__arm__) && SANITIZER_LINUX + # define CAN_SANITIZE_LEAKS 1 +-#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX +-# define CAN_SANITIZE_LEAKS 1 + #elif SANITIZER_RISCV64 && SANITIZER_LINUX + # define CAN_SANITIZE_LEAKS 1 + #elif SANITIZER_NETBSD || SANITIZER_FUCHSIA +diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h +index 7fb58be67..9798fc510 100644 +--- a/compiler-rt/lib/msan/msan.h ++++ b/compiler-rt/lib/msan/msan.h +@@ -66,8 +66,32 @@ const MappingDesc kMemoryLayout[] = { + {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"}, + {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}}; + +-#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) +-#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) ++# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) ++# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) ++ ++#elif SANITIZER_LINUX && defined(__loongarch64) ++ ++// LOONGARCH64 maps: ++// - 0x0000000000-0x0200000000: Program own segments ++// - 0xa200000000-0xc000000000: PIE program segments ++// - 0xe200000000-0xffffffffff: libraries segments. 
++const MappingDesc kMemoryLayout[] = { ++ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"}, ++ {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"}, ++ {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"}, ++ {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"}, ++ {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"}, ++ {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"}, ++ {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"}, ++ {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"}, ++ {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"}, ++ {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}}; ++ ++# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL) ++# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL) + + #elif SANITIZER_LINUX && defined(__aarch64__) + +@@ -96,28 +120,6 @@ const MappingDesc kMemoryLayout[] = { + # define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0xB00000000000ULL) + # define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x200000000000ULL) + +-#elif SANITIZER_LINUX && SANITIZER_LOONGARCH64 +-// LoongArch64 maps: +-// - 0x000000000000-0x010000000000: Program own segments +-// - 0x555500000000-0x555600000000: PIE program segments +-// - 0x7fff00000000-0x7fffffffffff: libraries segments. 
+-const MappingDesc kMemoryLayout[] = { +- {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"}, +- {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"}, +- {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"}, +- {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"}, +- {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"}, +- {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"}, +- {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"}, +- {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"}, +- {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"}, +- {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"}, +- {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"}, +- {0x700000000000ULL, 0x740000000000ULL, MappingDesc::ALLOCATOR, "allocator"}, +- {0x740000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}}; +-# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL) +-# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x100000000000ULL) +- + #elif SANITIZER_LINUX && SANITIZER_PPC64 + const MappingDesc kMemoryLayout[] = { + {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "low memory"}, +diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp +index b1bc5b939..6065924e1 100644 +--- a/compiler-rt/lib/msan/msan_allocator.cpp ++++ b/compiler-rt/lib/msan/msan_allocator.cpp +@@ -51,7 +51,7 @@ struct MsanMapUnmapCallback { + // Note: to ensure that the allocator is compatible with the application memory + // layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be + // duplicated as MappingDesc::ALLOCATOR in msan.h. 
+-#if defined(__mips64) ++#if defined(__mips64) || defined(__loongarch64) + static const uptr kMaxAllowedMallocSize = 2UL << 30; + + struct AP32 { +@@ -85,22 +85,6 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name. + + typedef SizeClassAllocator64 PrimaryAllocator; + +-#elif defined(__loongarch_lp64) +-const uptr kAllocatorSpace = 0x700000000000ULL; +-const uptr kMaxAllowedMallocSize = 8UL << 30; +- +-struct AP64 { // Allocator64 parameters. Deliberately using a short name. +- static const uptr kSpaceBeg = kAllocatorSpace; +- static const uptr kSpaceSize = 0x40000000000; // 4T. +- static const uptr kMetadataSize = sizeof(Metadata); +- typedef DefaultSizeClassMap SizeClassMap; +- typedef MsanMapUnmapCallback MapUnmapCallback; +- static const uptr kFlags = 0; +- using AddressSpaceView = LocalAddressSpaceView; +-}; +- +-typedef SizeClassAllocator64 PrimaryAllocator; +- + #elif defined(__powerpc64__) + static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G + +diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp +index 2c9f2c01e..63483aca9 100644 +--- a/compiler-rt/lib/msan/msan_interceptors.cpp ++++ b/compiler-rt/lib/msan/msan_interceptors.cpp +@@ -1894,7 +1894,7 @@ void InitializeInterceptors() { + INTERCEPT_FUNCTION(dlerror); + INTERCEPT_FUNCTION(dl_iterate_phdr); + INTERCEPT_FUNCTION(getrusage); +-#if defined(__mips__) ++#if defined(__mips__) || defined(__loongarch__) + INTERCEPT_FUNCTION_VER(pthread_create, "GLIBC_2.2"); + #else + INTERCEPT_FUNCTION(pthread_create); +diff --git a/compiler-rt/lib/msan/tests/msan_test.cpp b/compiler-rt/lib/msan/tests/msan_test.cpp +index 41b99fabe..890ae43f1 100644 +--- a/compiler-rt/lib/msan/tests/msan_test.cpp ++++ b/compiler-rt/lib/msan/tests/msan_test.cpp +@@ -3170,16 +3170,16 @@ static void GetPathToLoadable(char *buf, size_t sz) { + const char *last_slash = strrchr(program_path, '/'); + ASSERT_NE(nullptr, last_slash); + size_t dir_len = 
(size_t)(last_slash - program_path); +-# if defined(__x86_64__) ++#if defined(__x86_64__) + static const char basename[] = "libmsan_loadable.x86_64.so"; +-# elif defined(__MIPSEB__) || defined(MIPSEB) ++#elif defined(__MIPSEB__) || defined(MIPSEB) + static const char basename[] = "libmsan_loadable.mips64.so"; +-# elif defined(__mips64) ++#elif defined(__mips64) + static const char basename[] = "libmsan_loadable.mips64el.so"; ++# elif defined(__loongarch64) ++ static const char basename[] = "libmsan_loadable.loongarch64.so"; + # elif defined(__aarch64__) + static const char basename[] = "libmsan_loadable.aarch64.so"; +-# elif defined(__loongarch_lp64) +- static const char basename[] = "libmsan_loadable.loongarch64.so"; + # elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + static const char basename[] = "libmsan_loadable.powerpc64.so"; + # elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_asm.h b/compiler-rt/lib/sanitizer_common/sanitizer_asm.h +index 3af66a4e4..6545526f5 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_asm.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_asm.h +@@ -45,7 +45,7 @@ + #if defined(__x86_64__) || defined(__i386__) || defined(__sparc__) + # define ASM_TAIL_CALL jmp + #elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ +- defined(__powerpc__) || defined(__loongarch_lp64) ++ defined(__powerpc__) || defined(__loongarch__) + # define ASM_TAIL_CALL b + #elif defined(__s390__) + # define ASM_TAIL_CALL jg +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h +index b99c0cffc..e47c6ef54 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h +@@ -724,7 +724,6 @@ enum ModuleArch { + kModuleArchARMV7S, + kModuleArchARMV7K, + kModuleArchARM64, +- kModuleArchLoongArch64, + 
kModuleArchRISCV64, + kModuleArchHexagon + }; +@@ -797,8 +796,6 @@ inline const char *ModuleArchToString(ModuleArch arch) { + return "armv7k"; + case kModuleArchARM64: + return "arm64"; +- case kModuleArchLoongArch64: +- return "loongarch64"; + case kModuleArchRISCV64: + return "riscv64"; + case kModuleArchHexagon: +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S +index 8429d57d6..379328950 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S +@@ -1,10 +1,16 @@ +-#if defined(__loongarch_lp64) && defined(__linux__) ++#if defined(__loongarch64) && defined(__linux__) + + #include "sanitizer_common/sanitizer_asm.h" + + ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA) + ASM_HIDDEN(_ZN14__interception10real_vforkE) + ++.section .bss ++.type _ZN14__interception10real_vforkE, @object ++.size _ZN14__interception10real_vforkE, 8 ++_ZN14__interception10real_vforkE: ++ .zero 8 ++ + .text + .globl ASM_WRAPPER_NAME(vfork) + ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) +@@ -38,7 +44,7 @@ ASM_WRAPPER_NAME(vfork): + // $a0 != 0 => parent process. Clear stack shadow. 
+ // put old $sp to $a0 + addi.d $a0, $sp, 16 +- bl %plt(COMMON_INTERCEPTOR_HANDLE_VFORK) ++ bl COMMON_INTERCEPTOR_HANDLE_VFORK + + .L_exit: + // Restore $ra +@@ -51,7 +57,7 @@ ASM_WRAPPER_NAME(vfork): + jr $ra + ASM_SIZE(vfork) + +-ASM_INTERCEPTOR_TRAMPOLINE(vfork) +-ASM_TRAMPOLINE_ALIAS(vfork, vfork) ++.weak vfork ++.set vfork, ASM_WRAPPER_NAME(vfork) + + #endif +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc +index c10943b3e..1efa1aa75 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc +@@ -2516,7 +2516,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { + # if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- defined(__loongarch__) || SANITIZER_RISCV64) ++ SANITIZER_RISCV64 || defined(__loongarch64)) + if (data) { + if (request == ptrace_setregs) { + PRE_READ((void *)data, struct_user_regs_struct_sz); +@@ -2538,7 +2538,7 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { + # if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- defined(__loongarch__) || SANITIZER_RISCV64) ++ SANITIZER_RISCV64 || defined(__loongarch64)) + if (res >= 0 && data) { + // Note that this is different from the interceptor in + // sanitizer_common_interceptors.inc. 
+diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp +index 5d2dd3a7a..f76e6182a 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp +@@ -13,6 +13,12 @@ + + #include "sanitizer_platform.h" + ++#if defined(__loongarch__) ++# define __ARCH_WANT_RENAMEAT 1 ++# define SC_ADDRERR_RD (1 << 30) ++# define SC_ADDRERR_WR (1 << 31) ++#endif ++ + #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_SOLARIS + +@@ -77,15 +83,16 @@ + # include + # endif + +-# if SANITIZER_LINUX && defined(__loongarch__) +-# include +-# endif ++#if SANITIZER_LINUX && defined(__loongarch__) ++# include ++#endif + + # if SANITIZER_FREEBSD + # include + # include + # include + # include ++ + extern "C" { + // must be included after and on + // FreeBSD 9.2 and 10.0. +@@ -294,28 +301,6 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) { + } + # endif + +-# if SANITIZER_LINUX && defined(__loongarch__) +-static void statx_to_stat(struct statx *in, struct stat *out) { +- internal_memset(out, 0, sizeof(*out)); +- out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor); +- out->st_ino = in->stx_ino; +- out->st_mode = in->stx_mode; +- out->st_nlink = in->stx_nlink; +- out->st_uid = in->stx_uid; +- out->st_gid = in->stx_gid; +- out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor); +- out->st_size = in->stx_size; +- out->st_blksize = in->stx_blksize; +- out->st_blocks = in->stx_blocks; +- out->st_atime = in->stx_atime.tv_sec; +- out->st_atim.tv_nsec = in->stx_atime.tv_nsec; +- out->st_mtime = in->stx_mtime.tv_sec; +- out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec; +- out->st_ctime = in->stx_ctime.tv_sec; +- out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec; +-} +-# endif +- + # if SANITIZER_MIPS64 + // Undefine compatibility macros from + // so that they would not clash with the kernel_stat +@@ -369,30 +354,24 @@ uptr 
internal_stat(const char *path, void *buf) { + # if SANITIZER_FREEBSD + return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); + # elif SANITIZER_LINUX +-# if defined(__loongarch__) +- struct statx bufx; +- int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path, +- AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx); +- statx_to_stat(&bufx, (struct stat *)buf); +- return res; +-# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \ ++# if (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \ + (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \ + !SANITIZER_SPARC + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + 0); +-# else ++# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path, + (uptr)&buf64, 0); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +-# endif +-# else ++# endif ++# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(stat64), path, &buf64); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +-# endif ++# endif + } + + uptr internal_lstat(const char *path, void *buf) { +@@ -400,31 +379,24 @@ uptr internal_lstat(const char *path, void *buf) { + return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); + # elif SANITIZER_LINUX +-# if defined(__loongarch__) +- struct statx bufx; +- int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path, +- AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT, +- STATX_BASIC_STATS, (uptr)&bufx); +- statx_to_stat(&bufx, (struct stat *)buf); +- return res; +-# elif (defined(_LP64) || SANITIZER_X32 || \ ++# if (defined(_LP64) || SANITIZER_X32 || \ + (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \ + !SANITIZER_SPARC + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); +-# else ++# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path, + (uptr)&buf64, 
AT_SYMLINK_NOFOLLOW); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +-# endif +-# else ++# endif ++# else + struct stat64 buf64; + int res = internal_syscall(SYSCALL(lstat64), path, &buf64); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +-# endif ++# endif + } + + uptr internal_fstat(fd_t fd, void *buf) { +@@ -435,12 +407,6 @@ uptr internal_fstat(fd_t fd, void *buf) { + int res = internal_syscall(SYSCALL(fstat), fd, &kbuf); + kernel_stat_to_stat(&kbuf, (struct stat *)buf); + return res; +-# elif SANITIZER_LINUX && defined(__loongarch__) +- struct statx bufx; +- int res = internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH, +- STATX_BASIC_STATS, (uptr)&bufx); +- statx_to_stat(&bufx, (struct stat *)buf); +- return res; + # else + return internal_syscall(SYSCALL(fstat), fd, (uptr)buf); + # endif +@@ -487,15 +453,15 @@ uptr internal_unlink(const char *path) { + } + + uptr internal_rename(const char *oldpath, const char *newpath) { +-# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__) ++# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__) + return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD, + (uptr)newpath, 0); +-# elif SANITIZER_LINUX ++# elif SANITIZER_LINUX + return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD, + (uptr)newpath); +-# else ++# else + return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath); +-# endif ++# endif + } + + uptr internal_sched_yield() { return internal_syscall(SYSCALL(sched_yield)); } +@@ -1096,7 +1062,7 @@ uptr GetMaxVirtualAddress() { + # if SANITIZER_NETBSD && defined(__x86_64__) + return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE) + # elif SANITIZER_WORDSIZE == 64 +-# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__) ++# if defined(__powerpc64__) || defined(__aarch64__) + // On PowerPC64 we have two different address space layouts: 44- and 46-bit. 
+ // We somehow need to figure out which one we are using now and choose + // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. +@@ -1104,12 +1070,13 @@ uptr GetMaxVirtualAddress() { + // of the address space, so simply checking the stack address is not enough. + // This should (does) work for both PowerPC64 Endian modes. + // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. +- // loongarch64 also has multiple address space layouts: default is 47-bit. + return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; + # elif SANITIZER_RISCV64 + return (1ULL << 38) - 1; + # elif SANITIZER_MIPS64 + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; ++# elif defined(__loongarch64) ++ return (1ULL << 40) - 1; // 0x000000ffffffffffUL; + # elif defined(__s390x__) + return (1ULL << 53) - 1; // 0x001fffffffffffffUL; + # elif defined(__sparc__) +@@ -1436,6 +1403,61 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "memory"); + return res; + } ++# elif defined(__loongarch__) && SANITIZER_LINUX ++uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ++ int *parent_tidptr, void *newtls, int *child_tidptr) { ++ long long res; ++ if (!fn || !child_stack) ++ return -EINVAL; ++ CHECK_EQ(0, (uptr)child_stack % 16); ++ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); ++ ((unsigned long long *)child_stack)[0] = (uptr)fn; ++ ((unsigned long long *)child_stack)[1] = (uptr)arg; ++ ++ register int __flags __asm__("r4") = flags; ++ register void *__child_stack __asm__("r5") = child_stack; ++ register int *__parent_tidptr __asm__("r6") = parent_tidptr; ++ register void *__newtls __asm__("r7") = newtls; ++ register int *__child_tidptr __asm__("r8") = child_tidptr; ++ ++ __asm__ __volatile__( ++ /* $a0 = syscall($a7 = SYSCALL(clone), ++ * $a0 = flags, ++ * $a1 = child_stack, ++ * $a2 = parent_tidptr, ++ * $a3 = new_tls, ++ * $a4 = child_tyidptr) ++ */ ++ ++ /* Do the 
system call */ ++ "addi.d $a7, $r0, %1\n" ++ "syscall 0\n" ++ ++ "move %0, $a0" ++ : "=r"(res) ++ : "i"(__NR_clone), "r"(__flags), "r"(__child_stack), "r"(__parent_tidptr), ++ "r"(__newtls), "r"(__child_tidptr) ++ : "memory"); ++ if (res != 0) { ++ return res; ++ } ++ __asm__ __volatile__( ++ /* In the child, now. Call "fn(arg)". */ ++ "ld.d $a6, $sp, 0\n" ++ "ld.d $a0, $sp, 8\n" ++ ++ "jirl $r1, $a6, 0\n" ++ ++ /* Call _exit($v0) */ ++ "addi.d $a7, $r0, %1\n" ++ "syscall 0\n" ++ ++ "move %0, $a0" ++ : "=r"(res) ++ : "i"(__NR_exit) ++ : "r1", "memory"); ++ return res; ++} + # elif defined(__aarch64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { +@@ -1485,48 +1507,6 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "x30", "memory"); + return res; + } +-# elif SANITIZER_LOONGARCH64 +-uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, +- int *parent_tidptr, void *newtls, int *child_tidptr) { +- if (!fn || !child_stack) +- return -EINVAL; +- +- CHECK_EQ(0, (uptr)child_stack % 16); +- +- register int res __asm__("$a0"); +- register int __flags __asm__("$a0") = flags; +- register void *__stack __asm__("$a1") = child_stack; +- register int *__ptid __asm__("$a2") = parent_tidptr; +- register int *__ctid __asm__("$a3") = child_tidptr; +- register void *__tls __asm__("$a4") = newtls; +- register int (*__fn)(void *) __asm__("$a5") = fn; +- register void *__arg __asm__("$a6") = arg; +- register int nr_clone __asm__("$a7") = __NR_clone; +- +- __asm__ __volatile__( +- "syscall 0\n" +- +- // if ($a0 != 0) +- // return $a0; +- "bnez $a0, 1f\n" +- +- // In the child, now. Call "fn(arg)". +- "move $a0, $a6\n" +- "jirl $ra, $a5, 0\n" +- +- // Call _exit($a0). 
+- "addi.d $a7, $zero, %9\n" +- "syscall 0\n" +- +- "1:\n" +- +- : "=r"(res) +- : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls), +- "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit) +- : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", +- "$t8"); +- return res; +-} + # elif defined(__powerpc64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { +@@ -1957,6 +1937,17 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + # endif + } + return SignalContext::Unknown; ++# elif defined(__loongarch__) ++#if SANITIZER_ANDROID ++ u32 flags = ucontext->uc_mcontext.sc_flags; ++#else ++ u32 flags = ucontext->uc_mcontext.__flags; ++#endif ++ if (flags & SC_ADDRERR_RD) ++ return SignalContext::Read; ++ if (flags & SC_ADDRERR_WR) ++ return SignalContext::Write; ++ return SignalContext::Unknown; + # elif defined(__arm__) + static const uptr FSR_WRITE = 1U << 11; + uptr fsr = ucontext->uc_mcontext.error_code; +@@ -1967,13 +1958,6 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const { + if (!Aarch64GetESR(ucontext, &esr)) + return Unknown; + return esr & ESR_ELx_WNR ? Write : Read; +-# elif defined(__loongarch__) +- u32 flags = ucontext->uc_mcontext.__flags; +- if (flags & SC_ADDRERR_RD) +- return SignalContext::Read; +- if (flags & SC_ADDRERR_WR) +- return SignalContext::Write; +- return SignalContext::Unknown; + # elif defined(__sparc__) + // Decode the instruction to determine the access type. + // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype). 
+@@ -2217,6 +2201,17 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; ++# elif defined(__loongarch__) ++ ucontext_t *ucontext = (ucontext_t *)context; ++#if SANITIZER_ANDROID ++ *pc = ucontext->uc_mcontext.sc_pc; ++ *bp = ucontext->uc_mcontext.sc_regs[22]; ++ *sp = ucontext->uc_mcontext.sc_regs[3]; ++#else ++ *pc = ucontext->uc_mcontext.__pc; ++ *bp = ucontext->uc_mcontext.__gregs[22]; ++ *sp = ucontext->uc_mcontext.__gregs[3]; ++#endif + # elif defined(__s390__) + ucontext_t *ucontext = (ucontext_t *)context; + # if defined(__s390x__) +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h +index c30f03267..cbe1a1168 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h +@@ -80,7 +80,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact); + void internal_sigdelset(__sanitizer_sigset_t *set, int signum); + # if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64 ++ defined(__arm__) || SANITIZER_RISCV64 || defined(__loongarch__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr); + # endif +@@ -165,6 +165,13 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) { + : "=r"(__v)); \ + __v; \ + }) ++#elif defined (__loongarch__) ++# define __get_tls() \ ++ ({ \ ++ void **__v; \ ++ __asm__("move %0, $tp" : "=r"(__v)); \ ++ __v; \ ++ }) + # elif defined(__riscv) + # define __get_tls() \ + ({ \ +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp 
b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +index cccbb4d25..886298188 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +@@ -208,7 +208,7 @@ void InitTlsSize() { + GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25; + + # if defined(__aarch64__) || defined(__x86_64__) || \ +- defined(__powerpc64__) || defined(__loongarch__) ++ defined(__powerpc64__) + void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); + size_t tls_align; + ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align); +@@ -268,8 +268,8 @@ static uptr ThreadDescriptorSizeFallback() { + # elif defined(__mips__) + // TODO(sagarthakur): add more values as per different glibc versions. + val = FIRST_32_SECOND_64(1152, 1776); +-# elif SANITIZER_LOONGARCH64 +- val = 1856; // from glibc 2.36 ++# elif defined(__loongarch64) ++ val = 1776; + # elif SANITIZER_RISCV64 + int major; + int minor; +@@ -310,18 +310,18 @@ uptr ThreadDescriptorSize() { + } + + # if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \ +- SANITIZER_LOONGARCH64 ++ defined(__loongarch__) + // TlsPreTcbSize includes size of struct pthread_descr and size of tcb + // head structure. It lies before the static tls blocks. 
+ static uptr TlsPreTcbSize() { + # if defined(__mips__) + const uptr kTcbHead = 16; // sizeof (tcbhead_t) ++# elif defined(__loongarch__) ++ const uptr kTcbHead = 16; // sizeof (tcbhead_t) + # elif defined(__powerpc64__) + const uptr kTcbHead = 88; // sizeof (tcbhead_t) + # elif SANITIZER_RISCV64 + const uptr kTcbHead = 16; // sizeof (tcbhead_t) +-# elif SANITIZER_LOONGARCH64 +- const uptr kTcbHead = 16; // sizeof (tcbhead_t) + # endif + const uptr kTlsAlign = 16; + const uptr kTlsPreTcbSize = +@@ -505,15 +505,6 @@ static void GetTls(uptr *addr, uptr *size) { + *addr = reinterpret_cast(__builtin_thread_pointer()) - + ThreadDescriptorSize(); + *size = g_tls_size + ThreadDescriptorSize(); +-# elif SANITIZER_GLIBC && defined(__loongarch__) +-# ifdef __clang__ +- *addr = reinterpret_cast(__builtin_thread_pointer()) - +- ThreadDescriptorSize(); +-# else +- asm("or %0,$tp,$zero" : "=r"(*addr)); +- *addr -= ThreadDescriptorSize(); +-# endif +- *size = g_tls_size + ThreadDescriptorSize(); + # elif SANITIZER_GLIBC && defined(__powerpc64__) + // Workaround for glibc<2.25(?). 2.27 is known to not need this. + uptr tp; +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h +index 596528155..6971d3f68 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h +@@ -283,12 +283,6 @@ + # define SANITIZER_RISCV64 0 + #endif + +-#if defined(__loongarch_lp64) +-# define SANITIZER_LOONGARCH64 1 +-#else +-# define SANITIZER_LOONGARCH64 0 +-#endif +- + // By default we allow to use SizeClassAllocator64 on 64-bit platform. + // But in some cases SizeClassAllocator64 does not work well and we need to + // fallback to SizeClassAllocator32. 
+@@ -298,7 +292,7 @@ + # if (SANITIZER_RISCV64 && !SANITIZER_FUCHSIA) || SANITIZER_IOS || \ + SANITIZER_DRIVERKIT + # define SANITIZER_CAN_USE_ALLOCATOR64 0 +-# elif defined(__mips64) || defined(__hexagon__) ++# elif defined(__mips64) || defined(__hexagon__) || defined(__loongarch64) + # define SANITIZER_CAN_USE_ALLOCATOR64 0 + # else + # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) +@@ -308,7 +302,7 @@ + // The range of addresses which can be returned my mmap. + // FIXME: this value should be different on different platforms. Larger values + // will still work but will consume more memory for TwoLevelByteMap. +-#if defined(__mips__) ++#if defined(__mips__) || defined(__loongarch__) + # if SANITIZER_GO && defined(__mips64) + # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) + # else +@@ -371,6 +365,21 @@ + # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12) + #endif + ++#if defined(__loongarch__) ++# define SANITIZER_LOONGARCH 1 ++# if defined(__loongarch64) ++# define SANITIZER_LOONGARCH32 0 ++# define SANITIZER_LOONGARCH64 1 ++# else ++# define SANITIZER_LOONGARCH32 1 ++# define SANITIZER_LOONGARCH64 0 ++# endif ++#else ++# define SANITIZER_LOONGARCH 0 ++# define SANITIZER_LOONGARCH32 0 ++# define SANITIZER_LOONGARCH64 0 ++#endif ++ + /// \macro MSC_PREREQ + /// \brief Is the compiler MSVC of at least the specified version? 
+ /// The common \param version values to check for are: +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h +index de55c736d..68a6bf7cf 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h +@@ -274,8 +274,8 @@ + #if SI_LINUX_NOT_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64) +-#define SANITIZER_INTERCEPT_PTRACE 1 ++ defined(__s390__) || SANITIZER_RISCV64 || defined(__loongarch__)) ++# define SANITIZER_INTERCEPT_PTRACE 1 + #else + #define SANITIZER_INTERCEPT_PTRACE 0 + #endif +@@ -492,7 +492,8 @@ + #define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) + #define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC + #define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID) +-#define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64) ++#define SANITIZER_INTERCEPT_CFREE \ ++ (SI_GLIBC && !SANITIZER_RISCV64 && !SANITIZER_LOONGARCH) + #define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX + #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC) + #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD) +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +index 6d61d276d..731883d34 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +@@ -95,7 +95,7 @@ + # include + # include + # if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__hexagon__) || defined(__loongarch__) ||SANITIZER_RISCV64 ++ defined(__hexagon__) || SANITIZER_RISCV64 || 
defined(__loongarch64) + # include + # ifdef __arm__ + typedef struct user_fpregs elf_fpregset_t; +@@ -142,7 +142,7 @@ typedef struct user_fpregs elf_fpregset_t; + #include + #include + #include +-#if defined(__mips64) ++#if defined(__mips64) || defined(__loongarch64) + # include + #endif + #include +@@ -276,13 +276,13 @@ namespace __sanitizer { + #if SANITIZER_GLIBC + // Use pre-computed size of struct ustat to avoid which + // has been removed from glibc 2.28. +-#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \ +- defined(__x86_64__) || SANITIZER_RISCV64 +-#define SIZEOF_STRUCT_USTAT 32 ++# if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \ ++ defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \ ++ defined(__x86_64__) || SANITIZER_RISCV64 || defined(__loongarch64) ++# define SIZEOF_STRUCT_USTAT 32 + # elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \ + defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \ +- defined(__hexagon__) ++ defined(__hexagon__) || defined(__loongarch__) + # define SIZEOF_STRUCT_USTAT 20 + # elif defined(__loongarch__) + // Not used. 
The minimum Glibc version available for LoongArch is 2.36 +@@ -358,38 +358,38 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + const int wordexp_wrde_dooffs = WRDE_DOOFFS; + # endif // !SANITIZER_ANDROID + +-#if SANITIZER_LINUX && !SANITIZER_ANDROID && \ +- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || defined(__loongarch__)|| SANITIZER_RISCV64) +-#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) ++# if SANITIZER_LINUX && !SANITIZER_ANDROID && \ ++ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ ++ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ ++ defined(__s390__) || SANITIZER_RISCV64 || defined(__loongarch64)) ++# if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) + unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state); +-#elif defined(__aarch64__) ++# elif defined(__aarch64__) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); +-#elif defined(__loongarch__) ++# elif defined(__loongarch64) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state); +-#elif defined(__s390__) ++# elif defined(__s390__) + unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct); +-#else ++# else + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); +-#endif // 
__mips64 || __powerpc64__ || __aarch64__ || __loongarch__ +-#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ +- defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ +- defined(__loongarch__) || SANITIZER_RISCV64 ++# endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch64 ++# if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ ++ defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ ++ SANITIZER_RISCV64 || defined(__loongarch64) + unsigned struct_user_fpxregs_struct_sz = 0; + #else + unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); + #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ +-// || __s390__ || __loongarch__ +-#ifdef __arm__ ++ // || __s390__ || __loongarch64 ++# ifdef __arm__ + unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; + #else + unsigned struct_user_vfpregs_struct_sz = 0; +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h +index 34bfef1f7..3bc460814 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h +@@ -103,21 +103,24 @@ const unsigned struct_kernel_stat_sz = + ? FIRST_32_SECOND_64(104, 128) + : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 
176 : 160, 216); + const unsigned struct_kernel_stat64_sz = 104; +-#elif defined(__s390__) && !defined(__s390x__) ++# elif defined(__loongarch__) ++const unsigned struct_kernel_stat_sz = 128; ++const unsigned struct_kernel_stat64_sz = 128; ++# elif defined(__s390__) && !defined(__s390x__) + const unsigned struct_kernel_stat_sz = 64; + const unsigned struct_kernel_stat64_sz = 104; +-#elif defined(__s390x__) ++# elif defined(__s390x__) + const unsigned struct_kernel_stat_sz = 144; + const unsigned struct_kernel_stat64_sz = 0; +-#elif defined(__sparc__) && defined(__arch64__) ++# elif defined(__sparc__) && defined(__arch64__) + const unsigned struct___old_kernel_stat_sz = 0; + const unsigned struct_kernel_stat_sz = 104; + const unsigned struct_kernel_stat64_sz = 144; +-#elif defined(__sparc__) && !defined(__arch64__) ++# elif defined(__sparc__) && !defined(__arch64__) + const unsigned struct___old_kernel_stat_sz = 0; + const unsigned struct_kernel_stat_sz = 64; + const unsigned struct_kernel_stat64_sz = 104; +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + const unsigned struct_kernel_stat_sz = 128; + const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64 + # elif defined(__hexagon__) +@@ -565,6 +568,11 @@ union __sanitizer_pthread_attr_t { + #if SANITIZER_ANDROID + # if SANITIZER_MIPS + typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)]; ++# elif SANITIZER_LOONGARCH64 ++struct __sanitizer_sigset_t { ++ // The size is determined by looking at sizeof of real sigset_t on linux. 
++ uptr val[128 / (sizeof(uptr) * 8)]; ++}; + # else + typedef unsigned long __sanitizer_sigset_t; + # endif +@@ -690,11 +698,11 @@ struct __sanitizer_sigaction { + }; + #endif // !SANITIZER_ANDROID + +-#if defined(__mips__) +-#define __SANITIZER_KERNEL_NSIG 128 +-#else +-#define __SANITIZER_KERNEL_NSIG 64 +-#endif ++# if defined(__mips__) || defined(__loongarch__) ++# define __SANITIZER_KERNEL_NSIG 128 ++# else ++# define __SANITIZER_KERNEL_NSIG 64 ++# endif + + struct __sanitizer_kernel_sigset_t { + uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)]; +@@ -855,10 +863,10 @@ typedef void __sanitizer_FILE; + # define SANITIZER_HAS_STRUCT_FILE 0 + #endif + +-#if SANITIZER_LINUX && !SANITIZER_ANDROID && \ +- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ +- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64) ++# if SANITIZER_LINUX && !SANITIZER_ANDROID && \ ++ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ ++ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ ++ defined(__s390__) || SANITIZER_RISCV64 || defined(__loongarch64)) + extern unsigned struct_user_regs_struct_sz; + extern unsigned struct_user_fpregs_struct_sz; + extern unsigned struct_user_fpxregs_struct_sz; +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h +index 6222a958b..afd6f7c6a 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h +@@ -86,21 +86,24 @@ template + class CompactRingBuffer { + // Top byte of long_ stores the buffer size in pages. + // Lower bytes store the address of the next buffer element. 
+- static constexpr int kPageSizeBits = 12; + static constexpr int kSizeShift = 56; + static constexpr int kSizeBits = 64 - kSizeShift; + static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1; + +- uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; } ++ uptr GetStorageSize() const { ++ unsigned kPageSizeBits = Log2(GetPageSizeCached()); ++ return (long_ >> kSizeShift) << kPageSizeBits; ++ } + + static uptr SignExtend(uptr x) { return ((sptr)x) << kSizeBits >> kSizeBits; } + + void Init(void *storage, uptr size) { ++ unsigned kPageSizeBits = Log2(GetPageSizeCached()); + CHECK_EQ(sizeof(CompactRingBuffer), sizeof(void *)); + CHECK(IsPowerOfTwo(size)); + CHECK_GE(size, 1 << kPageSizeBits); + CHECK_LE(size, 128 << kPageSizeBits); +- CHECK_EQ(size % 4096, 0); ++ CHECK_EQ(size % GetPageSizeCached(), 0); + CHECK_EQ(size % sizeof(T), 0); + uptr st = (uptr)storage; + CHECK_EQ(st % (size * 2), 0); +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp +index d24fae982..f00bc1a8e 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp +@@ -124,9 +124,9 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top, + #elif defined(__loongarch__) || defined(__riscv) + // frame[-1] contains the return address + uhwptr pc1 = frame[-1]; +-#else ++# else + uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]); +-#endif ++# endif + // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and + // x86_64) is invalid and stop unwinding here. If we're adding support for + // a platform where this isn't true, we need to reconsider this check. 
+diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h +index 47aed488c..f402a66dc 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h +@@ -22,8 +22,8 @@ struct BufferedStackTrace; + + static const u32 kStackTraceMax = 255; + +-#if SANITIZER_LINUX && defined(__mips__) +-# define SANITIZER_CAN_FAST_UNWIND 0 ++#if (SANITIZER_LINUX && defined(__mips__)) ++# define SANITIZER_CAN_FAST_UNWIND 0 + #elif SANITIZER_WINDOWS + # define SANITIZER_CAN_FAST_UNWIND 0 + #else +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +index 25c4af708..2ae1dd05e 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +@@ -16,46 +16,48 @@ + #if SANITIZER_LINUX && \ + (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) +- +-#include "sanitizer_stoptheworld.h" +- +-#include "sanitizer_platform_limits_posix.h" +-#include "sanitizer_atomic.h" +- +-#include +-#include // for CLONE_* definitions +-#include +-#include // for PR_* definitions +-#include // for PTRACE_* definitions +-#include // for pid_t +-#include // for iovec +-#include // for NT_PRSTATUS +-#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \ +- !SANITIZER_ANDROID ++ defined(__arm__) || SANITIZER_RISCV64 || defined(__loongarch__)) ++ ++# include "sanitizer_atomic.h" ++# include "sanitizer_platform_limits_posix.h" ++# include "sanitizer_stoptheworld.h" ++ ++# if defined(__loongarch__) ++# include ++# endif ++ ++# include // for NT_PRSTATUS ++# include ++# include // for CLONE_* 
definitions ++# include ++# include // for PR_* definitions ++# include // for PTRACE_* definitions ++# include // for pid_t ++# include // for iovec ++# if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID + // GLIBC 2.20+ sys/user does not include asm/ptrace.h + # include + #endif + #include // for user_regs_struct +-#if SANITIZER_ANDROID && SANITIZER_MIPS +-# include // for mips SP register in sys/user.h +-#endif +-#include // for signal-related stuff +- +-#ifdef sa_handler +-# undef sa_handler +-#endif +- +-#ifdef sa_sigaction +-# undef sa_sigaction +-#endif +- +-#include "sanitizer_common.h" +-#include "sanitizer_flags.h" +-#include "sanitizer_libc.h" +-#include "sanitizer_linux.h" +-#include "sanitizer_mutex.h" +-#include "sanitizer_placement_new.h" ++# if (SANITIZER_ANDROID && SANITIZER_MIPS) || SANITIZER_LOONGARCH ++# include // for mips SP register in sys/user.h ++# endif ++# include // for signal-related stuff ++ ++# ifdef sa_handler ++# undef sa_handler ++# endif ++ ++# ifdef sa_sigaction ++# undef sa_sigaction ++# endif ++ ++# include "sanitizer_common.h" ++# include "sanitizer_flags.h" ++# include "sanitizer_libc.h" ++# include "sanitizer_linux.h" ++# include "sanitizer_mutex.h" ++# include "sanitizer_placement_new.h" + + // Sufficiently old kernel headers don't provide this value, but we can still + // call prctl with it. 
If the runtime kernel is new enough, the prctl call will +@@ -509,35 +511,40 @@ typedef struct user regs_struct; + # define REG_SP regs[EF_REG29] + # endif + +-#elif defined(__aarch64__) +-typedef struct user_pt_regs regs_struct; +-#define REG_SP sp ++# elif defined(__loongarch__) ++typedef struct user_regs_struct regs_struct; + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#elif defined(__loongarch__) ++# if SANITIZER_ANDROID ++# define REG_SP regs[3] ++# elif SANITIZER_LOONGARCH ++# define REG_SP gpr[3] ++# endif ++ ++# elif defined(__aarch64__) + typedef struct user_pt_regs regs_struct; +-#define REG_SP regs[3] ++# define REG_SP sp + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#elif SANITIZER_RISCV64 ++# elif SANITIZER_RISCV64 + typedef struct user_regs_struct regs_struct; + // sys/ucontext.h already defines REG_SP as 2. Undefine it first. +-#undef REG_SP +-#define REG_SP sp ++# undef REG_SP ++# define REG_SP sp + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#elif defined(__s390__) ++# elif defined(__s390__) + typedef _user_regs_struct regs_struct; +-#define REG_SP gprs[15] ++# define REG_SP gprs[15] + static constexpr uptr kExtraRegs[] = {0}; +-#define ARCH_IOVEC_FOR_GETREGSET ++# define ARCH_IOVEC_FOR_GETREGSET + +-#else +-#error "Unsupported architecture" +-#endif // SANITIZER_ANDROID && defined(__arm__) ++# else ++# error "Unsupported architecture" ++# endif // SANITIZER_ANDROID && defined(__arm__) + + tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const { + CHECK_LT(index, thread_ids_.size()); +@@ -628,4 +635,3 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP( + #endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) + // || defined(__aarch64__) || defined(__powerpc64__) + // || defined(__s390__) || 
defined(__i386__) || defined(__arm__) +- // || SANITIZER_LOONGARCH64 +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +index 74458028a..b954b7d92 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +@@ -266,8 +266,6 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess { + const char* const kSymbolizerArch = "--default-arch=x86_64"; + #elif defined(__i386__) + const char* const kSymbolizerArch = "--default-arch=i386"; +-#elif SANITIZER_LOONGARCH64 +- const char *const kSymbolizerArch = "--default-arch=loongarch64"; + #elif SANITIZER_RISCV64 + const char *const kSymbolizerArch = "--default-arch=riscv64"; + #elif defined(__aarch64__) +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc +index 80f5e6be8..97ca7f2f3 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc +@@ -14,22 +14,18 @@ + // About local register variables: + // https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables + // +-// Kernel ABI: +-// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f +-// syscall number is placed in a7 +-// parameters, if present, are placed in a0-a6 +-// upon return: +-// the return value is placed in a0 +-// t0-t8 should be considered clobbered +-// all other registers are preserved ++// Kernel ABI... 
++// syscall number is passed in a7 ++// (http://man7.org/linux/man-pages/man2/syscall.2.html) results are return in ++// a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments ++// are passed in: a0-a7 (confirmed by inspecting glibc sources). + #define SYSCALL(name) __NR_##name + +-#define INTERNAL_SYSCALL_CLOBBERS \ +- "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8" ++#define INTERNAL_SYSCALL_CLOBBERS "memory" + + static uptr __internal_syscall(u64 nr) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0"); ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0"); + __asm__ volatile("syscall 0\n\t" + : "=r"(a0) + : "r"(a7) +@@ -39,8 +35,8 @@ static uptr __internal_syscall(u64 nr) { + #define __internal_syscall0(n) (__internal_syscall)(n) + + static uptr __internal_syscall(u64 nr, u64 arg1) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7) +@@ -50,9 +46,9 @@ static uptr __internal_syscall(u64 nr, u64 arg1) { + #define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1)) + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1) +@@ -63,10 +59,10 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) { + (__internal_syscall)(n, (u64)(a1), (long)(a2)) + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; +- register u64 a2 asm("$a2") = arg3; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = 
arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2) +@@ -78,11 +74,11 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) { + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; +- register u64 a2 asm("$a2") = arg3; +- register u64 a3 asm("$a3") = arg4; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3) +@@ -94,12 +90,12 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, + long arg5) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; +- register u64 a2 asm("$a2") = arg3; +- register u64 a3 asm("$a3") = arg4; +- register u64 a4 asm("$a4") = arg5; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4) +@@ -112,13 +108,13 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, + long arg5, long arg6) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; +- register u64 a2 asm("$a2") = arg3; +- register u64 a3 asm("$a3") = arg4; +- register u64 a4 asm("$a4") = arg5; +- register u64 a5 
asm("$a5") = arg6; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; ++ register u64 a5 asm("a5") = arg6; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) +@@ -131,14 +127,14 @@ static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, + + static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4, + long arg5, long arg6, long arg7) { +- register u64 a7 asm("$a7") = nr; +- register u64 a0 asm("$a0") = arg1; +- register u64 a1 asm("$a1") = arg2; +- register u64 a2 asm("$a2") = arg3; +- register u64 a3 asm("$a3") = arg4; +- register u64 a4 asm("$a4") = arg5; +- register u64 a5 asm("$a5") = arg6; +- register u64 a6 asm("$a6") = arg7; ++ register u64 a7 asm("a7") = nr; ++ register u64 a0 asm("a0") = arg1; ++ register u64 a1 asm("a1") = arg2; ++ register u64 a2 asm("a2") = arg3; ++ register u64 a3 asm("a3") = arg4; ++ register u64 a4 asm("a4") = arg5; ++ register u64 a5 asm("a5") = arg6; ++ register u64 a6 asm("a6") = arg7; + __asm__ volatile("syscall 0\n\t" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), +diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp +index 252979f1c..d5dc5e66a 100644 +--- a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp ++++ b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp +@@ -93,14 +93,14 @@ void DTLS_Destroy() { + // "Dynamic thread vector pointers point 0x8000 past the start of each + // TLS block." (sysdeps//dl-tls.h) + static const uptr kDtvOffset = 0x8000; +-#elif defined(__riscv) ++# elif defined(__riscv) + // This is glibc's TLS_DTV_OFFSET: + // "Dynamic thread vector pointers point 0x800 past the start of each + // TLS block." 
(sysdeps/riscv/dl-tls.h) + static const uptr kDtvOffset = 0x800; +-#else ++# else + static const uptr kDtvOffset = 0; +-#endif ++# endif + + extern "C" { + SANITIZER_WEAK_ATTRIBUTE +diff --git a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt +index 3c709e411..f09dad86c 100644 +--- a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt ++++ b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt +@@ -3,7 +3,7 @@ include(CompilerRTCompile) + clang_compiler_add_cxx_check() + + # FIXME: use SANITIZER_COMMON_SUPPORTED_ARCH here +-filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64 sparcv9 sparc) ++filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64 sparcv9 sparc loongarch64) + if(APPLE) + list(APPEND SANITIZER_UNITTEST_SUPPORTED_ARCH arm64) + darwin_filter_host_archs(SANITIZER_UNITTEST_SUPPORTED_ARCH SANITIZER_UNITTEST_SUPPORTED_ARCH) +diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp +index 58f2c8f7b..2df56bcd4 100644 +--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp ++++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp +@@ -162,9 +162,11 @@ static const u64 kAddressSpaceSize = 1ULL << 39; + static const u64 kAddressSpaceSize = 1ULL << 53; + #elif defined(__s390__) + static const u64 kAddressSpaceSize = 1ULL << 31; +-#else ++# elif defined(__loongarch64) ++static const u64 kAddressSpaceSize = 1ULL << 40; ++# else + static const u64 kAddressSpaceSize = 1ULL << 32; +-#endif ++# endif + + static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24); + +diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp +index 91ec2f9e2..cbaefe1c4 100644 +--- 
a/compiler-rt/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp ++++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_ring_buffer_test.cpp +@@ -10,7 +10,9 @@ + // + //===----------------------------------------------------------------------===// + #include "sanitizer_common/sanitizer_ring_buffer.h" ++ + #include "gtest/gtest.h" ++#include "sanitizer_common/sanitizer_common.h" + + namespace __sanitizer { + +@@ -84,9 +86,10 @@ CompactRingBuffer *AllocCompactRingBuffer(size_t count) { + + TEST(CompactRingBuffer, int64) { + const size_t page_sizes[] = {1, 2, 4, 128}; ++ size_t page_size = GetPageSizeCached(); + + for (size_t pages : page_sizes) { +- size_t count = 4096 * pages / sizeof(int64_t); ++ size_t count = page_size * pages / sizeof(int64_t); + auto R = AllocCompactRingBuffer(count); + int64_t top = count * 3 + 13; + for (int64_t i = 0; i < top; ++i) R->push(i); +diff --git a/compiler-rt/lib/tsan/go/buildgo.sh b/compiler-rt/lib/tsan/go/buildgo.sh +index 78ba41a0b..0bd59368c 100755 +--- a/compiler-rt/lib/tsan/go/buildgo.sh ++++ b/compiler-rt/lib/tsan/go/buildgo.sh +@@ -10,8 +10,6 @@ if [ "`uname -a | grep Linux`" != "" ]; then + HOST_GOARCH="amd64" + elif [ "`uname -a | grep aarch64`" != "" ]; then + HOST_GOARCH="arm64" +- elif [ "`uname -a | grep loongarch64`" != "" ]; then +- HOST_GOARCH="loong64" + elif [ "`uname -a | grep -i mips64`" != "" ]; then + if [ "`lscpu | grep -i Little`" != "" ]; then + HOST_GOARCH="mips64le" +diff --git a/compiler-rt/lib/tsan/rtl/CMakeLists.txt b/compiler-rt/lib/tsan/rtl/CMakeLists.txt +index 791c0596f..a82c7c548 100644 +--- a/compiler-rt/lib/tsan/rtl/CMakeLists.txt ++++ b/compiler-rt/lib/tsan/rtl/CMakeLists.txt +@@ -127,6 +127,7 @@ if(APPLE) + set(TSAN_ASM_SOURCES + tsan_rtl_amd64.S + tsan_rtl_aarch64.S ++ tsan_rtl_loongarch64.S + ) + + set(TSAN_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS}) +@@ -212,10 +213,6 @@ else() + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../go + COMMENT "Checking TSan Go runtime..." 
+ VERBATIM) +- elseif(arch MATCHES "loongarch64") +- set(TSAN_ASM_SOURCES +- tsan_rtl_loongarch64.S +- ) + elseif(arch MATCHES "mips64|mips64le") + set(TSAN_ASM_SOURCES + tsan_rtl_mips64.S +@@ -224,6 +221,10 @@ else() + set(TSAN_ASM_SOURCES + tsan_rtl_riscv64.S + ) ++ elseif(arch MATCHES "loongarch64") ++ set(TSAN_ASM_SOURCES ++ tsan_rtl_loongarch64.S ++ ) + elseif(arch MATCHES "s390x") + set(TSAN_ASM_SOURCES + tsan_rtl_s390x.S +diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +index d0282c270..2555811cd 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp ++++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +@@ -76,12 +76,10 @@ struct ucontext_t { + #endif + + #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ +- defined(__s390x__) +-#define PTHREAD_ABI_BASE "GLIBC_2.3.2" ++ defined(__s390x__) || defined(__loongarch__) ++# define PTHREAD_ABI_BASE "GLIBC_2.3.2" + #elif defined(__aarch64__) || SANITIZER_PPC64V2 + #define PTHREAD_ABI_BASE "GLIBC_2.17" +-#elif SANITIZER_LOONGARCH64 +-#define PTHREAD_ABI_BASE "GLIBC_2.36" + #elif SANITIZER_RISCV64 + # define PTHREAD_ABI_BASE "GLIBC_2.27" + #endif +diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h +index 377f8aeb8..651a1d6bb 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h ++++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h +@@ -122,6 +122,42 @@ struct MappingMips64_40 { + static const uptr kVdsoBeg = 0xfffff00000ull; + }; + ++/* ++ * TODO same as mips64 and need to change in the future ++C/C++ on linux/loongarch64 (40-bit VMA) ++0000 0000 00 - 0100 0000 00: - (4 GB) ++0100 0000 00 - 0200 0000 00: main binary (4 GB) ++0200 0000 00 - 2000 0000 00: - (120 GB) ++2000 0000 00 - 4000 0000 00: shadow (128 GB) ++4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB) ++5000 0000 00 - aa00 0000 00: - (360 GB) ++aa00 0000 00 - ab00 0000 00: main 
binary (PIE) (4 GB) ++ab00 0000 00 - b000 0000 00: - (20 GB) ++b000 0000 00 - b200 0000 00: traces (8 GB) ++b200 0000 00 - fe00 0000 00: - (304 GB) ++fe00 0000 00 - ff00 0000 00: heap (4 GB) ++ff00 0000 00 - ff80 0000 00: - (2 GB) ++ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB) ++*/ ++struct MappingLoongArch64_40 { ++ static const uptr kMetaShadowBeg = 0x4000000000ull; ++ static const uptr kMetaShadowEnd = 0x5000000000ull; ++ static const uptr kShadowBeg = 0x1200000000ull; ++ static const uptr kShadowEnd = 0x2200000000ull; ++ static const uptr kHeapMemBeg = 0xfe00000000ull; ++ static const uptr kHeapMemEnd = 0xff00000000ull; ++ static const uptr kLoAppMemBeg = 0x0100000000ull; ++ static const uptr kLoAppMemEnd = 0x0200000000ull; ++ static const uptr kMidAppMemBeg = 0xaa00000000ull; ++ static const uptr kMidAppMemEnd = 0xab00000000ull; ++ static const uptr kHiAppMemBeg = 0xff80000000ull; ++ static const uptr kHiAppMemEnd = 0xffffffffffull; ++ static const uptr kShadowMsk = 0xf800000000ull; ++ static const uptr kShadowXor = 0x0800000000ull; ++ static const uptr kShadowAdd = 0x0000000000ull; ++ static const uptr kVdsoBeg = 0xfffff00000ull; ++}; ++ + /* + C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM) + 0000 0000 00 - 0100 0000 00: - (4 GB) +@@ -249,38 +285,6 @@ struct MappingAarch64_48 { + static const uptr kVdsoBeg = 0xffff000000000ull; + }; + +-/* C/C++ on linux/loongarch64 (47-bit VMA) +-0000 0000 4000 - 0080 0000 0000: main binary +-0080 0000 0000 - 0100 0000 0000: - +-0100 0000 0000 - 1000 0000 0000: shadow memory +-1000 0000 0000 - 3000 0000 0000: - +-3000 0000 0000 - 3400 0000 0000: metainfo +-3400 0000 0000 - 5555 0000 0000: - +-5555 0000 0000 - 5556 0000 0000: main binary (PIE) +-5556 0000 0000 - 7ffe 0000 0000: - +-7ffe 0000 0000 - 7fff 0000 0000: heap +-7fff 0000 0000 - 7fff 8000 0000: - +-7fff 8000 0000 - 8000 0000 0000: modules and main thread stack +-*/ +-struct MappingLoongArch64_47 { +- static const uptr kMetaShadowBeg = 
0x300000000000ull; +- static const uptr kMetaShadowEnd = 0x340000000000ull; +- static const uptr kShadowBeg = 0x010000000000ull; +- static const uptr kShadowEnd = 0x100000000000ull; +- static const uptr kHeapMemBeg = 0x7ffe00000000ull; +- static const uptr kHeapMemEnd = 0x7fff00000000ull; +- static const uptr kLoAppMemBeg = 0x000000004000ull; +- static const uptr kLoAppMemEnd = 0x008000000000ull; +- static const uptr kMidAppMemBeg = 0x555500000000ull; +- static const uptr kMidAppMemEnd = 0x555600000000ull; +- static const uptr kHiAppMemBeg = 0x7fff80000000ull; +- static const uptr kHiAppMemEnd = 0x800000000000ull; +- static const uptr kShadowMsk = 0x780000000000ull; +- static const uptr kShadowXor = 0x040000000000ull; +- static const uptr kShadowAdd = 0x000000000000ull; +- static const uptr kVdsoBeg = 0x7fffffffc000ull; +-}; +- + /* + C/C++ on linux/powerpc64 (44-bit VMA) + 0000 0000 0100 - 0001 0000 0000: main binary +@@ -622,35 +626,6 @@ struct MappingGoAarch64 { + static const uptr kShadowAdd = 0x200000000000ull; + }; + +-/* Go on linux/loongarch64 (47-bit VMA) +-0000 0000 1000 - 0000 1000 0000: executable +-0000 1000 0000 - 00c0 0000 0000: - +-00c0 0000 0000 - 00e0 0000 0000: heap +-00e0 0000 0000 - 2000 0000 0000: - +-2000 0000 0000 - 2800 0000 0000: shadow +-2800 0000 0000 - 3000 0000 0000: - +-3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects) +-3200 0000 0000 - 8000 0000 0000: - +-*/ +-struct MappingGoLoongArch64_47 { +- static const uptr kMetaShadowBeg = 0x300000000000ull; +- static const uptr kMetaShadowEnd = 0x320000000000ull; +- static const uptr kShadowBeg = 0x200000000000ull; +- static const uptr kShadowEnd = 0x280000000000ull; +- static const uptr kLoAppMemBeg = 0x000000001000ull; +- static const uptr kLoAppMemEnd = 0x00e000000000ull; +- static const uptr kMidAppMemBeg = 0; +- static const uptr kMidAppMemEnd = 0; +- static const uptr kHiAppMemBeg = 0; +- static const uptr kHiAppMemEnd = 0; +- static const uptr kHeapMemBeg = 0; 
+- static const uptr kHeapMemEnd = 0; +- static const uptr kVdsoBeg = 0; +- static const uptr kShadowMsk = 0; +- static const uptr kShadowXor = 0; +- static const uptr kShadowAdd = 0x200000000000ull; +-}; +- + /* + Go on linux/mips64 (47-bit VMA) + 0000 0000 1000 - 0000 1000 0000: executable +@@ -726,8 +701,6 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { + return Func::template Apply(arg); + # elif defined(__aarch64__) + return Func::template Apply(arg); +-# elif defined(__loongarch_lp64) +- return Func::template Apply(arg); + # elif SANITIZER_WINDOWS + return Func::template Apply(arg); + # else +@@ -747,8 +720,6 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { + case 48: + return Func::template Apply(arg); + } +-# elif SANITIZER_LOONGARCH64 +- return Func::template Apply(arg); + # elif defined(__powerpc64__) + switch (vmaSize) { + case 44: +@@ -767,6 +738,8 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { + case 48: + return Func::template Apply(arg); + } ++# elif defined(__loongarch64) ++ return Func::template Apply(arg); + # elif defined(__s390x__) + return Func::template Apply(arg); + # else +@@ -780,11 +753,11 @@ template + void ForEachMapping() { + Func::template Apply(); + Func::template Apply(); ++ Func::template Apply(); + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); +- Func::template Apply(); + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); +@@ -796,7 +769,6 @@ void ForEachMapping() { + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); +- Func::template Apply(); + Func::template Apply(); + Func::template Apply(); + } +diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp +index 0d0b1aba1..aad67cf05 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp ++++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp +@@ -66,8 +66,7 @@ extern "C" void *__libc_stack_end; + void 
*__libc_stack_end = 0; + #endif + +-#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \ +- !SANITIZER_GO ++#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO + # define INIT_LONGJMP_XOR_KEY 1 + #else + # define INIT_LONGJMP_XOR_KEY 0 +@@ -313,20 +312,6 @@ void InitializePlatformEarly() { + Die(); + } + #endif +-#elif SANITIZER_LOONGARCH64 +-# if !SANITIZER_GO +- if (vmaSize != 47) { +- Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); +- Printf("FATAL: Found %zd - Supported 47\n", vmaSize); +- Die(); +- } +-# else +- if (vmaSize != 47) { +- Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); +- Printf("FATAL: Found %zd - Supported 47\n", vmaSize); +- Die(); +- } +-# endif + #elif defined(__powerpc64__) + # if !SANITIZER_GO + if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) { +@@ -380,7 +365,7 @@ void InitializePlatform() { + // is not compiled with -pie. + #if !SANITIZER_GO + { +-# if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) ++# if SANITIZER_LINUX && defined(__aarch64__) + // Initialize the xor key used in {sig}{set,long}jump. 
+ InitializeLongjmpXorKey(); + # endif +@@ -460,8 +445,6 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + # else + return mangled_sp; + # endif +-#elif defined(__loongarch_lp64) +- return mangled_sp ^ longjmp_xor_key; + #elif defined(__powerpc64__) + // Reverse of: + // ld r4, -28696(r13) +@@ -473,6 +456,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + return mangled_sp; + # elif SANITIZER_RISCV64 + return mangled_sp; ++# elif defined(__loongarch__) ++ return mangled_sp; + # elif defined(__s390x__) + // tcbhead_t.stack_guard + uptr xor_key = ((uptr *)__builtin_thread_pointer())[5]; +@@ -499,10 +484,10 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + #elif SANITIZER_LINUX + # ifdef __aarch64__ + # define LONG_JMP_SP_ENV_SLOT 13 +-# elif defined(__loongarch__) +-# define LONG_JMP_SP_ENV_SLOT 1 + # elif defined(__mips64) + # define LONG_JMP_SP_ENV_SLOT 1 ++# elif defined(__loongarch64) ++# define LONG_JMP_SP_ENV_SLOT 1 + # elif SANITIZER_RISCV64 + # define LONG_JMP_SP_ENV_SLOT 13 + # elif defined(__s390x__) +@@ -529,11 +514,7 @@ static void InitializeLongjmpXorKey() { + + // 2. Retrieve vanilla/mangled SP. + uptr sp; +-#ifdef __loongarch__ +- asm("move %0, $sp" : "=r" (sp)); +-#else + asm("mov %0, sp" : "=r" (sp)); +-#endif + uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT]; + + // 3. xor SPs to obtain key. 
+diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp +index fd9441dfc..eb2411acd 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp ++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp +@@ -520,7 +520,7 @@ static void StartBackgroundThread() { + ctx->background_thread = internal_start_thread(&BackgroundThread, 0); + } + +-#ifndef __mips__ ++# if !(defined(__mips__) || defined(__loongarch__)) + static void StopBackgroundThread() { + atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); + internal_join_thread(ctx->background_thread); +@@ -754,7 +754,7 @@ void MaybeSpawnBackgroundThread() { + // On MIPS, TSan initialization is run before + // __pthread_initialize_minimal_internal() is finished, so we can not spawn + // new threads. +-#if !SANITIZER_GO && !defined(__mips__) ++#if !SANITIZER_GO && !(defined(__mips__) || defined(__loongarch__)) + static atomic_uint32_t bg_thread = {}; + if (atomic_load(&bg_thread, memory_order_relaxed) == 0 && + atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) { +diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h +index de4ea0bb5..119706475 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h ++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h +@@ -56,8 +56,8 @@ namespace __tsan { + + #if !SANITIZER_GO + struct MapUnmapCallback; +-# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \ +- defined(__powerpc__) || SANITIZER_RISCV64 ++# if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) || \ ++ SANITIZER_RISCV64 || defined(__loongarch64) + + struct AP32 { + static const uptr kSpaceBeg = 0; +diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S +index 12856bd11..8da65e890 100644 +--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S ++++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S +@@ -2,195 +2,148 @@ + + .section .text + +-ASM_HIDDEN(__tsan_setjmp) ++.hidden 
__tsan_setjmp + .comm _ZN14__interception11real_setjmpE,8,8 +-.globl ASM_SYMBOL_INTERCEPTOR(setjmp) +-ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp)) +-ASM_SYMBOL_INTERCEPTOR(setjmp): +- CFI_STARTPROC +- +- // Save frame pointer and return address register +- addi.d $sp, $sp, -32 +- st.d $ra, $sp, 24 +- st.d $fp, $sp, 16 +- CFI_DEF_CFA_OFFSET (32) +- CFI_OFFSET (1, -8) +- CFI_OFFSET (22, -16) +- +- // Adjust the SP for previous frame +- addi.d $fp, $sp, 32 +- CFI_DEF_CFA_REGISTER (22) +- +- // Save env parameter +- st.d $a0, $sp, 8 +- CFI_OFFSET (4, -24) ++.globl setjmp ++.type setjmp, @function ++setjmp: ++ ++ // Save env parameters ++ addi.d $r3,$r3,-24 ++ st.d $r1,$r3,16 ++ ++ // Save jmp_buf ++ st.d $r4,$r3,0 + + // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` +- addi.d $a0, $fp, 0 ++ addi.d $r4,$r3,24 + + // call tsan interceptor +- bl ASM_SYMBOL(__tsan_setjmp) ++ bl __tsan_setjmp ++ ++ // Restore jmp_buf ++ ld.d $r4,$r3,0 + +- // Restore env parameter +- ld.d $a0, $sp, 8 +- CFI_RESTORE (4) ++ // Load libc setjmp to r20 ++ la $r20,_ZN14__interception11real_setjmpE + +- // Restore frame/link register +- ld.d $fp, $sp, 16 +- ld.d $ra, $sp, 24 +- addi.d $sp, $sp, 32 +- CFI_RESTORE (22) +- CFI_RESTORE (1) +- CFI_DEF_CFA (3, 0) ++ // Restore env parameters ++ ld.d $r1,$r3,16 ++ addi.d $r3,$r3,24 + +- // tail jump to libc setjmp +- la.local $a1, _ZN14__interception11real_setjmpE +- ld.d $a1, $a1, 0 +- jr $a1 ++ # tail jump to libc setjmp ++ ld.d $r20,$r20,0 ++ jr $r20 + +- CFI_ENDPROC +-ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp)) ++.size setjmp, .-setjmp + ++.hidden __tsan_setjmp ++.globl _setjmp + .comm _ZN14__interception12real__setjmpE,8,8 +-.globl ASM_SYMBOL_INTERCEPTOR(_setjmp) +-ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp)) +-ASM_SYMBOL_INTERCEPTOR(_setjmp): +- CFI_STARTPROC +- +- // Save frame pointer and return address register +- addi.d $sp, $sp, -32 +- st.d $ra, $sp, 24 +- st.d $fp, $sp, 16 +- CFI_DEF_CFA_OFFSET (32) +- CFI_OFFSET (1, -8) 
+- CFI_OFFSET (22, -16) +- +- // Adjust the SP for previous frame +- addi.d $fp, $sp, 32 +- CFI_DEF_CFA_REGISTER (22) +- +- // Save env parameter +- st.d $a0, $sp, 8 +- CFI_OFFSET (4, -24) ++.type _setjmp, @function ++_setjmp: ++ ++ // Save env parameters ++ addi.d $r3,$r3,-24 ++ st.d $r1,$r3,16 ++ ++ // Save jmp_buf ++ st.d $r4,$r3,0 + + // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` +- addi.d $a0, $fp, 0 ++ addi.d $r4,$r3,24 + + // call tsan interceptor +- bl ASM_SYMBOL(__tsan_setjmp) ++ bl __tsan_setjmp + +- // Restore env parameter +- ld.d $a0, $sp, 8 +- CFI_RESTORE (4) ++ // Restore jmp_buf ++ ld.d $r4,$r3,0 + +- // Restore frame/link register +- ld.d $fp, $sp, 16 +- ld.d $ra, $sp, 24 +- addi.d $sp, $sp, 32 +- CFI_RESTORE (22) +- CFI_RESTORE (1) +- CFI_DEF_CFA (3, 0) ++ // Load libc _setjmp to r20 ++ la $r20,_ZN14__interception12real__setjmpE + +- // tail jump to libc setjmp +- la.local $a1, _ZN14__interception12real__setjmpE +- ld.d $a1, $a1, 0 +- jr $a1 ++ // Restore env parameters ++ ld.d $r1,$r3,16 ++ addi.d $r3,$r3,24 + +- CFI_ENDPROC +-ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp)) ++ // tail jump to libc _setjmp ++ ld.d $r20,$r20,0 ++ jr $r20 + ++.size _setjmp, .-_setjmp ++ ++.hidden __tsan_setjmp ++.globl sigsetjmp + .comm _ZN14__interception14real_sigsetjmpE,8,8 +-.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp) +-ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp)) +-ASM_SYMBOL_INTERCEPTOR(sigsetjmp): +- CFI_STARTPROC +- +- // Save frame pointer and return address register +- addi.d $sp, $sp, -32 +- st.d $ra, $sp, 24 +- st.d $fp, $sp, 16 +- CFI_DEF_CFA_OFFSET (32) +- CFI_OFFSET (1, -8) +- CFI_OFFSET (22, -16) +- +- // Adjust the SP for previous frame +- addi.d $fp, $sp, 32 +- CFI_DEF_CFA_REGISTER (22) +- +- // Save env parameter +- st.d $a0, $sp, 8 +- CFI_OFFSET (4, -24) ++.type sigsetjmp, @function ++sigsetjmp: ++ ++ // Save env parameters ++ addi.d $r3,$r3,-32 ++ st.d $r1,$r3,24 ++ ++ // Save jmp_buf and savesig ++ st.d $r4,$r3,0 ++ st.d 
$r5,$r3,8 + + // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` +- addi.d $a0, $fp, 0 ++ addi.d $r4,$r3,32 + + // call tsan interceptor +- bl ASM_SYMBOL(__tsan_setjmp) ++ bl __tsan_setjmp ++ ++ // Restore jmp_buf and savesig ++ ld.d $r4,$r3,0 ++ ld.d $r5,$r3,8 + +- // Restore env parameter +- ld.d $a0, $sp, 8 +- CFI_RESTORE (4) ++ // Load libc sigsetjmp to r20 ++ la $r20,_ZN14__interception14real_sigsetjmpE + +- // Restore frame/link register +- ld.d $fp, $sp, 16 +- ld.d $ra, $sp, 24 +- addi.d $sp, $sp, 32 +- CFI_RESTORE (22) +- CFI_RESTORE (1) +- CFI_DEF_CFA (3, 0) ++ // Restore env parameters ++ ld.d $r1,$r3,24 ++ addi.d $r3,$r3,32 + +- // tail jump to libc setjmp +- la.local $a1, _ZN14__interception14real_sigsetjmpE +- ld.d $a1, $a1, 0 +- jr $a1 ++ // tail jump to libc sigsetjmp ++ ld.d $r20,$r20,0 ++ jr $r20 + +- CFI_ENDPROC +-ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp)) ++.size sigsetjmp, .-sigsetjmp + ++.hidden __tsan_setjmp + .comm _ZN14__interception16real___sigsetjmpE,8,8 +-.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp) +-ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) +-ASM_SYMBOL_INTERCEPTOR(__sigsetjmp): +- CFI_STARTPROC +- +- // Save frame pointer and return address register +- addi.d $sp, $sp, -32 +- st.d $ra, $sp, 24 +- st.d $fp, $sp, 16 +- CFI_DEF_CFA_OFFSET (32) +- CFI_OFFSET (1, -8) +- CFI_OFFSET (22, -16) +- +- // Adjust the SP for previous frame +- addi.d $fp, $sp, 32 +- CFI_DEF_CFA_REGISTER (22) +- +- // Save env parameter +- st.d $a0, $sp, 8 +- CFI_OFFSET (4, -24) ++.globl __sigsetjmp ++.type __sigsetjmp, @function ++__sigsetjmp: ++ ++ // Save env parameters ++ addi.d $r3,$r3,-32 ++ st.d $r1,$r3,24 ++ ++ // Save jmp_buf and savesig ++ st.d $r4,$r3,0 ++ st.d $r5,$r3,8 + + // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` +- addi.d $a0, $fp, 0 ++ addi.d $r4,$r3,32 + + // call tsan interceptor +- bl ASM_SYMBOL(__tsan_setjmp) +- +- // Restore env parameter +- ld.d $a0, $sp, 8 +- CFI_RESTORE (4) +- +- // Restore frame/link 
register +- ld.d $fp, $sp, 16 +- ld.d $ra, $sp, 24 +- addi.d $sp, $sp, 32 +- CFI_RESTORE (22) +- CFI_RESTORE (1) +- CFI_DEF_CFA (3, 0) +- +- // tail jump to libc setjmp +- la.local $a1, _ZN14__interception16real___sigsetjmpE +- ld.d $a1, $a1, 0 +- jr $a1 +- +- CFI_ENDPROC +-ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) ++ bl __tsan_setjmp ++ ++ // Restore jmp_buf and savesig ++ ld.d $r4,$r3,0 ++ ld.d $r5,$r3,8 ++ ++ // Load libc __sigsetjmp in r20 ++ la $r20,_ZN14__interception16real___sigsetjmpE ++ ++ // Restore env parameters ++ ld.d $r1,$r3,24 ++ addi.d $r3,$r3,32 ++ ++ // tail jump to libc __sigsetjmp ++ ld.d $r20,$r20,0 ++ jr $r20 ++ ++.size __sigsetjmp, .-__sigsetjmp ++ ++NO_EXEC_STACK_DIRECTIVE +diff --git a/compiler-rt/lib/xray/CMakeLists.txt b/compiler-rt/lib/xray/CMakeLists.txt +index cf7b5062a..1e4f9346b 100644 +--- a/compiler-rt/lib/xray/CMakeLists.txt ++++ b/compiler-rt/lib/xray/CMakeLists.txt +@@ -48,8 +48,8 @@ set(aarch64_SOURCES + ) + + set(loongarch64_SOURCES +- xray_loongarch64.cpp +- xray_trampoline_loongarch64.S ++ xray_loongarch.cpp ++ xray_trampoline_loongarch.S + ) + + set(mips_SOURCES +diff --git a/compiler-rt/lib/xray/xray_interface.cpp b/compiler-rt/lib/xray/xray_interface.cpp +index 5839043fc..3f901383b 100644 +--- a/compiler-rt/lib/xray/xray_interface.cpp ++++ b/compiler-rt/lib/xray/xray_interface.cpp +@@ -46,12 +46,12 @@ static const int16_t cSledLength = 12; + static const int16_t cSledLength = 32; + #elif defined(__arm__) + static const int16_t cSledLength = 28; +-#elif SANITIZER_LOONGARCH64 +-static const int16_t cSledLength = 48; + #elif SANITIZER_MIPS32 + static const int16_t cSledLength = 48; + #elif SANITIZER_MIPS64 + static const int16_t cSledLength = 64; ++#elif SANITIZER_LOONGARCH64 ++static const int16_t cSledLength = 48; + #elif defined(__powerpc64__) + static const int16_t cSledLength = 8; + #elif defined(__hexagon__) +diff --git a/compiler-rt/lib/xray/xray_loongarch.cpp b/compiler-rt/lib/xray/xray_loongarch.cpp +new file 
mode 100644 +index 000000000..379526b5a +--- /dev/null ++++ b/compiler-rt/lib/xray/xray_loongarch.cpp +@@ -0,0 +1,173 @@ ++//===-- xray_loongarch.cpp -----------------------------------------*- C++ ++//-*-===// ++// ++// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. ++// See https://llvm.org/LICENSE.txt for license information. ++// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception ++// ++//===----------------------------------------------------------------------===// ++// ++// This file is a part of XRay, a dynamic runtime instrumentation system. ++// ++// Implementation of loongarch-specific routines. ++// ++//===----------------------------------------------------------------------===// ++#include "sanitizer_common/sanitizer_common.h" ++#include "xray_defs.h" ++#include "xray_interface_internal.h" ++#include ++ ++namespace __xray { ++ ++// The machine codes for some instructions used in runtime patching. ++enum PatchOpcodes : uint32_t { ++ PO_ADDID = 0x02c00000, // addi.d rd, rj, imm ++ PO_SD = 0x29c00000, // st.d rd, base, offset ++ PO_LU12IW = 0x14000000, // lu12i.w rd, imm ++ PO_ORI = 0x03800000, // ori rd, rs, imm ++ PO_LU32ID = 0x16000000, // lu32i.d rd, imm ++ PO_LU52ID = 0x03000000, // lu52i.d rd, rj, imm ++ PO_JIRL = 0x4c000000, // jirl rd, rj, 0 ++ PO_LD = 0x28c00000, // ld.d rd, base, offset ++ PO_B48 = 0x50003000, // b #48 ++}; ++ ++enum RegNum : uint32_t { ++ RN_T0 = 0xC, ++ RN_T1 = 0xD, ++ RN_RA = 0x1, ++ RN_SP = 0x3, ++}; ++ ++// addi.d lu521.d ori ld.d st.d ++inline static uint32_t ++encodeInstruction_i12(uint32_t Opcode, uint32_t Rd, uint32_t Rj, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rj << 5 | Rd | Imm << 10); ++} ++ ++// lu12i.w lu32i.d ++inline static uint32_t ++encodeInstruction_si20(uint32_t Opcode, uint32_t Rd, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rd | Imm << 5); ++} ++ ++// jirl ++inline static uint32_t ++encodeInstruction_si16(uint32_t Opcode, uint32_t Rd, 
uint32_t Rj, ++ uint32_t Imm) XRAY_NEVER_INSTRUMENT { ++ return (Opcode | Rj << 5 | Rd | Imm << 10); ++} ++ ++inline static bool patchSled(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled, ++ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT { ++ // When |Enable| == true, ++ // We replace the following compile-time stub (sled): ++ // ++ // xray_sled_n: ++ // B .tmpN ++ // 11 NOPs (44 bytes) ++ // .tmpN ++ // ++ // With the following runtime patch: ++ // xray_sled_n (64-bit): ++ // addi.d sp,sp, -16 ;create stack frame ++ // st.d ra, sp, 8 ;save return address ++ // lu12i.w t0,%%abs_hi20(__xray_FunctionEntry/Exit) ++ // ori t0,t0,%%abs_lo12(__xray_FunctionEntry/Exit) ++ // lu32i.d t0,%%abs64_lo20(__xray_FunctionEntry/Exit) ++ // lu52i.d t0,t0,%%abs64_hi12(__xray_FunctionEntry/Exit) ++ // lu12i.w t1,%%abs_hi20(function_id) ++ // ori t1,t1,%%abs_lo12(function_id) ;pass function id ++ // jirl ra, t0, 0 ;call Tracing hook ++ // ld.d ra, sp, 8 ;restore return address ++ // addi.d sp, sp, 16 ;delete stack frame ++ // ++ // Replacement of the first 4-byte instruction should be the last and atomic ++ // operation, so that the user code which reaches the sled concurrently ++ // either jumps over the whole sled, or executes the whole sled when the ++ // latter is ready. 
++ // ++ // When |Enable|==false, we set back the first instruction in the sled to be ++ // B #48 ++ ++ uint32_t *Address = reinterpret_cast(Sled.address()); ++ if (Enable) { ++ uint32_t LoTracingHookAddr = reinterpret_cast(TracingHook) & 0xfff; ++ uint32_t HiTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 12) & 0xfffff; ++ uint32_t HigherTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 32) & 0xfffff; ++ uint32_t HighestTracingHookAddr = ++ (reinterpret_cast(TracingHook) >> 52) & 0xfff; ++ uint32_t LoFunctionID = FuncId & 0xfff; ++ uint32_t HiFunctionID = (FuncId >> 12) & 0xfffff; ++ Address[1] = encodeInstruction_i12(PatchOpcodes::PO_SD, RegNum::RN_RA, ++ RegNum::RN_SP, 0x8); ++ Address[2] = encodeInstruction_si20(PatchOpcodes::PO_LU12IW, RegNum::RN_T0, ++ HiTracingHookAddr); ++ Address[3] = encodeInstruction_i12(PatchOpcodes::PO_ORI, RegNum::RN_T0, ++ RegNum::RN_T0, LoTracingHookAddr); ++ Address[4] = encodeInstruction_si20(PatchOpcodes::PO_LU32ID, RegNum::RN_T0, ++ HigherTracingHookAddr); ++ Address[5] = encodeInstruction_i12(PatchOpcodes::PO_LU52ID, RegNum::RN_T0, ++ RegNum::RN_T0, HighestTracingHookAddr); ++ Address[6] = encodeInstruction_si20(PatchOpcodes::PO_LU12IW, RegNum::RN_T1, ++ HiFunctionID); ++ Address[7] = encodeInstruction_i12(PatchOpcodes::PO_ORI, RegNum::RN_T1, ++ RegNum::RN_T1, LoFunctionID); ++ Address[8] = encodeInstruction_si16(PatchOpcodes::PO_JIRL, RegNum::RN_RA, ++ RegNum::RN_T0, 0); ++ Address[9] = encodeInstruction_i12(PatchOpcodes::PO_LD, RegNum::RN_RA, ++ RegNum::RN_SP, 0x8); ++ Address[10] = encodeInstruction_i12(PatchOpcodes::PO_ADDID, RegNum::RN_SP, ++ RegNum::RN_SP, 0x10); ++ uint32_t CreateStackSpace = encodeInstruction_i12( ++ PatchOpcodes::PO_ADDID, RegNum::RN_SP, RegNum::RN_SP, 0xff0); ++ std::atomic_store_explicit( ++ reinterpret_cast *>(Address), CreateStackSpace, ++ std::memory_order_release); ++ } else { ++ std::atomic_store_explicit( ++ reinterpret_cast *>(Address), ++ uint32_t(PatchOpcodes::PO_B48), 
std::memory_order_release); ++ } ++ return true; ++} ++ ++bool patchFunctionEntry(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled, ++ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT { ++ return patchSled(Enable, FuncId, Sled, Trampoline); ++} ++ ++bool patchFunctionExit(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); ++} ++ ++bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: In the future we'd need to distinguish between non-tail exits and ++ // tail exits for better information preservation. ++ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); ++} ++ ++bool patchCustomEvent(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: Implement in loongarch? ++ return false; ++} ++ ++bool patchTypedEvent(const bool Enable, const uint32_t FuncId, ++ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { ++ // FIXME: Implement in loongarch? ++ return false; ++} ++} // namespace __xray ++ ++extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { ++ // FIXME: this will have to be implemented in the trampoline assembly file ++} +diff --git a/compiler-rt/lib/xray/xray_loongarch64.cpp b/compiler-rt/lib/xray/xray_loongarch64.cpp +deleted file mode 100644 +index b839adba0..000000000 +--- a/compiler-rt/lib/xray/xray_loongarch64.cpp ++++ /dev/null +@@ -1,160 +0,0 @@ +-//===-------- xray_loongarch64.cpp ------------------------------*- C++ -*-===// +-// +-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +-// See https://llvm.org/LICENSE.txt for license information. 
+-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +-// +-//===----------------------------------------------------------------------===// +-// +-// This file is a part of XRay, a dynamic runtime instrumentation system. +-// +-// Implementation of loongarch-specific routines. +-// +-//===----------------------------------------------------------------------===// +-#include "sanitizer_common/sanitizer_common.h" +-#include "xray_defs.h" +-#include "xray_interface_internal.h" +-#include +- +-namespace __xray { +- +-enum RegNum : uint32_t { +- RN_RA = 1, +- RN_SP = 3, +- RN_T0 = 12, +- RN_T1 = 13, +-}; +- +-// Encode instructions in the 2RIx format, where the primary formats here +-// are 2RI12-type and 2RI16-type. +-static inline uint32_t +-encodeInstruction2RIx(uint32_t Opcode, uint32_t Rd, uint32_t Rj, +- uint32_t Imm) XRAY_NEVER_INSTRUMENT { +- return Opcode | (Imm << 10) | (Rj << 5) | Rd; +-} +- +-// Encode instructions in 1RI20 format, e.g. lu12i.w/lu32i.d. +-static inline uint32_t +-encodeInstruction1RI20(uint32_t Opcode, uint32_t Rd, +- uint32_t Imm) XRAY_NEVER_INSTRUMENT { +- return Opcode | (Imm << 5) | Rd; +-} +- +-static inline bool patchSled(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled, +- void (*TracingHook)()) XRAY_NEVER_INSTRUMENT { +- // When |Enable| == true, +- // We replace the following compile-time stub (sled): +- // +- // .Lxray_sled_beginN: +- // B .Lxray_sled_endN +- // 11 NOPs (44 bytes) +- // .Lxray_sled_endN: +- // +- // With the following runtime patch: +- // +- // xray_sled_n: +- // addi.d sp, sp, -16 ; create the stack frame +- // st.d ra, sp, 8 ; save the return address +- // lu12i.w t0, %abs_hi20(__xray_FunctionEntry/Exit) +- // ori t0, t0, %abs_lo12(__xray_FunctionEntry/Exit) +- // lu32i.d t0, %abs64_lo20(__xray_FunctionEntry/Exit) +- // lu52i.d t0, t0, %abs64_hi12(__xray_FunctionEntry/Exit) +- // lu12i.w t1, %abs_hi20(function_id) +- // ori t1, t1, %abs_lo12(function_id) ; pass the function id +- // 
jirl ra, t0, 0 ; call the tracing hook +- // ld.d ra, sp, 8 ; restore the return address +- // addi.d sp, sp, 16 ; de-allocate the stack frame +- // +- // Replacement of the first 4-byte instruction should be the last and atomic +- // operation, so that the user code which reaches the sled concurrently +- // either jumps over the whole sled, or executes the whole sled when the +- // latter is ready. +- // +- // When |Enable|==false, we set the first instruction in the sled back to +- // B #48 +- +- uint32_t *Address = reinterpret_cast(Sled.address()); +- if (Enable) { +- uint32_t LoTracingHookAddr = reinterpret_cast(TracingHook) & 0xfff; +- uint32_t HiTracingHookAddr = +- (reinterpret_cast(TracingHook) >> 12) & 0xfffff; +- uint32_t HigherTracingHookAddr = +- (reinterpret_cast(TracingHook) >> 32) & 0xfffff; +- uint32_t HighestTracingHookAddr = +- (reinterpret_cast(TracingHook) >> 52) & 0xfff; +- uint32_t LoFunctionID = FuncId & 0xfff; +- uint32_t HiFunctionID = (FuncId >> 12) & 0xfffff; +- Address[1] = encodeInstruction2RIx(0x29c00000, RegNum::RN_RA, RegNum::RN_SP, +- 0x8); // st.d ra, sp, 8 +- Address[2] = encodeInstruction1RI20( +- 0x14000000, RegNum::RN_T0, +- HiTracingHookAddr); // lu12i.w t0, HiTracingHookAddr +- Address[3] = encodeInstruction2RIx( +- 0x03800000, RegNum::RN_T0, RegNum::RN_T0, +- LoTracingHookAddr); // ori t0, t0, LoTracingHookAddr +- Address[4] = encodeInstruction1RI20( +- 0x16000000, RegNum::RN_T0, +- HigherTracingHookAddr); // lu32i.d t0, HigherTracingHookAddr +- Address[5] = encodeInstruction2RIx( +- 0x03000000, RegNum::RN_T0, RegNum::RN_T0, +- HighestTracingHookAddr); // lu52i.d t0, t0, HighestTracingHookAddr +- Address[6] = +- encodeInstruction1RI20(0x14000000, RegNum::RN_T1, +- HiFunctionID); // lu12i.w t1, HiFunctionID +- Address[7] = +- encodeInstruction2RIx(0x03800000, RegNum::RN_T1, RegNum::RN_T1, +- LoFunctionID); // ori t1, t1, LoFunctionID +- Address[8] = encodeInstruction2RIx(0x4c000000, RegNum::RN_RA, RegNum::RN_T0, +- 0); // 
jirl ra, t0, 0 +- Address[9] = encodeInstruction2RIx(0x28c00000, RegNum::RN_RA, RegNum::RN_SP, +- 0x8); // ld.d ra, sp, 8 +- Address[10] = encodeInstruction2RIx( +- 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0x10); // addi.d sp, sp, 16 +- uint32_t CreateStackSpace = encodeInstruction2RIx( +- 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0xff0); // addi.d sp, sp, -16 +- std::atomic_store_explicit( +- reinterpret_cast *>(Address), CreateStackSpace, +- std::memory_order_release); +- } else { +- std::atomic_store_explicit( +- reinterpret_cast *>(Address), +- uint32_t(0x50003000), std::memory_order_release); // b #48 +- } +- return true; +-} +- +-bool patchFunctionEntry(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled, +- void (*Trampoline)()) XRAY_NEVER_INSTRUMENT { +- return patchSled(Enable, FuncId, Sled, Trampoline); +-} +- +-bool patchFunctionExit(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { +- return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); +-} +- +-bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { +- // TODO: In the future we'd need to distinguish between non-tail exits and +- // tail exits for better information preservation. +- return patchSled(Enable, FuncId, Sled, __xray_FunctionExit); +-} +- +-bool patchCustomEvent(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { +- // FIXME: Implement in loongarch? +- return false; +-} +- +-bool patchTypedEvent(const bool Enable, const uint32_t FuncId, +- const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { +- // FIXME: Implement in loongarch? +- return false; +-} +-} // namespace __xray +- +-extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { +- // TODO: This will have to be implemented in the trampoline assembly file. 
+-} +diff --git a/compiler-rt/lib/xray/xray_trampoline_loongarch64.S b/compiler-rt/lib/xray/xray_trampoline_loongarch.S +similarity index 50% +rename from compiler-rt/lib/xray/xray_trampoline_loongarch64.S +rename to compiler-rt/lib/xray/xray_trampoline_loongarch.S +index fcbefcc5f..5c93cdfa8 100644 +--- a/compiler-rt/lib/xray/xray_trampoline_loongarch64.S ++++ b/compiler-rt/lib/xray/xray_trampoline_loongarch.S +@@ -1,4 +1,6 @@ +-//===-- xray_trampoline_loongarch64.s ---------------------------*- ASM -*-===// ++#include "../sanitizer_common/sanitizer_asm.h" ++ ++//===-- xray_trampoline_loongarch.s -----------------------------*- ASM -*-===// + // + // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + // See https://llvm.org/LICENSE.txt for license information. +@@ -12,113 +14,113 @@ + // + //===----------------------------------------------------------------------===// + +-#include "../sanitizer_common/sanitizer_asm.h" +- +-#define FROM_0_TO_7 0,1,2,3,4,5,6,7 +-#define FROM_7_TO_0 7,6,5,4,3,2,1,0 +- +-.macro SAVE_ARG_REGISTERS +- .irp i,FROM_7_TO_0 +- st.d $a\i, $sp, (8 * 8 + 8 * \i) +- .endr +- .irp i,FROM_7_TO_0 +- fst.d $f\i, $sp, (8 * \i) +- .endr +-.endm +- +-.macro RESTORE_ARG_REGISTERS +- .irp i,FROM_0_TO_7 +- fld.d $f\i, $sp, (8 * \i) +- .endr +- .irp i,FROM_0_TO_7 +- ld.d $a\i, $sp, (8 * 8 + 8 * \i) +- .endr +-.endm +- +-.macro SAVE_RET_REGISTERS +- st.d $a1, $sp, 24 +- st.d $a0, $sp, 16 +- fst.d $f1, $sp, 8 +- fst.d $f0, $sp, 0 +-.endm +- +-.macro RESTORE_RET_REGISTERS +- fld.d $f0, $sp, 0 +- fld.d $f1, $sp, 8 +- ld.d $a0, $sp, 16 +- ld.d $a1, $sp, 24 +-.endm +- + .text +- .file "xray_trampoline_loongarch64.S" +- .globl ASM_SYMBOL(__xray_FunctionEntry) +- ASM_HIDDEN(__xray_FunctionEntry) ++ .file "xray_trampoline_loongarch.S" ++ .globl __xray_FunctionEntry + .p2align 2 +- ASM_TYPE_FUNCTION(__xray_FunctionEntry) +-ASM_SYMBOL(__xray_FunctionEntry): ++ .type __xray_FunctionEntry,@function ++__xray_FunctionEntry: + .cfi_startproc 
+ // Save argument registers before doing any actual work. + .cfi_def_cfa_offset 136 + addi.d $sp, $sp, -136 + st.d $ra, $sp, 128 + .cfi_offset 1, -8 +- SAVE_ARG_REGISTERS +- +- la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE) +- ld.d $t2, $t2, 0 ++ st.d $a7, $sp, 120 ++ st.d $a6, $sp, 112 ++ st.d $a5, $sp, 104 ++ st.d $a4, $sp, 96 ++ st.d $a3, $sp, 88 ++ st.d $a2, $sp, 80 ++ st.d $a1, $sp, 72 ++ st.d $a0, $sp, 64 ++ fst.d $f7, $sp, 56 ++ fst.d $f6, $sp, 48 ++ fst.d $f5, $sp, 40 ++ fst.d $f4, $sp, 32 ++ fst.d $f3, $sp, 24 ++ fst.d $f2, $sp, 16 ++ fst.d $f1, $sp, 8 ++ fst.d $f0, $sp, 0 ++ ++ ++ la.got $t2, _ZN6__xray19XRayPatchedFunctionE ++ ld.d $t2, $t2, 0 + + beqz $t2, FunctionEntry_restore + +- // a1=0 means that we are tracing an entry event. ++ // a1=0 means that we are tracing an entry event + move $a1, $zero + // Function ID is in t1 (the first parameter). + move $a0, $t1 + jirl $ra, $t2, 0 + + FunctionEntry_restore: +- // Restore argument registers. +- RESTORE_ARG_REGISTERS +- ld.d $ra, $sp, 128 ++ // Restore argument registers ++ fld.d $f0, $sp, 0 ++ fld.d $f1, $sp, 8 ++ fld.d $f2, $sp, 16 ++ fld.d $f3, $sp, 24 ++ fld.d $f4, $sp, 32 ++ fld.d $f5, $sp, 40 ++ fld.d $f6, $sp, 48 ++ fld.d $f7, $sp, 56 ++ ld.d $a0, $sp, 64 ++ ld.d $a1, $sp, 72 ++ ld.d $a2, $sp, 80 ++ ld.d $a3, $sp, 88 ++ ld.d $a4, $sp, 96 ++ ld.d $a5, $sp, 104 ++ ld.d $a6, $sp, 112 ++ ld.d $a7, $sp, 120 ++ ld.d $ra, $sp, 128 + addi.d $sp, $sp, 136 +- ret ++ jr $ra + FunctionEntry_end: +- ASM_SIZE(__xray_FunctionEntry) ++ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry + .cfi_endproc + + .text +- .globl ASM_SYMBOL(__xray_FunctionExit) +- ASM_HIDDEN(__xray_FunctionExit) ++ .globl __xray_FunctionExit + .p2align 2 +- ASM_TYPE_FUNCTION(__xray_FunctionExit) +-ASM_SYMBOL(__xray_FunctionExit): ++ .type __xray_FunctionExit,@function ++__xray_FunctionExit: + .cfi_startproc + // Save return registers before doing any actual work. 
+ .cfi_def_cfa_offset 48 + addi.d $sp, $sp, -48 +- st.d $ra, $sp, 40 ++ st.d $ra, $sp, 40 + .cfi_offset 1, -8 + st.d $fp, $sp, 32 +- SAVE_RET_REGISTERS ++ st.d $a1, $sp, 24 ++ st.d $a0, $sp, 16 ++ fst.d $f1, $sp, 8 ++ fst.d $f0, $sp, 0 + +- la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE) +- ld.d $t2, $t2, 0 ++ la.got $t2, _ZN6__xray19XRayPatchedFunctionE ++ ld.d $t2, $t2, 0 + + beqz $t2, FunctionExit_restore + +- // a1=1 means that we are tracing an exit event. +- li.w $a1, 1 ++ // a1=1 means that we are tracing an exit event ++ ori $a1, $zero, 1 + // Function ID is in t1 (the first parameter). + move $a0, $t1 + jirl $ra, $t2, 0 + + FunctionExit_restore: +- // Restore return registers. +- RESTORE_RET_REGISTERS ++ // Restore return registers ++ fld.d $f0, $sp, 0 ++ fld.d $f1, $sp, 8 ++ ld.d $a1, $sp, 24 ++ ld.d $a0, $sp, 16 + ld.d $fp, $sp, 32 + ld.d $ra, $sp, 40 + addi.d $sp, $sp, 48 +- ret ++ jr $ra + + FunctionExit_end: +- ASM_SIZE(__xray_FunctionExit) ++ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit + .cfi_endproc ++ ++NO_EXEC_STACK_DIRECTIVE +diff --git a/compiler-rt/lib/xray/xray_tsc.h b/compiler-rt/lib/xray/xray_tsc.h +index e1cafe1bf..c685e6cbe 100644 +--- a/compiler-rt/lib/xray/xray_tsc.h ++++ b/compiler-rt/lib/xray/xray_tsc.h +@@ -43,7 +43,7 @@ inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT { + #elif defined(__powerpc64__) + #include "xray_powerpc64.inc" + #elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ +- defined(__hexagon__) || defined(__loongarch_lp64) ++ defined(__hexagon__) || defined(__loongarch__) + // Emulated TSC. + // There is no instruction like RDTSCP in user mode on ARM. 
ARM's CP15 does + // not have a constant frequency like TSC on x86(_64), it may go faster +diff --git a/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp b/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp +index e01021ff3..5109ac228 100644 +--- a/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp ++++ b/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp +@@ -1,6 +1,6 @@ + // FIXME: https://code.google.com/p/address-sanitizer/issues/detail?id=316 + // XFAIL: android +-// XFAIL: target=mips{{.*}} ++// XFAIL: target={{(mips|loongarch).*}} + // + // RUN: %clangxx_asan -O0 %s -o %t && %run %t + // RUN: %clangxx_asan -DPOSITIVE -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s +@@ -14,8 +14,8 @@ + #include + #include // for iovec + #include // for NT_PRSTATUS +-#if defined(__aarch64__) || defined(__loongarch__) +-# include ++#ifdef __aarch64__ ++# include + #endif + + #if defined(__i386__) || defined(__x86_64__) +@@ -37,13 +37,6 @@ typedef struct user_fpsimd_state fpregs_struct; + #define PRINT_REG_FP(__fpregs) printf ("%x\n", (unsigned) (__fpregs.fpsr)) + #define ARCH_IOVEC_FOR_GETREGSET + +-#elif defined(__loongarch__) +-typedef struct user_pt_regs regs_struct; +-typedef struct user_fp_state fpregs_struct; +-# define PRINT_REG_PC(__regs) printf("%lx\n", (unsigned long)(__regs.csr_era)) +-# define PRINT_REG_FP(__fpregs) printf("%x\n", (unsigned)(__fpregs.fcsr)) +-# define ARCH_IOVEC_FOR_GETREGSET +- + #elif defined(__powerpc64__) + typedef struct pt_regs regs_struct; + typedef elf_fpregset_t fpregs_struct; +diff --git a/compiler-rt/test/asan/TestCases/Linux/segv_read_write.c b/compiler-rt/test/asan/TestCases/Linux/segv_read_write.c +index 7e29386d6..cb47c5518 100644 +--- a/compiler-rt/test/asan/TestCases/Linux/segv_read_write.c ++++ b/compiler-rt/test/asan/TestCases/Linux/segv_read_write.c +@@ -1,7 +1,7 @@ + // RUN: %clangxx_asan -std=c++11 -O0 %s -o %t + // RUN: not %run %t 2>&1 | FileCheck %s --check-prefix=READ + // RUN: not %run %t write 2>&1 | FileCheck %s 
--check-prefix=WRITE +-// UNSUPPORTED: target={{(powerpc64|mips|s390).*}} ++// UNSUPPORTED: target={{(powerpc64|loongarch|mips|s390).*}} + + #include + +diff --git a/compiler-rt/test/asan/TestCases/Linux/vfork.cpp b/compiler-rt/test/asan/TestCases/Linux/vfork.cpp +index b943e4deb..4c0f02c50 100644 +--- a/compiler-rt/test/asan/TestCases/Linux/vfork.cpp ++++ b/compiler-rt/test/asan/TestCases/Linux/vfork.cpp +@@ -1,7 +1,7 @@ + // https://github.com/google/sanitizers/issues/925 + // RUN: %clang_asan -O0 %s -o %t && %run %t 2>&1 + +-// REQUIRES: aarch64-target-arch || x86_64-target-arch || i386-target-arch || arm-target-arch || riscv64-target-arch || loongarch64-target-arch ++// REQUIRES: aarch64-target-arch || x86_64-target-arch || i386-target-arch || arm-target-arch || riscv64-target-arch + + #include + #include +diff --git a/compiler-rt/test/builtins/Unit/addtf3_test.c b/compiler-rt/test/builtins/Unit/addtf3_test.c +index cd5872e7d..6a0f53a78 100644 +--- a/compiler-rt/test/builtins/Unit/addtf3_test.c ++++ b/compiler-rt/test/builtins/Unit/addtf3_test.c +@@ -33,57 +33,71 @@ char assumption_1[sizeof(tf_float) * CHAR_BIT == 128] = {0}; + + #endif + +-int main() { +-#if defined(CRT_HAS_IEEE_TF) +- // qNaN + any = qNaN +- if (test__addtf3(makeQNaN128(), 0x1.23456789abcdefp+5L, +- UINT64_C(0x7fff800000000000), UINT64_C(0x0))) +- return 1; +- // NaN + any = NaN +- if (test__addtf3(makeNaN128(UINT64_C(0x800030000000)), +- TF_C(0x1.23456789abcdefp+5), UINT64_C(0x7fff800000000000), +- UINT64_C(0x0))) +- return 1; +- // inf + inf = inf +- if (test__addtf3(makeInf128(), makeInf128(), UINT64_C(0x7fff000000000000), +- UINT64_C(0x0))) +- return 1; +- // inf + any = inf +- if (test__addtf3(makeInf128(), TF_C(0x1.2335653452436234723489432abcdefp+5), +- UINT64_C(0x7fff000000000000), UINT64_C(0x0))) +- return 1; +- // any + any +- if (test__addtf3(TF_C(0x1.23456734245345543849abcdefp+5), +- TF_C(0x1.edcba52449872455634654321fp-1), +- UINT64_C(0x40042afc95c8b579), 
UINT64_C(0x61e58dd6c51eb77c))) +- return 1; +- +-# if (defined(__arm__) || defined(__aarch64__)) && defined(__ARM_FP) || \ +- defined(i386) || defined(__x86_64__) || \ +- (defined(__loongarch__) && __loongarch_frlen != 0) +- // Rounding mode tests on supported architectures +- const tf_float m = 1234.0L, n = 0.01L; +- +- fesetround(FE_UPWARD); +- if (test__addtf3(m, n, UINT64_C(0x40093480a3d70a3d), +- UINT64_C(0x70a3d70a3d70a3d8))) +- return 1; +- +- fesetround(FE_DOWNWARD); +- if (test__addtf3(m, n, UINT64_C(0x40093480a3d70a3d), +- UINT64_C(0x70a3d70a3d70a3d7))) +- return 1; +- +- fesetround(FE_TOWARDZERO); +- if (test__addtf3(m, n, UINT64_C(0x40093480a3d70a3d), +- UINT64_C(0x70a3d70a3d70a3d7))) +- return 1; +- +- fesetround(FE_TONEAREST); +- if (test__addtf3(m, n, UINT64_C(0x40093480a3d70a3d), +- UINT64_C(0x70a3d70a3d70a3d7))) +- return 1; +-# endif ++int main() ++{ ++#if __LDBL_MANT_DIG__ == 113 ++ // qNaN + any = qNaN ++ if (test__addtf3(makeQNaN128(), ++ 0x1.23456789abcdefp+5L, ++ UINT64_C(0x7fff800000000000), ++ UINT64_C(0x0))) ++ return 1; ++ // NaN + any = NaN ++ if (test__addtf3(makeNaN128(UINT64_C(0x800030000000)), ++ 0x1.23456789abcdefp+5L, ++ UINT64_C(0x7fff800000000000), ++ UINT64_C(0x0))) ++ return 1; ++ // inf + inf = inf ++ if (test__addtf3(makeInf128(), ++ makeInf128(), ++ UINT64_C(0x7fff000000000000), ++ UINT64_C(0x0))) ++ return 1; ++ // inf + any = inf ++ if (test__addtf3(makeInf128(), ++ 0x1.2335653452436234723489432abcdefp+5L, ++ UINT64_C(0x7fff000000000000), ++ UINT64_C(0x0))) ++ return 1; ++ // any + any ++ if (test__addtf3(0x1.23456734245345543849abcdefp+5L, ++ 0x1.edcba52449872455634654321fp-1L, ++ UINT64_C(0x40042afc95c8b579), ++ UINT64_C(0x61e58dd6c51eb77c))) ++ return 1; ++ ++#if (defined(__arm__) || defined(__aarch64__)) && defined(__ARM_FP) || \ ++ defined(i386) || defined(__x86_64__) || (defined(__loongarch__) && \ ++ __loongarch_frlen != 0) ++ // Rounding mode tests on supported architectures ++ const long double m = 1234.0L, n = 
0.01L; ++ ++ fesetround(FE_UPWARD); ++ if (test__addtf3(m, n, ++ UINT64_C(0x40093480a3d70a3d), ++ UINT64_C(0x70a3d70a3d70a3d8))) ++ return 1; ++ ++ fesetround(FE_DOWNWARD); ++ if (test__addtf3(m, n, ++ UINT64_C(0x40093480a3d70a3d), ++ UINT64_C(0x70a3d70a3d70a3d7))) ++ return 1; ++ ++ ++ fesetround(FE_TOWARDZERO); ++ if (test__addtf3(m, n, ++ UINT64_C(0x40093480a3d70a3d), ++ UINT64_C(0x70a3d70a3d70a3d7))) ++ return 1; ++ ++ fesetround(FE_TONEAREST); ++ if (test__addtf3(m, n, ++ UINT64_C(0x40093480a3d70a3d), ++ UINT64_C(0x70a3d70a3d70a3d7))) ++ return 1; ++#endif + + #else + printf("skipped\n"); +diff --git a/compiler-rt/test/dfsan/lit.cfg.py b/compiler-rt/test/dfsan/lit.cfg.py +index e947c51f9..286c0c7aa 100644 +--- a/compiler-rt/test/dfsan/lit.cfg.py ++++ b/compiler-rt/test/dfsan/lit.cfg.py +@@ -25,5 +25,5 @@ config.substitutions.append(("%clangxx_dfsan ", build_invocation(clang_dfsan_cxx + config.suffixes = [".c", ".cpp"] + + # DataFlowSanitizer tests are currently supported on Linux only. 
+-if not (config.host_os in ["Linux"] and config.target_arch in ["aarch64", "x86_64", "loongarch64"]): ++if not (config.host_os in ["Linux"] and config.target_arch in ["aarch64", "x86_64"]): + config.unsupported = True +diff --git a/compiler-rt/test/fuzzer/disable-leaks.test b/compiler-rt/test/fuzzer/disable-leaks.test +index 534db7fa2..1b369ea5b 100644 +--- a/compiler-rt/test/fuzzer/disable-leaks.test ++++ b/compiler-rt/test/fuzzer/disable-leaks.test +@@ -1,5 +1,5 @@ + REQUIRES: lsan +-UNSUPPORTED: target=aarch64{{.*}} ++UNSUPPORTED: target={{(aarch64|loongarch).*}} + RUN: %cpp_compiler %S/AccumulateAllocationsTest.cpp -o %t-AccumulateAllocationsTest + RUN: %run %t-AccumulateAllocationsTest -detect_leaks=1 -runs=100000 2>&1 | FileCheck %s --check-prefix=ACCUMULATE_ALLOCS + ACCUMULATE_ALLOCS: INFO: libFuzzer disabled leak detection after every mutation +diff --git a/compiler-rt/test/fuzzer/fork-ubsan.test b/compiler-rt/test/fuzzer/fork-ubsan.test +index 2d68b72fe..dfcddcc72 100644 +--- a/compiler-rt/test/fuzzer/fork-ubsan.test ++++ b/compiler-rt/test/fuzzer/fork-ubsan.test +@@ -1,4 +1,4 @@ +-# UNSUPPORTED: darwin, target={{.*freebsd.*}}, target=aarch64{{.*}} ++# UNSUPPORTED: darwin, target={{.*freebsd.*}}, target=aarch64{{.*}}, target=loongarch{{.*}} + # Tests how the fork mode works together with ubsan. + RUN: %cpp_compiler %S/IntegerOverflowTest.cpp -o %t-IntegerOverflowTest -fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow + RUN: not %run %t-IntegerOverflowTest -fork=1 -ignore_crashes=1 -runs=10000 2>&1 | FileCheck %s --check-prefix=UBSAN_FORK +diff --git a/compiler-rt/test/lsan/TestCases/Linux/leak_check_segv.cpp b/compiler-rt/test/lsan/TestCases/Linux/leak_check_segv.cpp +index faa154456..ed478cb4a 100644 +--- a/compiler-rt/test/lsan/TestCases/Linux/leak_check_segv.cpp ++++ b/compiler-rt/test/lsan/TestCases/Linux/leak_check_segv.cpp +@@ -1,22 +1,23 @@ + // Test that SIGSEGV during leak checking does not crash the process. 
+ // RUN: %clangxx_lsan -O1 %s -o %t && not %run %t 2>&1 | FileCheck %s
+ // UNSUPPORTED: ppc
+-#include
+-#include
+ #include
++#include
+ #include
++#include
+ #include
+ 
+ char data[10 * 1024 * 1024];
+ 
+ int main() {
+- long pagesize_mask = sysconf(_SC_PAGESIZE) - 1;
+ void *p = malloc(10 * 1024 * 1024);
++ long pagesz_minus_one = sysconf(_SC_PAGESIZE) - 1;
+ // surprise-surprise!
+- mprotect((void *)(((unsigned long)p + pagesize_mask) & ~pagesize_mask),
+- 16 * 1024, PROT_NONE);
+- mprotect((void *)(((unsigned long)data + pagesize_mask) & ~pagesize_mask),
++ mprotect((void *)(((unsigned long)p + pagesz_minus_one) & ~pagesz_minus_one),
+ 16 * 1024, PROT_NONE);
++ mprotect(
++ (void *)(((unsigned long)data + pagesz_minus_one) & ~pagesz_minus_one),
++ 16 * 1024, PROT_NONE);
+ __lsan_do_leak_check();
+ fprintf(stderr, "DONE\n");
+ }
+diff --git a/compiler-rt/test/lsan/TestCases/strace_test.cpp b/compiler-rt/test/lsan/TestCases/strace_test.cpp
+index 18c809ca3..2b4835dcf 100644
+--- a/compiler-rt/test/lsan/TestCases/strace_test.cpp
++++ b/compiler-rt/test/lsan/TestCases/strace_test.cpp
+@@ -5,6 +5,7 @@
+ // FIXME: This technically works in practice but cannot be tested because the
+ // fatal-error caused adb to failed. Could not be captured to stderr to lit-check.
+ // XFAIL: android
++// UNSUPPORTED: loongarch
+ 
+ #include 
+ #include 
+diff --git a/compiler-rt/test/lsan/TestCases/swapcontext.cpp b/compiler-rt/test/lsan/TestCases/swapcontext.cpp
+index 567cde744..a2f7cb574 100644
+--- a/compiler-rt/test/lsan/TestCases/swapcontext.cpp
++++ b/compiler-rt/test/lsan/TestCases/swapcontext.cpp
+@@ -5,7 +5,7 @@
+ // RUN: %env_lsan_opts= %run %t 2>&1
+ // RUN: %env_lsan_opts= not %run %t foo 2>&1 | FileCheck %s
+ // Missing 'getcontext' and 'makecontext' on Android.
+-// UNSUPPORTED: target={{(arm|aarch64|loongarch64|powerpc64).*}},android ++// UNSUPPORTED: target={{(arm|aarch64|loongarch|powerpc64).*}},android + + #include "sanitizer_common/sanitizer_ucontext.h" + #include +diff --git a/compiler-rt/test/lsan/TestCases/use_registers.cpp b/compiler-rt/test/lsan/TestCases/use_registers.cpp +index a4fbf27ae..9f1edcde4 100644 +--- a/compiler-rt/test/lsan/TestCases/use_registers.cpp ++++ b/compiler-rt/test/lsan/TestCases/use_registers.cpp +@@ -31,6 +31,10 @@ extern "C" void *registers_thread_func(void *arg) { + asm("move $16, %0" + : + : "r"(p)); ++#elif defined(__loongarch__) ++ asm("move $r23, %0" ++ : ++ : "r"(p)); + #elif defined(__arm__) + asm("mov r5, %0" + : +@@ -43,8 +47,6 @@ extern "C" void *registers_thread_func(void *arg) { + "mov x14, %0" + : + : "r"(p)); +-#elif defined(__loongarch_lp64) +- asm("move $s8, %0" : : "r"(p)); + #elif defined(__powerpc__) + asm("mr 30, %0" + : +diff --git a/compiler-rt/test/lsan/lit.common.cfg.py b/compiler-rt/test/lsan/lit.common.cfg.py +index e9b974955..6b9afd7a7 100644 +--- a/compiler-rt/test/lsan/lit.common.cfg.py ++++ b/compiler-rt/test/lsan/lit.common.cfg.py +@@ -93,7 +93,7 @@ config.substitutions.append(("%clangxx_hwasan ", build_invocation(clang_lsan_cxx + + + # LeakSanitizer tests are currently supported on +-# Android{aarch64, x86, x86_64}, x86-64 Linux, PowerPC64 Linux, arm Linux, mips64 Linux, s390x Linux, loongarch64 Linux and x86_64 Darwin. ++# Android{aarch64, x86, x86_64}, x86-64 Linux, PowerPC64 Linux, arm Linux, mips64 Linux, s390x Linux and x86_64 Darwin. 
+ supported_android = ( + config.android + and config.target_arch in ["x86_64", "i386", "aarch64"] +@@ -108,13 +108,13 @@ supported_linux = ( + "x86_64", + "ppc64", + "ppc64le", ++ "loongarch64", + "mips64", + "riscv64", + "arm", + "armhf", + "armv7l", + "s390x", +- "loongarch64", + ] + ) + supported_darwin = config.host_os == "Darwin" and config.target_arch in ["x86_64"] +diff --git a/compiler-rt/test/msan/allocator_mapping.cpp b/compiler-rt/test/msan/allocator_mapping.cpp +index e7a12da48..88943e982 100644 +--- a/compiler-rt/test/msan/allocator_mapping.cpp ++++ b/compiler-rt/test/msan/allocator_mapping.cpp +@@ -8,7 +8,7 @@ + // This test only makes sense for the 64-bit allocator. The 32-bit allocator + // does not have a fixed mapping. Exclude platforms that use the 32-bit + // allocator. +-// UNSUPPORTED: target-is-mips64,target-is-mips64el,target=aarch64{{.*}} ++// UNSUPPORTED: target-is-mips64,target-is-mips64el,target={{(aarch64|loongarch).*}} + + #include + #include +diff --git a/compiler-rt/test/msan/lit.cfg.py b/compiler-rt/test/msan/lit.cfg.py +index 361be79e2..e3dd2dcd3 100644 +--- a/compiler-rt/test/msan/lit.cfg.py ++++ b/compiler-rt/test/msan/lit.cfg.py +@@ -50,7 +50,7 @@ if config.host_os not in ["Linux", "NetBSD", "FreeBSD"]: + # For mips64, mips64el we have forced store_context_size to 1 because these + # archs use slow unwinder which is not async signal safe. Therefore we only + # check the first frame since store_context size is 1. 
+-if config.host_arch in ["mips64", "mips64el"]: ++if config.host_arch in ["mips64", "mips64el", "loongarch64"]: + config.substitutions.append(("CHECK-%short-stack", "CHECK-SHORT-STACK")) + else: + config.substitutions.append(("CHECK-%short-stack", "CHECK-FULL-STACK")) +diff --git a/compiler-rt/test/msan/mmap.cpp b/compiler-rt/test/msan/mmap.cpp +index 16c482628..ac2c26a0c 100644 +--- a/compiler-rt/test/msan/mmap.cpp ++++ b/compiler-rt/test/msan/mmap.cpp +@@ -18,11 +18,7 @@ bool AddrIsApp(void *p) { + return (addr >= 0x000000000000ULL && addr < 0x010000000000ULL) || + (addr >= 0x510000000000ULL && addr < 0x600000000000ULL) || + (addr >= 0x700000000000ULL && addr < 0x800000000000ULL); +-#elif defined(__loongarch_lp64) +- return (addr >= 0x000000000000ULL && addr < 0x010000000000ULL) || +- (addr >= 0x510000000000ULL && addr < 0x600000000000ULL) || +- (addr >= 0x700000000000ULL && addr < 0x800000000000ULL); +-#elif defined(__mips64) ++#elif defined(__mips64) || defined(__loongarch64) + return (addr >= 0x0000000000ULL && addr <= 0x0200000000ULL) || + (addr >= 0xa200000000ULL && addr <= 0xc000000000ULL) || + addr >= 0xe200000000ULL; +@@ -53,7 +49,7 @@ bool AddrIsApp(void *p) { + + int main() { + // Large enough to quickly exhaust the entire address space. 
+-#if defined(__mips64) || defined(__aarch64__) ++#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch64) + const size_t kMapSize = 0x100000000ULL; + #else + const size_t kMapSize = 0x1000000000ULL; +diff --git a/compiler-rt/test/msan/mmap_below_shadow.cpp b/compiler-rt/test/msan/mmap_below_shadow.cpp +index 97b561e50..c57a6cf36 100644 +--- a/compiler-rt/test/msan/mmap_below_shadow.cpp ++++ b/compiler-rt/test/msan/mmap_below_shadow.cpp +@@ -21,10 +21,7 @@ int main(void) { + #elif defined(__x86_64__) + uintptr_t hint = 0x4f0000000000ULL; + const uintptr_t app_start = 0x600000000000ULL; +-#elif defined(__loongarch_lp64) +- uintptr_t hint = 0x4f0000000000ULL; +- const uintptr_t app_start = 0x600000000000ULL; +-#elif defined (__mips64) ++#elif defined(__mips64) || defined(__loongarch64) + uintptr_t hint = 0x4f00000000ULL; + const uintptr_t app_start = 0x6000000000ULL; + #elif defined (__powerpc64__) +diff --git a/compiler-rt/test/msan/param_tls_limit.cpp b/compiler-rt/test/msan/param_tls_limit.cpp +index 35032f9ed..db845792a 100644 +--- a/compiler-rt/test/msan/param_tls_limit.cpp ++++ b/compiler-rt/test/msan/param_tls_limit.cpp +@@ -5,9 +5,9 @@ + // RUN: %clangxx_msan -fno-sanitize-memory-param-retval -fsanitize-memory-track-origins -O0 %s -o %t && %run %t + // RUN: %clangxx_msan -fno-sanitize-memory-param-retval -fsanitize-memory-track-origins=2 -O0 %s -o %t && %run %t + // +-// AArch64 and LoongArch64 fail with: ++// AArch64 and LoongArch fails with: + // void f801(S<801>): Assertion `__msan_test_shadow(&s, sizeof(s)) == -1' failed +-// XFAIL: target={{(aarch64|loongarch64).*}} ++// XFAIL: target={{(aarch64|loongarch).*}} + // When passing huge structs by value, SystemZ uses pointers, therefore this + // test in its present form is unfortunately not applicable. + // ABI says: "A struct or union of any other size . 
Replace such an +diff --git a/compiler-rt/test/msan/poison_in_signal.cpp b/compiler-rt/test/msan/poison_in_signal.cpp +index 5e833e516..5eaf0598d 100644 +--- a/compiler-rt/test/msan/poison_in_signal.cpp ++++ b/compiler-rt/test/msan/poison_in_signal.cpp +@@ -1,8 +1,9 @@ + // Stress test of poisoning from signal handler. +- + // RUN: %clangxx_msan -std=c++11 -O2 %s -o %t && %run %t + // RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -std=c++11 -O2 %s -o %t && %run %t + // RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -fsanitize-memory-use-after-dtor -std=c++11 -O2 %s -o %t && %run %t ++// ++// UNSUPPORTED: loongarch + + #include + #include +diff --git a/compiler-rt/test/msan/strlen_of_shadow.cpp b/compiler-rt/test/msan/strlen_of_shadow.cpp +index 39962481d..d8b3ff602 100644 +--- a/compiler-rt/test/msan/strlen_of_shadow.cpp ++++ b/compiler-rt/test/msan/strlen_of_shadow.cpp +@@ -15,9 +15,7 @@ + const char *mem_to_shadow(const char *p) { + #if defined(__x86_64__) + return (char *)((uintptr_t)p ^ 0x500000000000ULL); +-#elif defined(__loongarch_lp64) +- return (char *)((uintptr_t)p ^ 0x500000000000ULL); +-#elif defined (__mips64) ++#elif defined(__mips64) || defined(__loongarch64) + return (char *)((uintptr_t)p ^ 0x8000000000ULL); + #elif defined(__powerpc64__) + #define LINEARIZE_MEM(mem) \ +diff --git a/compiler-rt/test/msan/vararg.cpp b/compiler-rt/test/msan/vararg.cpp +index 1091ce3bc..6a2bc8456 100644 +--- a/compiler-rt/test/msan/vararg.cpp ++++ b/compiler-rt/test/msan/vararg.cpp +@@ -10,8 +10,8 @@ + + // Check that shadow and origin are passed through va_args. + +-// Copying origins on AArch64, LoongArch64, MIPS and PowerPC isn't supported yet. +-// XFAIL: target={{(aarch64|loongarch64|mips|powerpc64).*}} ++// Copying origins on AArch64, LoongArch, MIPS and PowerPC isn't supported yet. 
++// XFAIL: target={{(aarch64|loongarch|mips|powerpc64).*}} + + #include + #include +diff --git a/compiler-rt/test/msan/vararg_shadow.cpp b/compiler-rt/test/msan/vararg_shadow.cpp +index 20e55da5b..1fd39676d 100644 +--- a/compiler-rt/test/msan/vararg_shadow.cpp ++++ b/compiler-rt/test/msan/vararg_shadow.cpp +@@ -4,7 +4,7 @@ + // RUN: %clangxx_msan -fno-sanitize-memory-param-retval -fsanitize-memory-track-origins=0 -O3 %s -o %t + + // FIXME: The rest is likely still broken. +-// XFAIL: target={{(loongarch64|mips|powerpc64).*}} ++// XFAIL: target={{(mips|powerpc64).*}} + + #include + #include +diff --git a/compiler-rt/test/msan/vector_select.cpp b/compiler-rt/test/msan/vector_select.cpp +index 0cf116497..8173b864e 100644 +--- a/compiler-rt/test/msan/vector_select.cpp ++++ b/compiler-rt/test/msan/vector_select.cpp +@@ -11,7 +11,7 @@ __m128d select(bool b, __m128d c, __m128d d) + { + return b ? c : d; + } +-#elif defined (__mips64) || defined (__powerpc64__) ++#elif defined(__mips64) || defined(__powerpc64__) || defined(__loongarch64) + typedef double __w64d __attribute__ ((vector_size(16))); + + __w64d select(bool b, __w64d c, __w64d d) +diff --git a/compiler-rt/test/sanitizer_common/TestCases/Linux/ptrace.cpp b/compiler-rt/test/sanitizer_common/TestCases/Linux/ptrace.cpp +index c43b13a0b..dec96b401 100644 +--- a/compiler-rt/test/sanitizer_common/TestCases/Linux/ptrace.cpp ++++ b/compiler-rt/test/sanitizer_common/TestCases/Linux/ptrace.cpp +@@ -17,10 +17,13 @@ + #include + #include + #endif +-#if defined(__aarch64__) || defined(__loongarch__) ++#ifdef __aarch64__ + // GLIBC 2.20+ sys/user does not include asm/ptrace.h + #include + #endif ++#ifdef __loongarch64 ++# include ++#endif + + int main(void) { + pid_t pid; +@@ -100,28 +103,6 @@ int main(void) { + printf("%x\n", fpregs.fpsr); + #endif // (__aarch64__) + +-#if (__loongarch__) +- struct iovec regset_io; +- +- struct user_pt_regs regs; +- regset_io.iov_base = ®s; +- regset_io.iov_len = sizeof(regs); +- res = +- 
ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, (void *)&regset_io);
+- assert(!res);
+- if (regs.csr_era)
+- printf("%lx\n", regs.csr_era);
+-
+- struct user_fp_state fpregs;
+- regset_io.iov_base = &fpregs;
+- regset_io.iov_len = sizeof(fpregs);
+- res =
+- ptrace(PTRACE_GETREGSET, pid, (void *)NT_FPREGSET, (void *)&regset_io);
+- assert(!res);
+- if (fpregs.fcsr)
+- printf("%x\n", fpregs.fcsr);
+-#endif // (__loongarch__)
+-
+ #if (__s390__)
+ struct iovec regset_io;
+
+@@ -142,6 +123,24 @@ int main(void) {
+ printf("%x\n", fpregs.fpc);
+ #endif // (__s390__)
+ 
++#if (__loongarch64)
++ struct iovec regset_io;
++
++ struct user_pt_regs regs;
++ regset_io.iov_base = &regs;
++ regset_io.iov_len = sizeof(regs);
++ res = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, (void *)&regset_io);
++ assert(!res);
++
++ struct user_fp_state fpregs;
++ regset_io.iov_base = &fpregs;
++ regset_io.iov_len = sizeof(fpregs);
++ res = ptrace(PTRACE_GETREGSET, pid, (void *)NT_FPREGSET, (void *)&regset_io);
++ assert(!res);
++ if (fpregs.fcsr)
++ printf("%x\n", fpregs.fcsr);
++#endif // (__loongarch64)
++
+ siginfo_t siginfo;
+ res = ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo);
+ assert(!res);
+diff --git a/compiler-rt/test/sanitizer_common/print_address.h b/compiler-rt/test/sanitizer_common/print_address.h
+index df3132224..3e64d3bcd 100644
+--- a/compiler-rt/test/sanitizer_common/print_address.h
++++ b/compiler-rt/test/sanitizer_common/print_address.h
+@@ -10,9 +10,8 @@ void print_address(const char *str, int n, ...)
{ + va_start(ap, n); + while (n--) { + void *p = va_arg(ap, void *); +-#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \ +- defined(__s390x__) || (defined(__riscv) && __riscv_xlen == 64) || \ +- defined(__loongarch_lp64) ++#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \ ++ defined(__s390x__) || (defined(__riscv) && __riscv_xlen == 64) || defined(__loongarch__) + // On FreeBSD, the %p conversion specifier works as 0x%x and thus does not + // match to the format used in the diagnotic message. + fprintf(stderr, "0x%012lx ", (unsigned long) p); +diff --git a/compiler-rt/test/tsan/mmap_large.cpp b/compiler-rt/test/tsan/mmap_large.cpp +index a6aca720b..02eb7e574 100644 +--- a/compiler-rt/test/tsan/mmap_large.cpp ++++ b/compiler-rt/test/tsan/mmap_large.cpp +@@ -18,7 +18,9 @@ int main() { + #ifdef __x86_64__ + const size_t kLog2Size = 39; + #elif defined(__mips64) || defined(__aarch64__) || \ +- defined(__loongarch_lp64) || (defined(__riscv) && __riscv_xlen == 64) ++ (defined(__riscv) && __riscv_xlen == 64) ++ const size_t kLog2Size = 32; ++#elif defined(__loongarch64) + const size_t kLog2Size = 32; + #elif defined(__powerpc64__) + const size_t kLog2Size = 39; +diff --git a/compiler-rt/test/tsan/test.h b/compiler-rt/test/tsan/test.h +index 6fd552465..47fe508b1 100644 +--- a/compiler-rt/test/tsan/test.h ++++ b/compiler-rt/test/tsan/test.h +@@ -78,6 +78,8 @@ const int kPCInc = 1; + const int kPCInc = 8; + #elif defined(__riscv) && __riscv_xlen == 64 + const int kPCInc = 2; ++#elif defined(__loongarch__) ++const int kPCInc = 4; + #else + const int kPCInc = 4; + #endif +diff --git a/compiler-rt/test/xray/TestCases/Posix/argv0-log-file-name.cpp b/compiler-rt/test/xray/TestCases/Posix/argv0-log-file-name.cpp +index bd48693d3..f364151eb 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/argv0-log-file-name.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/argv0-log-file-name.cpp +@@ -7,6 +7,7 @@ + // RUN: rm 
xray-log.argv0-log-file-name.* xray.log.file.name + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + #include +diff --git a/compiler-rt/test/xray/TestCases/Posix/c-test.cpp b/compiler-rt/test/xray/TestCases/Posix/c-test.cpp +index 642756618..9a8a16dd3 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/c-test.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/c-test.cpp +@@ -4,7 +4,7 @@ + // RUN: 2>&1 | FileCheck %s + // RUN: rm -f xray-log.c-test.* + // +-// REQUIRES: target={{(aarch64|loongarch64|x86_64)-.*}} ++// REQUIRES: target={{(aarch64|x86_64)-.*}} + // REQUIRES: built-in-llvm-tree + __attribute__((xray_always_instrument)) void always() {} + +diff --git a/compiler-rt/test/xray/TestCases/Posix/coverage-sample.cpp b/compiler-rt/test/xray/TestCases/Posix/coverage-sample.cpp +index 1903ad6fb..70dfd4642 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/coverage-sample.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/coverage-sample.cpp +@@ -6,6 +6,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t | FileCheck %s + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + +diff --git a/compiler-rt/test/xray/TestCases/Posix/fdr-single-thread.cpp b/compiler-rt/test/xray/TestCases/Posix/fdr-single-thread.cpp +index b8803aedc..9be9b77e8 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/fdr-single-thread.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/fdr-single-thread.cpp +@@ -9,6 +9,7 @@ + // RUN: rm fdr-logging-1thr-* + + // UNSUPPORTED: target=arm{{.*}} ++// UNSUPPORTED: target=loongarch{{.*}} + + #include "xray/xray_log_interface.h" + #include +diff --git a/compiler-rt/test/xray/TestCases/Posix/fdr-thread-order.cpp b/compiler-rt/test/xray/TestCases/Posix/fdr-thread-order.cpp +index 85284fc27..1dc79db27 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/fdr-thread-order.cpp ++++ 
b/compiler-rt/test/xray/TestCases/Posix/fdr-thread-order.cpp +@@ -8,7 +8,7 @@ + // RUN: %llvm_xray convert --symbolize --output-format=yaml -instr_map=%t.exe %t/* | \ + // RUN: FileCheck %s --check-prefix TRACE + +-// REQUIRES: target={{(aarch64|loongarch64|x86_64)-.*}} ++// REQUIRES: target={{(aarch64|x86_64)-.*}} + // REQUIRES: built-in-llvm-tree + + #include "xray/xray_log_interface.h" +diff --git a/compiler-rt/test/xray/TestCases/Posix/fixedsize-logging.cpp b/compiler-rt/test/xray/TestCases/Posix/fixedsize-logging.cpp +index e4462c8b4..d9cdad5ba 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/fixedsize-logging.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/fixedsize-logging.cpp +@@ -8,6 +8,7 @@ + // RUN: rm fixedsize-logging-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + +diff --git a/compiler-rt/test/xray/TestCases/Posix/func-id-utils.cpp b/compiler-rt/test/xray/TestCases/Posix/func-id-utils.cpp +index ab0c5b01c..b2631f1bc 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/func-id-utils.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/func-id-utils.cpp +@@ -7,6 +7,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + #include +diff --git a/compiler-rt/test/xray/TestCases/Posix/logging-modes.cpp b/compiler-rt/test/xray/TestCases/Posix/logging-modes.cpp +index f839ba5e5..2302995c0 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/logging-modes.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/logging-modes.cpp +@@ -5,6 +5,7 @@ + // RUN: %run %t | FileCheck %s + // + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + #include "xray/xray_log_interface.h" +diff --git a/compiler-rt/test/xray/TestCases/Posix/optional-inmemory-log.cpp 
b/compiler-rt/test/xray/TestCases/Posix/optional-inmemory-log.cpp +index a32c87466..59d4c53c2 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/optional-inmemory-log.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/optional-inmemory-log.cpp +@@ -9,6 +9,7 @@ + // RUN: rm -f optional-inmemory-log.xray-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + +diff --git a/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.cpp b/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.cpp +index 978a897ac..267c431f8 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/patching-unpatching.cpp +@@ -7,6 +7,7 @@ + // RUN: XRAY_OPTIONS="patch_premain=false" %run %t 2>&1 | FileCheck %s + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include "xray/xray_interface.h" + +diff --git a/compiler-rt/test/xray/TestCases/Posix/pic_test.cpp b/compiler-rt/test/xray/TestCases/Posix/pic_test.cpp +index fbf6bdcd4..161567b64 100644 +--- a/compiler-rt/test/xray/TestCases/Posix/pic_test.cpp ++++ b/compiler-rt/test/xray/TestCases/Posix/pic_test.cpp +@@ -10,6 +10,7 @@ + // RUN: rm -f pic-test-logging-* + + // UNSUPPORTED: target-is-mips64,target-is-mips64el ++// UNSUPPORTED: target-is-loongarch64 + + #include + diff --git a/compiler-rt.spec b/compiler-rt.spec index 394c59e4fcfbf20a1ea69848c3f911af9427a1b0..88a442c88a31a80ba42d3f0d89fd376daf6577b5 100644 --- a/compiler-rt.spec +++ b/compiler-rt.spec @@ -1,3 +1,4 @@ +%define anolis_release .0.1 %bcond_with snapshot_build %if %{with snapshot_build} @@ -40,7 +41,7 @@ Name: %{pkg_name} Version: %{compiler_rt_version}%{?rc_ver:~rc%{rc_ver}}%{?llvm_snapshot_version_suffix:~%{llvm_snapshot_version_suffix}} -Release: 1%{?dist} +Release: 1%{anolis_release}%{?dist} Summary: LLVM "compiler-rt" runtime libraries License: NCSA or MIT @@ -55,6 +56,7 @@ Source2: 
release-keys.asc %endif Patch0: 0001-compiler-rt-Fix-FLOAT16-feature-detection.patch +Patch1: 0001-Support-LoongArch.patch # RHEL-specific patches Patch100: 0001-Drop-fno-stack-protector-from-the-compiler-flags.patch @@ -152,6 +154,9 @@ ln -s i386-redhat-linux-gnu %{buildroot}%{_prefix}/lib/clang/%{maj_ver}/lib/%{_t #%endif %changelog +* Thu Dec 12 2024 Chen Li - 18.1.8-1.0.1 +- Add support for LoongArch + * Tue Jul 09 2024 Tom Stellard - 18.1.8-1 - 18.1.8 Release